xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (revision e538109ac71d801d26776af5f3c54f548296c29c)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/aperture.h>
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include "amdgpu_virt.h"
#include "amdgpu_dev_coredump.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#endif

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)

#define AMDGPU_VBIOS_SKIP (1U << 0)
#define AMDGPU_VBIOS_OPTIONAL (1U << 1)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
/*
 * Default init level where all blocks are expected to be initialized. This is
 * the level of initialization expected by default and also after a full reset
 * of the device.
 */
struct amdgpu_init_level amdgpu_init_default = {
	.level = AMDGPU_INIT_LEVEL_DEFAULT,
	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
};

struct amdgpu_init_level amdgpu_init_recovery = {
	.level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
};

/*
 * Minimal blocks needed to be initialized before a XGMI hive can be reset. This
 * is used for cases like reset on initialization where the entire hive needs to
 * be reset before first use.
 */
struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
	.level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
	.hwini_ip_block_mask =
		BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
		BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
		BIT(AMD_IP_BLOCK_TYPE_PSP)
};

static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);

static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
					     enum amd_ip_block_type block)
{
	return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
}

void amdgpu_set_init_level(struct amdgpu_device *adev,
			   enum amdgpu_init_lvl_id lvl)
{
	switch (lvl) {
	case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
		adev->init_lvl = &amdgpu_init_minimal_xgmi;
		break;
	case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
		adev->init_lvl = &amdgpu_init_recovery;
		break;
	case AMDGPU_INIT_LEVEL_DEFAULT:
		fallthrough;
	default:
		adev->init_lvl = &amdgpu_init_default;
		break;
	}
}

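/*
 * Illustrative sketch (not part of the driver): how an init level is meant
 * to gate hardware init. Assuming a hypothetical walk over adev->ip_blocks,
 * a block outside the current level's mask would be skipped until a fuller
 * level is selected:
 *
 *	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
 *	for (i = 0; i < adev->num_ip_blocks; i++) {
 *		if (!amdgpu_ip_member_of_hwini(adev,
 *					       adev->ip_blocks[i].version->type))
 *			continue;	// deferred to a later, fuller init level
 *		...
 *	}
 */
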
static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
				     void *data);

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
		amdgpu_device_get_pcie_replay_count, NULL);

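/*
 * Illustrative userspace read of the attribute above (not part of the
 * driver); the card index is hypothetical:
 *
 *	int fd = open("/sys/class/drm/card0/device/pcie_replay_count",
 *		      O_RDONLY);
 *	char buf[32] = {};
 *	if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0)
 *		printf("pcie replays: %s", buf);
 */
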
static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
{
	int ret = 0;

	if (amdgpu_nbio_is_replay_cnt_supported(adev))
		ret = sysfs_create_file(&adev->dev->kobj,
					&dev_attr_pcie_replay_count.attr);

	return ret;
}

static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
{
	if (amdgpu_nbio_is_replay_cnt_supported(adev))
		sysfs_remove_file(&adev->dev->kobj,
				  &dev_attr_pcie_replay_count.attr);
}

static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
					  const struct bin_attribute *attr, char *buf,
					  loff_t ppos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t bytes_read;

	switch (ppos) {
	case AMDGPU_SYS_REG_STATE_XGMI:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_WAFL:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_PCIE:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR_1:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
		break;
	default:
		return -EINVAL;
	}

	return bytes_read;
}

static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
		      AMDGPU_SYS_REG_STATE_END);

int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (!amdgpu_asic_get_reg_state_supported(adev))
		return 0;

	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);

	return ret;
}

void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_asic_get_reg_state_supported(adev))
		return;
	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
}

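/*
 * Illustrative userspace access (not part of the driver): the reg_state bin
 * file is addressed by the AMDGPU_SYS_REG_STATE_* selector via the read
 * offset, so a reader would pread() at the selector value:
 *
 *	ssize_t n = pread(fd, buf, sizeof(buf), AMDGPU_SYS_REG_STATE_XGMI);
 */
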
int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	if (ip_block->version->funcs->suspend) {
		r = ip_block->version->funcs->suspend(ip_block);
		if (r) {
			dev_err(ip_block->adev->dev,
				"suspend of IP block <%s> failed %d\n",
				ip_block->version->funcs->name, r);
			return r;
		}
	}

	ip_block->status.hw = false;
	return 0;
}

int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	if (ip_block->version->funcs->resume) {
		r = ip_block->version->funcs->resume(ip_block);
		if (r) {
			dev_err(ip_block->adev->dev,
				"resume of IP block <%s> failed %d\n",
				ip_block->version->funcs->name, r);
			return r;
		}
	}

	ip_block->status.hw = true;
	return 0;
}

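/*
 * Illustrative only: callers typically apply these helpers across the IP
 * block array, suspending in reverse init order (hypothetical loop, error
 * handling elided):
 *
 *	for (i = adev->num_ip_blocks - 1; i >= 0; i--)
 *		amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
 */
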
/**
 * DOC: board_info
 *
 * The amdgpu driver provides a sysfs API for giving board related information.
 * It provides the form factor information in the format
 *
 *   type : form factor
 *
 * Possible form factor values
 *
 * - "cem"		- PCIE CEM card
 * - "oam"		- Open Compute Accelerator Module
 * - "unknown"	- Not known
 *
 */

static ssize_t amdgpu_device_get_board_info(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
	const char *pkg;

	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
		pkg_type = adev->smuio.funcs->get_pkg_type(adev);

	switch (pkg_type) {
	case AMDGPU_PKG_TYPE_CEM:
		pkg = "cem";
		break;
	case AMDGPU_PKG_TYPE_OAM:
		pkg = "oam";
		break;
	default:
		pkg = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
}

static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);

static struct attribute *amdgpu_board_attrs[] = {
	&dev_attr_board_info.attr,
	NULL,
};

static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
					     struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->flags & AMD_IS_APU)
		return 0;

	return attr->mode;
}

static const struct attribute_group amdgpu_board_attrs_group = {
	.attrs = amdgpu_board_attrs,
	.is_visible = amdgpu_board_attrs_is_visible
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct amdgpu_device *adev)
{
	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @adev: amdgpu device pointer
 *
 * Return:
 * 1 if the device supports BACO;
 * 3 if the device supports MACO (only works if BACO is supported);
 * otherwise returns 0.
 */
int amdgpu_device_supports_baco(struct amdgpu_device *adev)
{
	return amdgpu_asic_supports_baco(adev);
}

void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
	int bamaco_support;

	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
	bamaco_support = amdgpu_device_supports_baco(adev);

	switch (amdgpu_runtime_pm) {
	case 2:
		if (bamaco_support & MACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
			dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
		} else if (bamaco_support == BACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n");
		}
		break;
	case 1:
		if (bamaco_support & BACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			dev_info(adev->dev, "Forcing BACO for runtime pm\n");
		}
		break;
	case -1:
	case -2:
		if (amdgpu_device_supports_px(adev)) {
			/* enable PX as runtime mode */
			adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
			dev_info(adev->dev, "Using ATPX for runtime pm\n");
		} else if (amdgpu_device_supports_boco(adev)) {
			/* enable boco as runtime mode */
			adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
			dev_info(adev->dev, "Using BOCO for runtime pm\n");
		} else {
			if (!bamaco_support)
				goto no_runtime_pm;

			switch (adev->asic_type) {
			case CHIP_VEGA20:
			case CHIP_ARCTURUS:
				/* BACO is not supported on vega20 and arcturus */
				break;
			case CHIP_VEGA10:
				/* enable BACO as runpm mode if noretry=0 */
				if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
				break;
			default:
				/* enable BACO as runpm mode on CI+ */
				if (!amdgpu_passthrough(adev))
					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
				break;
			}

			if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
				if (bamaco_support & MACO_SUPPORT) {
					adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
					dev_info(adev->dev, "Using BAMACO for runtime pm\n");
				} else {
					dev_info(adev->dev, "Using BACO for runtime pm\n");
				}
			}
		}
		break;
	case 0:
		dev_info(adev->dev, "runtime pm is manually disabled\n");
		break;
	default:
		break;
	}

no_runtime_pm:
	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
		dev_info(adev->dev, "Runtime PM not available\n");
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 *
 * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
{
	return (amdgpu_device_supports_boco(adev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
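		/*
		 * Assumption for the reader: MM_INDEX takes the low 31 bits of
		 * the VRAM offset (bit 31 selects VRAM rather than register
		 * access); the remaining high offset bits go through
		 * MM_INDEX_HI, which is only rewritten when they change.
		 */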
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			/* Make sure HDP write cache flush happens without any reordering
			 * after the system memory contents are sent over PCIe device
			 */
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			/* Make sure HDP read cache is invalidated before issuing a read
			 * to the PCIe device
			 */
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the VRAM aperture to access VRAM first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of VRAM */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}

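/*
 * Illustrative usage (not part of the driver): reading the first 16 bytes of
 * VRAM into a local buffer; offset and size are hypothetical:
 *
 *	u32 data[4];
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */
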
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/**
 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t acc_flags,
				uint32_t xcc_id)
{
	uint32_t ret, rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, false,
							 &rlcg_flag)) {
			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, GET_INST(GC, xcc_id));
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	return ret;
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v,
			     uint32_t xcc_id)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags, uint32_t xcc_id)
{
	uint32_t rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, true,
							 &rlcg_flag)) {
			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, GET_INST(GC, xcc_id));
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u32 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
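	/* read the index back to flush the posted write before touching data */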
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	if (unlikely(!adev->nbio.funcs)) {
		pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
		pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
	} else {
		pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
		pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	}

	if (reg_addr >> 32) {
		if (unlikely(!adev->nbio.funcs))
			pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
		else
			pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	} else {
		pcie_index_hi = 0;
	}

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
				  u64 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;
	u64 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
			pcie_index_hi * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	r |= ((u64)readl(pcie_data_offset) << 32);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write to an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data)
{
	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
	else
		pcie_index_hi = 0;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write to a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
				   u64 reg_addr, u64 reg_data)
{
	unsigned long flags, pcie_index, pcie_data;
	unsigned long pcie_index_hi = 0;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_index_hi_offset;
	void __iomem *pcie_data_offset;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
	if (pcie_index_hi != 0)
		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
				pcie_index_hi * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	if (pcie_index_hi != 0) {
		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);

	/* clear the high bits */
	if (pcie_index_hi != 0) {
		writel(0, pcie_index_hi_offset);
		readl(pcie_index_hi_offset);
	}

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Return device rev_id
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
	dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	dev_err(adev->dev,
		"Invalid callback to write register 0x%04X with 0x%08X\n", reg,
		v);
	BUG();
}

static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
	dev_err(adev->dev,
		"Invalid callback to write register 0x%llX with 0x%08X\n", reg,
		v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
		reg);
	BUG();
	return 0;
}

static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
	dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	dev_err(adev->dev,
		"Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		reg, v);
	BUG();
}

static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
	dev_err(adev->dev,
		"Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
		reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	dev_err(adev->dev,
		"Invalid callback to read register 0x%04X in block 0x%04X\n",
		reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	dev_err(adev->dev,
		"Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		reg, block, v);
	BUG();
}

static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
{
	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
		return AMDGPU_VBIOS_SKIP;

	if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
		return AMDGPU_VBIOS_OPTIONAL;

	return 0;
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	uint32_t flags;
	bool optional;
	int ret;

	amdgpu_asic_pre_asic_init(adev);
	flags = amdgpu_device_get_vbios_flags(adev);
	optional = !!(flags & (AMDGPU_VBIOS_OPTIONAL | AMDGPU_VBIOS_SKIP));

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		amdgpu_psp_wait_for_bootloader(adev);
		if (optional && !adev->bios)
			return 0;

		ret = amdgpu_atomfirmware_asic_init(adev, true);
		return ret;
	} else {
		if (optional && !adev->bios)
			return 0;

		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
	}

	return 0;
}

/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND/OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

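/*
 * Illustrative only (hypothetical register and values): entries come in
 * {reg, and_mask, or_mask} triples, e.g. forcing the low nibble of a
 * register to 0x5 while preserving its other bits:
 *
 *	static const u32 golden_settings[] = {
 *		mmFOO_CNTL, 0x0000000f, 0x00000005,	// mmFOO_CNTL is made up
 *	};
 *	amdgpu_device_program_register_sequence(adev, golden_settings,
 *						ARRAY_SIZE(golden_settings));
 */
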
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long flags, offset;

	spin_lock_irqsave(&adev->wb.lock, flags);
	offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		spin_unlock_irqrestore(&adev->wb.lock, flags);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		spin_unlock_irqrestore(&adev->wb.lock, flags);
		return -EINVAL;
	}
}

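/*
 * Illustrative pairing (not part of the driver, error handling elided):
 * allocate a writeback slot, read the CPU-visible copy, then release it.
 * The returned index is a dword offset into adev->wb.wb:
 *
 *	u32 wb;
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u32 val = adev->wb.wb[wb];
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */
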
/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	unsigned long flags;

	wb >>= 3;
	spin_lock_irqsave(&adev->wb.lock, flags);
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
	spin_unlock_irqrestore(&adev->wb.lock, flags);
}

1662 /**
1663  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1664  *
1665  * @adev: amdgpu_device pointer
1666  *
1667  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1668  * to fail, but if any of the BARs is not accessible after the size we abort
1669  * driver loading by returning -ENODEV.
1670  */
amdgpu_device_resize_fb_bar(struct amdgpu_device * adev)1671 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1672 {
1673 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1674 	struct pci_bus *root;
1675 	struct resource *res;
1676 	unsigned int i;
1677 	u16 cmd;
1678 	int r;
1679 
1680 	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1681 		return 0;
1682 
1683 	/* Bypass for VF */
1684 	if (amdgpu_sriov_vf(adev))
1685 		return 0;
1686 
1687 	if (!amdgpu_rebar)
1688 		return 0;
1689 
1690 	/* resizing on Dell G5 SE platforms causes problems with runtime pm */
1691 	if ((amdgpu_runtime_pm != 0) &&
1692 	    adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
1693 	    adev->pdev->device == 0x731f &&
1694 	    adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
1695 		return 0;
1696 
1697 	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
1698 	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
1699 		dev_warn(
1700 			adev->dev,
1701 			"System can't access extended configuration space, please check!!\n");
1702 
1703 	/* skip if the bios has already enabled large BAR */
1704 	if (adev->gmc.real_vram_size &&
1705 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1706 		return 0;
1707 
1708 	/* Check if the root BUS has 64bit memory resources */
1709 	root = adev->pdev->bus;
1710 	while (root->parent)
1711 		root = root->parent;
1712 
1713 	pci_bus_for_each_resource(root, res, i) {
1714 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1715 		    res->start > 0x100000000ull)
1716 			break;
1717 	}
1718 
1719 	/* Trying to resize is pointless without a root hub window above 4GB */
1720 	if (!res)
1721 		return 0;
1722 
1723 	/* Limit the BAR size to what is available */
1724 	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1725 			rbar_size);
1726 
1727 	/* Disable memory decoding while we change the BAR addresses and size */
1728 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1729 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1730 			      cmd & ~PCI_COMMAND_MEMORY);
1731 
1732 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1733 	amdgpu_doorbell_fini(adev);
1734 	if (adev->asic_type >= CHIP_BONAIRE)
1735 		pci_release_resource(adev->pdev, 2);
1736 
1737 	pci_release_resource(adev->pdev, 0);
1738 
1739 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1740 	if (r == -ENOSPC)
1741 		dev_info(adev->dev,
1742 			 "Not enough PCI address space for a large BAR.");
1743 	else if (r && r != -ENOTSUPP)
1744 		dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
1745 
1746 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1747 
1748 	/* When the doorbell or fb BAR isn't available we have no chance of
1749 	 * using the device.
1750 	 */
1751 	r = amdgpu_doorbell_init(adev);
1752 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1753 		return -ENODEV;
1754 
1755 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1756 
1757 	return 0;
1758 }
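
/*
 * For reference, a sketch of the ReBAR size encoding used above:
 * pci_rebar_bytes_to_size() rounds up to a power of two and returns
 * log2(bytes) - 20, matching the resizable BAR capability encoding,
 * so 256 MiB maps to 8, 4 GiB to 12 and 8 GiB to 13:
 *
 *	rbar_size = pci_rebar_bytes_to_size(8ULL << 30);	// -> 13
 */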
1759 
1760 /*
1761  * GPU helpers function.
1762  */
1763 /**
1764  * amdgpu_device_need_post - check if the hw needs post or not
1765  *
1766  * @adev: amdgpu_device pointer
1767  *
1768  * Check if the asic has been initialized (all asics) at driver startup,
1769  * or if post is needed because a hw reset was performed.
1770  * Returns true if post is needed or false if not.
1771  */
1772 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1773 {
1774 	uint32_t reg, flags;
1775 
1776 	if (amdgpu_sriov_vf(adev))
1777 		return false;
1778 
1779 	flags = amdgpu_device_get_vbios_flags(adev);
1780 	if (flags & AMDGPU_VBIOS_SKIP)
1781 		return false;
1782 	if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
1783 		return false;
1784 
1785 	if (amdgpu_passthrough(adev)) {
1786 		/* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
1787 		 * reboot some old SMC firmware still needs the driver to perform vPost,
1788 		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1789 		 * this flaw, so we force vPost for SMC versions below 22.15.
1790 		 */
1791 		if (adev->asic_type == CHIP_FIJI) {
1792 			int err;
1793 			uint32_t fw_ver;
1794 
1795 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1796 			/* force vPost if error occurred */
1797 			if (err)
1798 				return true;
1799 
1800 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1801 			release_firmware(adev->pm.fw);
1802 			if (fw_ver < 0x00160e00)
1803 				return true;
1804 		}
1805 	}
1806 
1807 	/* Don't post if we need to reset the whole hive on init */
1808 	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
1809 		return false;
1810 
1811 	if (adev->has_hw_reset) {
1812 		adev->has_hw_reset = false;
1813 		return true;
1814 	}
1815 
1816 	/* bios scratch used on CIK+ */
1817 	if (adev->asic_type >= CHIP_BONAIRE)
1818 		return amdgpu_atombios_scratch_need_asic_init(adev);
1819 
1820 	/* check MEM_SIZE for older asics */
1821 	reg = amdgpu_asic_get_config_memsize(adev);
1822 
1823 	if ((reg != 0) && (reg != 0xffffffff))
1824 		return false;
1825 
1826 	return true;
1827 }
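
/*
 * A sketch of the init-path usage (assuming a valid atom context):
 * the card is posted through the atom BIOS only when this helper
 * reports it is needed, e.g.
 *
 *	if (amdgpu_device_need_post(adev))
 *		r = amdgpu_device_asic_init(adev);
 */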
1828 
1829 /*
1830  * Check whether seamless boot is supported.
1831  *
1832  * So far we only support seamless boot on DCE 3.0 or later.
1833  * If users report that it works on older ASICS as well, we may
1834  * loosen this.
1835  */
1836 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
1837 {
1838 	switch (amdgpu_seamless) {
1839 	case -1:
1840 		break;
1841 	case 1:
1842 		return true;
1843 	case 0:
1844 		return false;
1845 	default:
1846 		dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
1847 			amdgpu_seamless);
1848 		return false;
1849 	}
1850 
1851 	if (!(adev->flags & AMD_IS_APU))
1852 		return false;
1853 
1854 	if (adev->mman.keep_stolen_vga_memory)
1855 		return false;
1856 
1857 	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
1858 }
1859 
1860 /*
1861  * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
1862  * don't support dynamic speed switching. Until we have confirmation from Intel
1863  * that a specific host supports it, it's safer that we keep it disabled for all.
1864  *
1865  * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1866  * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1867  */
1868 static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
1869 {
1870 #if IS_ENABLED(CONFIG_X86)
1871 	struct cpuinfo_x86 *c = &cpu_data(0);
1872 
1873 	/* eGPUs change speed based on USB4 fabric conditions */
1874 	if (dev_is_removable(adev->dev))
1875 		return true;
1876 
1877 	if (c->x86_vendor == X86_VENDOR_INTEL)
1878 		return false;
1879 #endif
1880 	return true;
1881 }
1882 
1883 static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
1884 {
1885 	/* Enabling ASPM causes random hangs on Tahiti and Oland on Zen4.
1886 	 * It's unclear if this is a platform-specific or GPU-specific issue.
1887 	 * Disable ASPM on SI for the time being.
1888 	 */
1889 	if (adev->family == AMDGPU_FAMILY_SI)
1890 		return true;
1891 
1892 #if IS_ENABLED(CONFIG_X86)
1893 	struct cpuinfo_x86 *c = &cpu_data(0);
1894 
1895 	if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
1896 		  amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
1897 		return false;
1898 
1899 	if (c->x86 == 6 &&
1900 		adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
1901 		switch (c->x86_model) {
1902 		case VFM_MODEL(INTEL_ALDERLAKE):
1903 		case VFM_MODEL(INTEL_ALDERLAKE_L):
1904 		case VFM_MODEL(INTEL_RAPTORLAKE):
1905 		case VFM_MODEL(INTEL_RAPTORLAKE_P):
1906 		case VFM_MODEL(INTEL_RAPTORLAKE_S):
1907 			return true;
1908 		default:
1909 			return false;
1910 		}
1911 	} else {
1912 		return false;
1913 	}
1914 #else
1915 	return false;
1916 #endif
1917 }
1918 
1919 /**
1920  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1921  *
1922  * @adev: amdgpu_device pointer
1923  *
1924  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1925  * be set for this device.
1926  *
1927  * Returns true if it should be used or false if not.
1928  */
1929 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1930 {
1931 	switch (amdgpu_aspm) {
1932 	case -1:
1933 		break;
1934 	case 0:
1935 		return false;
1936 	case 1:
1937 		return true;
1938 	default:
1939 		return false;
1940 	}
1941 	if (adev->flags & AMD_IS_APU)
1942 		return false;
1943 	if (amdgpu_device_aspm_support_quirk(adev))
1944 		return false;
1945 	return pcie_aspm_enabled(adev->pdev);
1946 }
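
/*
 * A typical caller sketch, modeled on the SoC code paths: IP-specific
 * code gates its ASPM programming on this helper, e.g.
 *
 *	if (!amdgpu_device_should_use_aspm(adev))
 *		return;
 *	if (adev->nbio.funcs->program_aspm)
 *		adev->nbio.funcs->program_aspm(adev);
 */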
1947 
1948 /* if we get transitioned to only one device, take VGA back */
1949 /**
1950  * amdgpu_device_vga_set_decode - enable/disable vga decode
1951  *
1952  * @pdev: PCI device pointer
1953  * @state: enable/disable vga decode
1954  *
1955  * Enable/disable vga decode (all asics).
1956  * Returns VGA resource flags.
1957  */
1958 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1959 		bool state)
1960 {
1961 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1962 
1963 	amdgpu_asic_set_vga_state(adev, state);
1964 	if (state)
1965 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1966 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1967 	else
1968 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1969 }
1970 
1971 /**
1972  * amdgpu_device_check_block_size - validate the vm block size
1973  *
1974  * @adev: amdgpu_device pointer
1975  *
1976  * Validates the vm block size specified via module parameter.
1977  * The vm block size defines number of bits in page table versus page directory,
1978  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1979  * page table and the remaining bits are in the page directory.
1980  */
1981 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1982 {
1983 	/* defines number of bits in page table versus page directory,
1984 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1985 	 * page table and the remaining bits are in the page directory
1986 	 */
1987 	if (amdgpu_vm_block_size == -1)
1988 		return;
1989 
1990 	if (amdgpu_vm_block_size < 9) {
1991 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1992 			 amdgpu_vm_block_size);
1993 		amdgpu_vm_block_size = -1;
1994 	}
1995 }
1996 
1997 /**
1998  * amdgpu_device_check_vm_size - validate the vm size
1999  *
2000  * @adev: amdgpu_device pointer
2001  *
2002  * Validates the vm size in GB specified via module parameter.
2003  * The VM size is the size of the GPU virtual memory space in GB.
2004  */
2005 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
2006 {
2007 	/* no need to check the default value */
2008 	if (amdgpu_vm_size == -1)
2009 		return;
2010 
2011 	if (amdgpu_vm_size < 1) {
2012 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
2013 			 amdgpu_vm_size);
2014 		amdgpu_vm_size = -1;
2015 	}
2016 }
2017 
2018 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
2019 {
2020 	struct sysinfo si;
2021 	bool is_os_64 = (sizeof(void *) == 8);
2022 	uint64_t total_memory;
2023 	uint64_t dram_size_seven_GB = 0x1B8000000;
2024 	uint64_t dram_size_three_GB = 0xB8000000;
2025 
2026 	if (amdgpu_smu_memory_pool_size == 0)
2027 		return;
2028 
2029 	if (!is_os_64) {
2030 		dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
2031 		goto def_value;
2032 	}
2033 	si_meminfo(&si);
2034 	total_memory = (uint64_t)si.totalram * si.mem_unit;
2035 
2036 	if ((amdgpu_smu_memory_pool_size == 1) ||
2037 		(amdgpu_smu_memory_pool_size == 2)) {
2038 		if (total_memory < dram_size_three_GB)
2039 			goto def_value1;
2040 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
2041 		(amdgpu_smu_memory_pool_size == 8)) {
2042 		if (total_memory < dram_size_seven_GB)
2043 			goto def_value1;
2044 	} else {
2045 		dev_warn(adev->dev, "Smu memory pool size not supported\n");
2046 		goto def_value;
2047 	}
2048 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
2049 
2050 	return;
2051 
2052 def_value1:
2053 	dev_warn(adev->dev, "No enough system memory\n");
2054 def_value:
2055 	adev->pm.smu_prv_buffer_size = 0;
2056 }
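
/*
 * Note on units: amdgpu_smu_memory_pool_size is given in 256 MiB
 * steps, so the shift by 28 above converts it to bytes; e.g. a
 * module parameter of 2 yields a 512 MiB pool (sketch):
 *
 *	adev->pm.smu_prv_buffer_size = 2ULL << 28;	// 512 MiB
 */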
2057 
2058 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
2059 {
2060 	if (!(adev->flags & AMD_IS_APU) ||
2061 	    adev->asic_type < CHIP_RAVEN)
2062 		return 0;
2063 
2064 	switch (adev->asic_type) {
2065 	case CHIP_RAVEN:
2066 		if (adev->pdev->device == 0x15dd)
2067 			adev->apu_flags |= AMD_APU_IS_RAVEN;
2068 		if (adev->pdev->device == 0x15d8)
2069 			adev->apu_flags |= AMD_APU_IS_PICASSO;
2070 		break;
2071 	case CHIP_RENOIR:
2072 		if ((adev->pdev->device == 0x1636) ||
2073 		    (adev->pdev->device == 0x164c))
2074 			adev->apu_flags |= AMD_APU_IS_RENOIR;
2075 		else
2076 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
2077 		break;
2078 	case CHIP_VANGOGH:
2079 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2080 		break;
2081 	case CHIP_YELLOW_CARP:
2082 		break;
2083 	case CHIP_CYAN_SKILLFISH:
2084 		if ((adev->pdev->device == 0x13FE) ||
2085 		    (adev->pdev->device == 0x143F))
2086 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
2087 		break;
2088 	default:
2089 		break;
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 /**
2096  * amdgpu_device_check_arguments - validate module params
2097  *
2098  * @adev: amdgpu_device pointer
2099  *
2100  * Validates certain module parameters and updates
2101  * the associated values used by the driver (all asics).
2102  */
2103 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
2104 {
2105 	int i;
2106 
2107 	if (amdgpu_sched_jobs < 4) {
2108 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
2109 			 amdgpu_sched_jobs);
2110 		amdgpu_sched_jobs = 4;
2111 	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
2112 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
2113 			 amdgpu_sched_jobs);
2114 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
2115 	}
2116 
2117 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
2118 		/* gart size must be greater than or equal to 32M */
2119 		dev_warn(adev->dev, "gart size (%d) too small\n",
2120 			 amdgpu_gart_size);
2121 		amdgpu_gart_size = -1;
2122 	}
2123 
2124 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
2125 		/* gtt size must be greater than or equal to 32M */
2126 		dev_warn(adev->dev, "gtt size (%d) too small\n",
2127 				 amdgpu_gtt_size);
2128 		amdgpu_gtt_size = -1;
2129 	}
2130 
2131 	/* valid range is between 4 and 9 inclusive */
2132 	if (amdgpu_vm_fragment_size != -1 &&
2133 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
2134 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
2135 		amdgpu_vm_fragment_size = -1;
2136 	}
2137 
2138 	if (amdgpu_sched_hw_submission < 2) {
2139 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
2140 			 amdgpu_sched_hw_submission);
2141 		amdgpu_sched_hw_submission = 2;
2142 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
2143 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
2144 			 amdgpu_sched_hw_submission);
2145 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
2146 	}
2147 
2148 	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
2149 		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
2150 		amdgpu_reset_method = -1;
2151 	}
2152 
2153 	amdgpu_device_check_smu_prv_buffer_size(adev);
2154 
2155 	amdgpu_device_check_vm_size(adev);
2156 
2157 	amdgpu_device_check_block_size(adev);
2158 
2159 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
2160 
2161 	for (i = 0; i < MAX_XCP; i++) {
2162 		switch (amdgpu_enforce_isolation) {
2163 		case -1:
2164 		case 0:
2165 		default:
2166 			/* disable */
2167 			adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
2168 			break;
2169 		case 1:
2170 			/* enable */
2171 			adev->enforce_isolation[i] =
2172 				AMDGPU_ENFORCE_ISOLATION_ENABLE;
2173 			break;
2174 		case 2:
2175 			/* enable legacy mode */
2176 			adev->enforce_isolation[i] =
2177 				AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
2178 			break;
2179 		case 3:
2180 			/* enable only process isolation without submitting cleaner shader */
2181 			adev->enforce_isolation[i] =
2182 				AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
2183 			break;
2184 		}
2185 	}
2186 
2187 	return 0;
2188 }
2189 
2190 /**
2191  * amdgpu_switcheroo_set_state - set switcheroo state
2192  *
2193  * @pdev: pci dev pointer
2194  * @state: vga_switcheroo state
2195  *
2196  * Callback for the switcheroo driver.  Suspends or resumes
2197  * the asics before or after it is powered up using ACPI methods.
2198  */
2199 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
2200 					enum vga_switcheroo_state state)
2201 {
2202 	struct drm_device *dev = pci_get_drvdata(pdev);
2203 	int r;
2204 
2205 	if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
2206 	    state == VGA_SWITCHEROO_OFF)
2207 		return;
2208 
2209 	if (state == VGA_SWITCHEROO_ON) {
2210 		pr_info("switched on\n");
2211 		/* don't suspend or resume card normally */
2212 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
2213 
2214 		pci_set_power_state(pdev, PCI_D0);
2215 		amdgpu_device_load_pci_state(pdev);
2216 		r = pci_enable_device(pdev);
2217 		if (r)
2218 			dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
2219 				 r);
2220 		amdgpu_device_resume(dev, true);
2221 
2222 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
2223 	} else {
2224 		dev_info(&pdev->dev, "switched off\n");
2225 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
2226 		amdgpu_device_prepare(dev);
2227 		amdgpu_device_suspend(dev, true);
2228 		amdgpu_device_cache_pci_state(pdev);
2229 		/* Shut down the device */
2230 		pci_disable_device(pdev);
2231 		pci_set_power_state(pdev, PCI_D3cold);
2232 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
2233 	}
2234 }
2235 
2236 /**
2237  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
2238  *
2239  * @pdev: pci dev pointer
2240  *
2241  * Callback for the switcheroo driver.  Check if the switcheroo
2242  * state can be changed.
2243  * Returns true if the state can be changed, false if not.
2244  */
2245 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
2246 {
2247 	struct drm_device *dev = pci_get_drvdata(pdev);
2248 
2249        /*
2250 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
2251 	* locking inversion with the driver load path. And the access here is
2252 	* completely racy anyway. So don't bother with locking for now.
2253 	*/
2254 	return atomic_read(&dev->open_count) == 0;
2255 }
2256 
2257 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
2258 	.set_gpu_state = amdgpu_switcheroo_set_state,
2259 	.reprobe = NULL,
2260 	.can_switch = amdgpu_switcheroo_can_switch,
2261 };
2262 
2263 /**
2264  * amdgpu_device_ip_set_clockgating_state - set the CG state
2265  *
2266  * @dev: amdgpu_device pointer
2267  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2268  * @state: clockgating state (gate or ungate)
2269  *
2270  * Sets the requested clockgating state for all instances of
2271  * the hardware IP specified.
2272  * Returns the error code from the last instance.
2273  */
2274 int amdgpu_device_ip_set_clockgating_state(void *dev,
2275 					   enum amd_ip_block_type block_type,
2276 					   enum amd_clockgating_state state)
2277 {
2278 	struct amdgpu_device *adev = dev;
2279 	int i, r = 0;
2280 
2281 	for (i = 0; i < adev->num_ip_blocks; i++) {
2282 		if (!adev->ip_blocks[i].status.valid)
2283 			continue;
2284 		if (adev->ip_blocks[i].version->type != block_type)
2285 			continue;
2286 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
2287 			continue;
2288 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
2289 			&adev->ip_blocks[i], state);
2290 		if (r)
2291 			dev_err(adev->dev,
2292 				"set_clockgating_state of IP block <%s> failed %d\n",
2293 				adev->ip_blocks[i].version->funcs->name, r);
2294 	}
2295 	return r;
2296 }
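
/*
 * Usage sketch (hypothetical caller): gate the clocks of every GFX
 * instance on the device.
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *						   AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 */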
2297 
2298 /**
2299  * amdgpu_device_ip_set_powergating_state - set the PG state
2300  *
2301  * @dev: amdgpu_device pointer
2302  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2303  * @state: powergating state (gate or ungate)
2304  *
2305  * Sets the requested powergating state for all instances of
2306  * the hardware IP specified.
2307  * Returns the error code from the last instance.
2308  */
2309 int amdgpu_device_ip_set_powergating_state(void *dev,
2310 					   enum amd_ip_block_type block_type,
2311 					   enum amd_powergating_state state)
2312 {
2313 	struct amdgpu_device *adev = dev;
2314 	int i, r = 0;
2315 
2316 	for (i = 0; i < adev->num_ip_blocks; i++) {
2317 		if (!adev->ip_blocks[i].status.valid)
2318 			continue;
2319 		if (adev->ip_blocks[i].version->type != block_type)
2320 			continue;
2321 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
2322 			continue;
2323 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
2324 			&adev->ip_blocks[i], state);
2325 		if (r)
2326 			dev_err(adev->dev,
2327 				"set_powergating_state of IP block <%s> failed %d\n",
2328 				adev->ip_blocks[i].version->funcs->name, r);
2329 	}
2330 	return r;
2331 }
2332 
2333 /**
2334  * amdgpu_device_ip_get_clockgating_state - get the CG state
2335  *
2336  * @adev: amdgpu_device pointer
2337  * @flags: clockgating feature flags
2338  *
2339  * Walks the list of IPs on the device and updates the clockgating
2340  * flags for each IP.
2341  * Updates @flags with the feature flags for each hardware IP where
2342  * clockgating is enabled.
2343  */
2344 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
2345 					    u64 *flags)
2346 {
2347 	int i;
2348 
2349 	for (i = 0; i < adev->num_ip_blocks; i++) {
2350 		if (!adev->ip_blocks[i].status.valid)
2351 			continue;
2352 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
2353 			adev->ip_blocks[i].version->funcs->get_clockgating_state(
2354 				&adev->ip_blocks[i], flags);
2355 	}
2356 }
2357 
2358 /**
2359  * amdgpu_device_ip_wait_for_idle - wait for idle
2360  *
2361  * @adev: amdgpu_device pointer
2362  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2363  *
2364  * Waits for the requested hardware IP to be idle.
2365  * Returns 0 for success or a negative error code on failure.
2366  */
2367 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
2368 				   enum amd_ip_block_type block_type)
2369 {
2370 	int i, r;
2371 
2372 	for (i = 0; i < adev->num_ip_blocks; i++) {
2373 		if (!adev->ip_blocks[i].status.valid)
2374 			continue;
2375 		if (adev->ip_blocks[i].version->type == block_type) {
2376 			if (adev->ip_blocks[i].version->funcs->wait_for_idle) {
2377 				r = adev->ip_blocks[i].version->funcs->wait_for_idle(
2378 								&adev->ip_blocks[i]);
2379 				if (r)
2380 					return r;
2381 			}
2382 			break;
2383 		}
2384 	}
2385 	return 0;
2386 
2387 }
2388 
2389 /**
2390  * amdgpu_device_ip_is_valid - is the hardware IP enabled
2391  *
2392  * @adev: amdgpu_device pointer
2393  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2394  *
2395  * Check if the hardware IP is enabled or not.
2396  * Returns true if the IP is enabled, false if not.
2397  */
2398 bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
2399 			       enum amd_ip_block_type block_type)
2400 {
2401 	int i;
2402 
2403 	for (i = 0; i < adev->num_ip_blocks; i++) {
2404 		if (adev->ip_blocks[i].version->type == block_type)
2405 			return adev->ip_blocks[i].status.valid;
2406 	}
2407 	return false;
2408 
2409 }
2410 
2411 /**
2412  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2413  *
2414  * @adev: amdgpu_device pointer
2415  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
2416  *
2417  * Returns a pointer to the hardware IP block structure
2418  * if it exists for the asic, otherwise NULL.
2419  */
2420 struct amdgpu_ip_block *
2421 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
2422 			      enum amd_ip_block_type type)
2423 {
2424 	int i;
2425 
2426 	for (i = 0; i < adev->num_ip_blocks; i++)
2427 		if (adev->ip_blocks[i].version->type == type)
2428 			return &adev->ip_blocks[i];
2429 
2430 	return NULL;
2431 }
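
/*
 * Usage sketch: look up the GFX block and check that it is present
 * and enabled before touching it.
 *
 *	struct amdgpu_ip_block *ip_block =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *
 *	if (ip_block && ip_block->status.valid)
 *		...
 */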
2432 
2433 /**
2434  * amdgpu_device_ip_block_version_cmp
2435  *
2436  * @adev: amdgpu_device pointer
2437  * @type: enum amd_ip_block_type
2438  * @major: major version
2439  * @minor: minor version
2440  *
2441  * return 0 if equal or greater
2442  * return 1 if smaller or the ip_block doesn't exist
2443  */
2444 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
2445 				       enum amd_ip_block_type type,
2446 				       u32 major, u32 minor)
2447 {
2448 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
2449 
2450 	if (ip_block && ((ip_block->version->major > major) ||
2451 			((ip_block->version->major == major) &&
2452 			(ip_block->version->minor >= minor))))
2453 		return 0;
2454 
2455 	return 1;
2456 }
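
/*
 * Usage sketch: since 0 means "equal or greater", a feature gate for
 * a minimum IP block version reads:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *						7, 0))
 *		... the asic has SMU 7.0 or newer ...
 */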
2457 
2458 static const char *ip_block_names[] = {
2459 	[AMD_IP_BLOCK_TYPE_COMMON] = "common",
2460 	[AMD_IP_BLOCK_TYPE_GMC] = "gmc",
2461 	[AMD_IP_BLOCK_TYPE_IH] = "ih",
2462 	[AMD_IP_BLOCK_TYPE_SMC] = "smu",
2463 	[AMD_IP_BLOCK_TYPE_PSP] = "psp",
2464 	[AMD_IP_BLOCK_TYPE_DCE] = "dce",
2465 	[AMD_IP_BLOCK_TYPE_GFX] = "gfx",
2466 	[AMD_IP_BLOCK_TYPE_SDMA] = "sdma",
2467 	[AMD_IP_BLOCK_TYPE_UVD] = "uvd",
2468 	[AMD_IP_BLOCK_TYPE_VCE] = "vce",
2469 	[AMD_IP_BLOCK_TYPE_ACP] = "acp",
2470 	[AMD_IP_BLOCK_TYPE_VCN] = "vcn",
2471 	[AMD_IP_BLOCK_TYPE_MES] = "mes",
2472 	[AMD_IP_BLOCK_TYPE_JPEG] = "jpeg",
2473 	[AMD_IP_BLOCK_TYPE_VPE] = "vpe",
2474 	[AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
2475 	[AMD_IP_BLOCK_TYPE_ISP] = "isp",
2476 };
2477 
2478 static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type)
2479 {
2480 	int idx = (int)type;
2481 
2482 	return idx < ARRAY_SIZE(ip_block_names) ? ip_block_names[idx] : "unknown";
2483 }
2484 
2485 /**
2486  * amdgpu_device_ip_block_add
2487  *
2488  * @adev: amdgpu_device pointer
2489  * @ip_block_version: pointer to the IP to add
2490  *
2491  * Adds the IP block driver information to the collection of IPs
2492  * on the asic.
2493  */
2494 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
2495 			       const struct amdgpu_ip_block_version *ip_block_version)
2496 {
2497 	if (!ip_block_version)
2498 		return -EINVAL;
2499 
2500 	switch (ip_block_version->type) {
2501 	case AMD_IP_BLOCK_TYPE_VCN:
2502 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2503 			return 0;
2504 		break;
2505 	case AMD_IP_BLOCK_TYPE_JPEG:
2506 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2507 			return 0;
2508 		break;
2509 	default:
2510 		break;
2511 	}
2512 
2513 	dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n",
2514 		 adev->num_ip_blocks,
2515 		 ip_block_name(adev, ip_block_version->type),
2516 		 ip_block_version->major,
2517 		 ip_block_version->minor,
2518 		 ip_block_version->rev,
2519 		 ip_block_version->funcs->name);
2520 
2521 	adev->ip_blocks[adev->num_ip_blocks].adev = adev;
2522 
2523 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2524 
2525 	return 0;
2526 }
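
/*
 * Usage sketch, modeled on the per-asic set_ip_blocks routines: each
 * asic registers its IP blocks in initialization order, e.g. for VI
 * parts:
 *
 *	amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *	amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 *	amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
 */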
2527 
2528 /**
2529  * amdgpu_device_enable_virtual_display - enable virtual display feature
2530  *
2531  * @adev: amdgpu_device pointer
2532  *
2533  * Enables the virtual display feature if the user has enabled it via
2534  * the module parameter virtual_display.  This feature provides virtual
2535  * display hardware on headless boards or in virtualized environments.
2536  * This function parses and validates the configuration string specified by
2537  * the user and configures the virtual display configuration (number of
2538  * virtual connectors, crtcs, etc.) specified.
2539  */
2540 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2541 {
2542 	adev->enable_virtual_display = false;
2543 
2544 	if (amdgpu_virtual_display) {
2545 		const char *pci_address_name = pci_name(adev->pdev);
2546 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
2547 
2548 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2549 		pciaddstr_tmp = pciaddstr;
2550 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2551 			pciaddname = strsep(&pciaddname_tmp, ",");
2552 			if (!strcmp("all", pciaddname)
2553 			    || !strcmp(pci_address_name, pciaddname)) {
2554 				long num_crtc;
2555 				int res = -1;
2556 
2557 				adev->enable_virtual_display = true;
2558 
2559 				if (pciaddname_tmp)
2560 					res = kstrtol(pciaddname_tmp, 10,
2561 						      &num_crtc);
2562 
2563 				if (!res) {
2564 					if (num_crtc < 1)
2565 						num_crtc = 1;
2566 					if (num_crtc > 6)
2567 						num_crtc = 6;
2568 					adev->mode_info.num_crtc = num_crtc;
2569 				} else {
2570 					adev->mode_info.num_crtc = 1;
2571 				}
2572 				break;
2573 			}
2574 		}
2575 
2576 		dev_info(
2577 			adev->dev,
2578 			"virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2579 			amdgpu_virtual_display, pci_address_name,
2580 			adev->enable_virtual_display, adev->mode_info.num_crtc);
2581 
2582 		kfree(pciaddstr);
2583 	}
2584 }
2585 
2586 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2587 {
2588 	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2589 		adev->mode_info.num_crtc = 1;
2590 		adev->enable_virtual_display = true;
2591 		dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
2592 			 adev->enable_virtual_display,
2593 			 adev->mode_info.num_crtc);
2594 	}
2595 }
2596 
2597 /**
2598  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2599  *
2600  * @adev: amdgpu_device pointer
2601  *
2602  * Parses the asic configuration parameters specified in the gpu info
2603  * firmware and makes them available to the driver for use in configuring
2604  * the asic.
2605  * Returns 0 on success, -EINVAL on failure.
2606  */
2607 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2608 {
2609 	const char *chip_name;
2610 	int err;
2611 	const struct gpu_info_firmware_header_v1_0 *hdr;
2612 
2613 	adev->firmware.gpu_info_fw = NULL;
2614 
2615 	switch (adev->asic_type) {
2616 	default:
2617 		return 0;
2618 	case CHIP_VEGA10:
2619 		chip_name = "vega10";
2620 		break;
2621 	case CHIP_VEGA12:
2622 		chip_name = "vega12";
2623 		break;
2624 	case CHIP_RAVEN:
2625 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2626 			chip_name = "raven2";
2627 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2628 			chip_name = "picasso";
2629 		else
2630 			chip_name = "raven";
2631 		break;
2632 	case CHIP_ARCTURUS:
2633 		chip_name = "arcturus";
2634 		break;
2635 	case CHIP_NAVI12:
2636 		if (adev->mman.discovery_bin)
2637 			return 0;
2638 		chip_name = "navi12";
2639 		break;
2640 	case CHIP_CYAN_SKILLFISH:
2641 		if (adev->mman.discovery_bin)
2642 			return 0;
2643 		chip_name = "cyan_skillfish";
2644 		break;
2645 	}
2646 
2647 	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
2648 				   AMDGPU_UCODE_OPTIONAL,
2649 				   "amdgpu/%s_gpu_info.bin", chip_name);
2650 	if (err) {
2651 		dev_err(adev->dev,
2652 			"Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
2653 			chip_name);
2654 		goto out;
2655 	}
2656 
2657 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2658 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2659 
2660 	switch (hdr->version_major) {
2661 	case 1:
2662 	{
2663 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2664 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2665 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2666 
2667 		/*
2668 		 * Should be dropped when DAL no longer needs it.
2669 		 */
2670 		if (adev->asic_type == CHIP_NAVI12)
2671 			goto parse_soc_bounding_box;
2672 
2673 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2674 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2675 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2676 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2677 		adev->gfx.config.max_texture_channel_caches =
2678 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2679 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2680 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2681 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2682 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2683 		adev->gfx.config.double_offchip_lds_buf =
2684 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2685 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2686 		adev->gfx.cu_info.max_waves_per_simd =
2687 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2688 		adev->gfx.cu_info.max_scratch_slots_per_cu =
2689 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2690 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2691 		if (hdr->version_minor >= 1) {
2692 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2693 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2694 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2695 			adev->gfx.config.num_sc_per_sh =
2696 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2697 			adev->gfx.config.num_packer_per_sc =
2698 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2699 		}
2700 
2701 parse_soc_bounding_box:
2702 		/*
2703 		 * soc bounding box info is not integrated in the discovery table,
2704 		 * so we still need to parse it from the gpu info firmware when needed.
2705 		 */
2706 		if (hdr->version_minor == 2) {
2707 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2708 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2709 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2710 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2711 		}
2712 		break;
2713 	}
2714 	default:
2715 		dev_err(adev->dev,
2716 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2717 		err = -EINVAL;
2718 		goto out;
2719 	}
2720 out:
2721 	return err;
2722 }
2723 
2724 static void amdgpu_uid_init(struct amdgpu_device *adev)
2725 {
2726 	/* Initialize the UID for the device */
2727 	adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
2728 	if (!adev->uid_info) {
2729 		dev_warn(adev->dev, "Failed to allocate memory for UID\n");
2730 		return;
2731 	}
2732 	adev->uid_info->adev = adev;
2733 }
2734 
2735 static void amdgpu_uid_fini(struct amdgpu_device *adev)
2736 {
2737 	/* Free the UID memory */
2738 	kfree(adev->uid_info);
2739 	adev->uid_info = NULL;
2740 }
2741 
2742 /**
2743  * amdgpu_device_ip_early_init - run early init for hardware IPs
2744  *
2745  * @adev: amdgpu_device pointer
2746  *
2747  * Early initialization pass for hardware IPs.  The hardware IPs that make
2748  * up each asic are discovered and each IP's early_init callback is run.  This
2749  * is the first stage in initializing the asic.
2750  * Returns 0 on success, negative error code on failure.
2751  */
2752 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2753 {
2754 	struct amdgpu_ip_block *ip_block;
2755 	struct pci_dev *parent;
2756 	bool total, skip_bios;
2757 	uint32_t bios_flags;
2758 	int i, r;
2759 
2760 	amdgpu_device_enable_virtual_display(adev);
2761 
2762 	if (amdgpu_sriov_vf(adev)) {
2763 		r = amdgpu_virt_request_full_gpu(adev, true);
2764 		if (r)
2765 			return r;
2766 	}
2767 
2768 	switch (adev->asic_type) {
2769 #ifdef CONFIG_DRM_AMDGPU_SI
2770 	case CHIP_VERDE:
2771 	case CHIP_TAHITI:
2772 	case CHIP_PITCAIRN:
2773 	case CHIP_OLAND:
2774 	case CHIP_HAINAN:
2775 		adev->family = AMDGPU_FAMILY_SI;
2776 		r = si_set_ip_blocks(adev);
2777 		if (r)
2778 			return r;
2779 		break;
2780 #endif
2781 #ifdef CONFIG_DRM_AMDGPU_CIK
2782 	case CHIP_BONAIRE:
2783 	case CHIP_HAWAII:
2784 	case CHIP_KAVERI:
2785 	case CHIP_KABINI:
2786 	case CHIP_MULLINS:
2787 		if (adev->flags & AMD_IS_APU)
2788 			adev->family = AMDGPU_FAMILY_KV;
2789 		else
2790 			adev->family = AMDGPU_FAMILY_CI;
2791 
2792 		r = cik_set_ip_blocks(adev);
2793 		if (r)
2794 			return r;
2795 		break;
2796 #endif
2797 	case CHIP_TOPAZ:
2798 	case CHIP_TONGA:
2799 	case CHIP_FIJI:
2800 	case CHIP_POLARIS10:
2801 	case CHIP_POLARIS11:
2802 	case CHIP_POLARIS12:
2803 	case CHIP_VEGAM:
2804 	case CHIP_CARRIZO:
2805 	case CHIP_STONEY:
2806 		if (adev->flags & AMD_IS_APU)
2807 			adev->family = AMDGPU_FAMILY_CZ;
2808 		else
2809 			adev->family = AMDGPU_FAMILY_VI;
2810 
2811 		r = vi_set_ip_blocks(adev);
2812 		if (r)
2813 			return r;
2814 		break;
2815 	default:
2816 		r = amdgpu_discovery_set_ip_blocks(adev);
2817 		if (r)
2818 			return r;
2819 		break;
2820 	}
2821 
2822 	/* Check for IP version 9.4.3 with A0 hardware */
2823 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
2824 	    !amdgpu_device_get_rev_id(adev)) {
2825 		dev_err(adev->dev, "Unsupported A0 hardware\n");
2826 		return -ENODEV;	/* device unsupported - no device error */
2827 	}
2828 
2829 	if (amdgpu_has_atpx() &&
2830 	    (amdgpu_is_atpx_hybrid() ||
2831 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2832 	    ((adev->flags & AMD_IS_APU) == 0) &&
2833 	    !dev_is_removable(&adev->pdev->dev))
2834 		adev->flags |= AMD_IS_PX;
2835 
2836 	if (!(adev->flags & AMD_IS_APU)) {
2837 		parent = pcie_find_root_port(adev->pdev);
2838 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2839 	}
2840 
2841 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2842 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2843 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2844 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2845 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2846 	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2847 		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2848 
2849 	adev->virt.is_xgmi_node_migrate_enabled = false;
2850 	if (amdgpu_sriov_vf(adev)) {
2851 		adev->virt.is_xgmi_node_migrate_enabled =
2852 			amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
2853 	}
2854 
2855 	total = true;
2856 	for (i = 0; i < adev->num_ip_blocks; i++) {
2857 		ip_block = &adev->ip_blocks[i];
2858 
2859 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2860 			dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
2861 				 adev->ip_blocks[i].version->funcs->name);
2862 			adev->ip_blocks[i].status.valid = false;
2863 		} else if (ip_block->version->funcs->early_init) {
2864 			r = ip_block->version->funcs->early_init(ip_block);
2865 			if (r == -ENOENT) {
2866 				adev->ip_blocks[i].status.valid = false;
2867 			} else if (r) {
2868 				dev_err(adev->dev,
2869 					"early_init of IP block <%s> failed %d\n",
2870 					adev->ip_blocks[i].version->funcs->name,
2871 					r);
2872 				total = false;
2873 			} else {
2874 				adev->ip_blocks[i].status.valid = true;
2875 			}
2876 		} else {
2877 			adev->ip_blocks[i].status.valid = true;
2878 		}
2879 		/* get the vbios after the asic_funcs are set up */
2880 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2881 			r = amdgpu_device_parse_gpu_info_fw(adev);
2882 			if (r)
2883 				return r;
2884 
2885 			bios_flags = amdgpu_device_get_vbios_flags(adev);
2886 			skip_bios = !!(bios_flags & AMDGPU_VBIOS_SKIP);
2887 			/* Read BIOS */
2888 			if (!skip_bios) {
2889 				bool optional =
2890 					!!(bios_flags & AMDGPU_VBIOS_OPTIONAL);
2891 				if (!amdgpu_get_bios(adev) && !optional)
2892 					return -EINVAL;
2893 
2894 				if (optional && !adev->bios)
2895 					dev_info(
2896 						adev->dev,
2897 						"VBIOS image optional, proceeding without VBIOS image");
2898 
2899 				if (adev->bios) {
2900 					r = amdgpu_atombios_init(adev);
2901 					if (r) {
2902 						dev_err(adev->dev,
2903 							"amdgpu_atombios_init failed\n");
2904 						amdgpu_vf_error_put(
2905 							adev,
2906 							AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL,
2907 							0, 0);
2908 						return r;
2909 					}
2910 				}
2911 			}
2912 
2913 			/* get pf2vf msg info at its earliest time */
2914 			if (amdgpu_sriov_vf(adev))
2915 				amdgpu_virt_init_data_exchange(adev);
2916 
2917 		}
2918 	}
2919 	if (!total)
2920 		return -ENODEV;
2921 
2922 	if (adev->gmc.xgmi.supported)
2923 		amdgpu_xgmi_early_init(adev);
2924 
2925 	if (amdgpu_is_multi_aid(adev))
2926 		amdgpu_uid_init(adev);
2927 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
2928 	if (ip_block && ip_block->status.valid)
2929 		amdgpu_amdkfd_device_probe(adev);
2930 
2931 	adev->cg_flags &= amdgpu_cg_mask;
2932 	adev->pg_flags &= amdgpu_pg_mask;
2933 
2934 	return 0;
2935 }
2936 
2937 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2938 {
2939 	int i, r;
2940 
2941 	for (i = 0; i < adev->num_ip_blocks; i++) {
2942 		if (!adev->ip_blocks[i].status.sw)
2943 			continue;
2944 		if (adev->ip_blocks[i].status.hw)
2945 			continue;
2946 		if (!amdgpu_ip_member_of_hwini(
2947 			    adev, adev->ip_blocks[i].version->type))
2948 			continue;
2949 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2950 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2951 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2952 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2953 			if (r) {
2954 				dev_err(adev->dev,
2955 					"hw_init of IP block <%s> failed %d\n",
2956 					adev->ip_blocks[i].version->funcs->name,
2957 					r);
2958 				return r;
2959 			}
2960 			adev->ip_blocks[i].status.hw = true;
2961 		}
2962 	}
2963 
2964 	return 0;
2965 }
2966 
2967 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2968 {
2969 	int i, r;
2970 
2971 	for (i = 0; i < adev->num_ip_blocks; i++) {
2972 		if (!adev->ip_blocks[i].status.sw)
2973 			continue;
2974 		if (adev->ip_blocks[i].status.hw)
2975 			continue;
2976 		if (!amdgpu_ip_member_of_hwini(
2977 			    adev, adev->ip_blocks[i].version->type))
2978 			continue;
2979 		r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2980 		if (r) {
2981 			dev_err(adev->dev,
2982 				"hw_init of IP block <%s> failed %d\n",
2983 				adev->ip_blocks[i].version->funcs->name, r);
2984 			return r;
2985 		}
2986 		adev->ip_blocks[i].status.hw = true;
2987 	}
2988 
2989 	return 0;
2990 }
2991 
2992 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2993 {
2994 	int r = 0;
2995 	int i;
2996 	uint32_t smu_version;
2997 
2998 	if (adev->asic_type >= CHIP_VEGA10) {
2999 		for (i = 0; i < adev->num_ip_blocks; i++) {
3000 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
3001 				continue;
3002 
3003 			if (!amdgpu_ip_member_of_hwini(adev,
3004 						       AMD_IP_BLOCK_TYPE_PSP))
3005 				break;
3006 
3007 			if (!adev->ip_blocks[i].status.sw)
3008 				continue;
3009 
3010 			/* no need to do the fw loading again if already done*/
3011 			if (adev->ip_blocks[i].status.hw == true)
3012 				break;
3013 
3014 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
3015 				r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3016 				if (r)
3017 					return r;
3018 			} else {
3019 				r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
3020 				if (r) {
3021 					dev_err(adev->dev,
3022 						"hw_init of IP block <%s> failed %d\n",
3023 						adev->ip_blocks[i]
3024 							.version->funcs->name,
3025 						r);
3026 					return r;
3027 				}
3028 				adev->ip_blocks[i].status.hw = true;
3029 			}
3030 			break;
3031 		}
3032 	}
3033 
3034 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
3035 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
3036 
3037 	return r;
3038 }
3039 
3040 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
3041 {
3042 	struct drm_sched_init_args args = {
3043 		.ops = &amdgpu_sched_ops,
3044 		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
3045 		.timeout_wq = adev->reset_domain->wq,
3046 		.dev = adev->dev,
3047 	};
3048 	long timeout;
3049 	int r, i;
3050 
3051 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3052 		struct amdgpu_ring *ring = adev->rings[i];
3053 
3054 		/* No need to setup the GPU scheduler for rings that don't need it */
3055 		if (!ring || ring->no_scheduler)
3056 			continue;
3057 
3058 		switch (ring->funcs->type) {
3059 		case AMDGPU_RING_TYPE_GFX:
3060 			timeout = adev->gfx_timeout;
3061 			break;
3062 		case AMDGPU_RING_TYPE_COMPUTE:
3063 			timeout = adev->compute_timeout;
3064 			break;
3065 		case AMDGPU_RING_TYPE_SDMA:
3066 			timeout = adev->sdma_timeout;
3067 			break;
3068 		default:
3069 			timeout = adev->video_timeout;
3070 			break;
3071 		}
3072 
3073 		args.timeout = timeout;
3074 		args.credit_limit = ring->num_hw_submission;
3075 		args.score = ring->sched_score;
3076 		args.name = ring->name;
3077 
3078 		r = drm_sched_init(&ring->sched, &args);
3079 		if (r) {
3080 			dev_err(adev->dev,
3081 				"Failed to create scheduler on ring %s.\n",
3082 				ring->name);
3083 			return r;
3084 		}
3085 		r = amdgpu_uvd_entity_init(adev, ring);
3086 		if (r) {
3087 			dev_err(adev->dev,
3088 				"Failed to create UVD scheduling entity on ring %s.\n",
3089 				ring->name);
3090 			return r;
3091 		}
3092 		r = amdgpu_vce_entity_init(adev, ring);
3093 		if (r) {
3094 			dev_err(adev->dev,
3095 				"Failed to create VCE scheduling entity on ring %s.\n",
3096 				ring->name);
3097 			return r;
3098 		}
3099 	}
3100 
3101 	if (adev->xcp_mgr)
3102 		amdgpu_xcp_update_partition_sched_list(adev);
3103 
3104 	return 0;
3105 }
3106 
3107 
3108 /**
3109  * amdgpu_device_ip_init - run init for hardware IPs
3110  *
3111  * @adev: amdgpu_device pointer
3112  *
3113  * Main initialization pass for hardware IPs.  The list of all the hardware
3114  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
3115  * are run.  sw_init initializes the software state associated with each IP
3116  * and hw_init initializes the hardware associated with each IP.
3117  * Returns 0 on success, negative error code on failure.
3118  */
3119 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
3120 {
3121 	bool init_badpage;
3122 	int i, r;
3123 
3124 	r = amdgpu_ras_init(adev);
3125 	if (r)
3126 		return r;
3127 
3128 	for (i = 0; i < adev->num_ip_blocks; i++) {
3129 		if (!adev->ip_blocks[i].status.valid)
3130 			continue;
3131 		if (adev->ip_blocks[i].version->funcs->sw_init) {
3132 			r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
3133 			if (r) {
3134 				dev_err(adev->dev,
3135 					"sw_init of IP block <%s> failed %d\n",
3136 					adev->ip_blocks[i].version->funcs->name,
3137 					r);
3138 				goto init_failed;
3139 			}
3140 		}
3141 		adev->ip_blocks[i].status.sw = true;
3142 
3143 		if (!amdgpu_ip_member_of_hwini(
3144 			    adev, adev->ip_blocks[i].version->type))
3145 			continue;
3146 
3147 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
3148 			/* need to do common hw init early so everything is set up for gmc */
3149 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
3150 			if (r) {
3151 				dev_err(adev->dev, "hw_init %d failed %d\n", i,
3152 					r);
3153 				goto init_failed;
3154 			}
3155 			adev->ip_blocks[i].status.hw = true;
3156 		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3157 			/* need to do gmc hw init early so we can allocate gpu mem */
3158 			/* Try to reserve bad pages early */
3159 			if (amdgpu_sriov_vf(adev))
3160 				amdgpu_virt_exchange_data(adev);
3161 
3162 			r = amdgpu_device_mem_scratch_init(adev);
3163 			if (r) {
3164 				dev_err(adev->dev,
3165 					"amdgpu_mem_scratch_init failed %d\n",
3166 					r);
3167 				goto init_failed;
3168 			}
3169 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
3170 			if (r) {
3171 				dev_err(adev->dev, "hw_init %d failed %d\n", i,
3172 					r);
3173 				goto init_failed;
3174 			}
3175 			r = amdgpu_device_wb_init(adev);
3176 			if (r) {
3177 				dev_err(adev->dev,
3178 					"amdgpu_device_wb_init failed %d\n", r);
3179 				goto init_failed;
3180 			}
3181 			adev->ip_blocks[i].status.hw = true;
3182 
3183 			/* right after GMC hw init, we create CSA */
3184 			if (adev->gfx.mcbp) {
3185 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
3186 							       AMDGPU_GEM_DOMAIN_VRAM |
3187 							       AMDGPU_GEM_DOMAIN_GTT,
3188 							       AMDGPU_CSA_SIZE);
3189 				if (r) {
3190 					dev_err(adev->dev,
3191 						"allocate CSA failed %d\n", r);
3192 					goto init_failed;
3193 				}
3194 			}
3195 
3196 			r = amdgpu_seq64_init(adev);
3197 			if (r) {
3198 				dev_err(adev->dev, "allocate seq64 failed %d\n",
3199 					r);
3200 				goto init_failed;
3201 			}
3202 		}
3203 	}
3204 
3205 	if (amdgpu_sriov_vf(adev))
3206 		amdgpu_virt_init_data_exchange(adev);
3207 
3208 	r = amdgpu_ib_pool_init(adev);
3209 	if (r) {
3210 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
3211 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
3212 		goto init_failed;
3213 	}
3214 
3215 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
3216 	if (r)
3217 		goto init_failed;
3218 
3219 	r = amdgpu_device_ip_hw_init_phase1(adev);
3220 	if (r)
3221 		goto init_failed;
3222 
3223 	r = amdgpu_device_fw_loading(adev);
3224 	if (r)
3225 		goto init_failed;
3226 
3227 	r = amdgpu_device_ip_hw_init_phase2(adev);
3228 	if (r)
3229 		goto init_failed;
3230 
3231 	/*
3232 	 * retired pages will be loaded from eeprom and reserved here,
3233 	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
3234 	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
3235 	 * functional for I2C communication, which is only true at this point.
3236 	 *
3237 	 * amdgpu_ras_recovery_init may fail, but the upper level only cares
3238 	 * about failures from a bad gpu situation and stops the amdgpu init
3239 	 * process accordingly. For other failure cases, it still releases all
3240 	 * the resources and prints an error message, rather than returning a
3241 	 * negative value to the upper level.
3242 	 *
3243 	 * Note: theoretically, this should be called before all vram allocations
3244 	 * to protect retired pages from being abused.
3245 	 */
3246 	init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
3247 	r = amdgpu_ras_recovery_init(adev, init_badpage);
3248 	if (r)
3249 		goto init_failed;
3250 
3251 	/*
3252 	 * In case of XGMI grab extra reference for reset domain for this device
3253 	 */
3254 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3255 		if (amdgpu_xgmi_add_device(adev) == 0) {
3256 			if (!amdgpu_sriov_vf(adev)) {
3257 				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3258 
3259 				if (WARN_ON(!hive)) {
3260 					r = -ENOENT;
3261 					goto init_failed;
3262 				}
3263 
3264 				if (!hive->reset_domain ||
3265 				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
3266 					r = -ENOENT;
3267 					amdgpu_put_xgmi_hive(hive);
3268 					goto init_failed;
3269 				}
3270 
3271 				/* Drop the early temporary reset domain we created for device */
3272 				amdgpu_reset_put_reset_domain(adev->reset_domain);
3273 				adev->reset_domain = hive->reset_domain;
3274 				amdgpu_put_xgmi_hive(hive);
3275 			}
3276 		}
3277 	}
3278 
3279 	r = amdgpu_device_init_schedulers(adev);
3280 	if (r)
3281 		goto init_failed;
3282 
3283 	if (adev->mman.buffer_funcs_ring->sched.ready)
3284 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
3285 
3286 	/* Don't init kfd if whole hive need to be reset during init */
3287 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
3288 		kgd2kfd_init_zone_device(adev);
3289 		amdgpu_amdkfd_device_init(adev);
3290 	}
3291 
3292 	amdgpu_fru_get_product_info(adev);
3293 
3294 	if (!amdgpu_sriov_vf(adev) || amdgpu_sriov_ras_cper_en(adev))
3295 		r = amdgpu_cper_init(adev);
3296 
3297 init_failed:
3298 
3299 	return r;
3300 }
3301 
3302 /**
3303  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
3304  *
3305  * @adev: amdgpu_device pointer
3306  *
3307  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
3308  * this function before a GPU reset.  If the value is retained after a
3309  * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
3310  */
3311 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
3312 {
3313 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
3314 }
3315 
3316 /**
3317  * amdgpu_device_check_vram_lost - check if vram is valid
3318  *
3319  * @adev: amdgpu_device pointer
3320  *
3321  * Checks the reset magic value written to the gart pointer in VRAM.
3322  * The driver calls this after a GPU reset to see if the contents of
3323  * VRAM are lost or not.
3324  * Returns true if vram is lost, false if not.
3325  */
3326 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
3327 {
3328 	if (memcmp(adev->gart.ptr, adev->reset_magic,
3329 			AMDGPU_RESET_MAGIC_NUM))
3330 		return true;
3331 
3332 	if (!amdgpu_in_reset(adev))
3333 		return false;
3334 
3335 	/*
3336 	 * For all ASICs with baco/mode1 reset, the VRAM is
3337 	 * always assumed to be lost.
3338 	 */
3339 	switch (amdgpu_asic_reset_method(adev)) {
3340 	case AMD_RESET_METHOD_LEGACY:
3341 	case AMD_RESET_METHOD_LINK:
3342 	case AMD_RESET_METHOD_BACO:
3343 	case AMD_RESET_METHOD_MODE1:
3344 		return true;
3345 	default:
3346 		return false;
3347 	}
3348 }
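
/*
 * A sketch of how the reset-magic pair is used around an ASIC reset
 * (hypothetical caller):
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... perform the ASIC reset ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		... VRAM contents must be restored/reinitialized ...
 */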
3349 
3350 /**
3351  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
3352  *
3353  * @adev: amdgpu_device pointer
3354  * @state: clockgating state (gate or ungate)
3355  *
3356  * The list of all the hardware IPs that make up the asic is walked and the
3357  * set_clockgating_state callbacks are run.
3358  * The late initialization pass enables clockgating for hardware IPs;
3359  * the fini or suspend pass disables clockgating for hardware IPs.
3360  * Returns 0 on success, negative error code on failure.
3361  */
3362 
3363 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
3364 			       enum amd_clockgating_state state)
3365 {
3366 	int i, j, r;
3367 
3368 	if (amdgpu_emu_mode == 1)
3369 		return 0;
3370 
3371 	for (j = 0; j < adev->num_ip_blocks; j++) {
3372 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
3373 		if (!adev->ip_blocks[i].status.late_initialized)
3374 			continue;
3375 		/* skip CG for GFX, SDMA on S0ix */
3376 		if (adev->in_s0ix &&
3377 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3378 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3379 			continue;
3380 		/* skip CG for UVD/VCE/VCN/JPEG; they're handled specially */
3381 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
3382 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
3383 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
3384 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
3385 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
3386 			/* enable clockgating to save power */
3387 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
3388 										     state);
3389 			if (r) {
3390 				dev_err(adev->dev,
3391 					"set_clockgating_state(gate) of IP block <%s> failed %d\n",
3392 					adev->ip_blocks[i].version->funcs->name,
3393 					r);
3394 				return r;
3395 			}
3396 		}
3397 	}
3398 
3399 	return 0;
3400 }
3401 
3402 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
3403 			       enum amd_powergating_state state)
3404 {
3405 	int i, j, r;
3406 
3407 	if (amdgpu_emu_mode == 1)
3408 		return 0;
3409 
3410 	for (j = 0; j < adev->num_ip_blocks; j++) {
3411 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
3412 		if (!adev->ip_blocks[i].status.late_initialized)
3413 			continue;
3414 		/* skip PG for GFX, SDMA on S0ix */
3415 		if (adev->in_s0ix &&
3416 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3417 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3418 			continue;
3419 		/* skip PG for UVD/VCE/VCN/VPE/JPEG; they're handled specially */
3420 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
3421 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
3422 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
3423 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
3424 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
3425 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
3426 			/* enable powergating to save power */
3427 			r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
3428 											state);
3429 			if (r) {
3430 				dev_err(adev->dev,
3431 					"set_powergating_state(gate) of IP block <%s> failed %d\n",
3432 					adev->ip_blocks[i].version->funcs->name,
3433 					r);
3434 				return r;
3435 			}
3436 		}
3437 	}
3438 	return 0;
3439 }
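
/*
 * Ordering sketch (editor's illustration): gating is applied clocks-first
 * and removed power-first, and both helpers walk the IP blocks in reverse
 * for the ungate case (see the index computation above):
 *
 *	// late init
 *	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
 *	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
 *	// fini/suspend
 *	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
 *	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 */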
3440 
3441 static int amdgpu_device_enable_mgpu_fan_boost(void)
3442 {
3443 	struct amdgpu_gpu_instance *gpu_ins;
3444 	struct amdgpu_device *adev;
3445 	int i, ret = 0;
3446 
3447 	mutex_lock(&mgpu_info.mutex);
3448 
3449 	/*
3450 	 * MGPU fan boost feature should be enabled
3451 	 * only when there are two or more dGPUs in
3452 	 * the system
3453 	 */
3454 	if (mgpu_info.num_dgpu < 2)
3455 		goto out;
3456 
3457 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
3458 		gpu_ins = &(mgpu_info.gpu_ins[i]);
3459 		adev = gpu_ins->adev;
3460 		if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
3461 		    !gpu_ins->mgpu_fan_enabled) {
3462 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
3463 			if (ret)
3464 				break;
3465 
3466 			gpu_ins->mgpu_fan_enabled = 1;
3467 		}
3468 	}
3469 
3470 out:
3471 	mutex_unlock(&mgpu_info.mutex);
3472 
3473 	return ret;
3474 }
3475 
3476 /**
3477  * amdgpu_device_ip_late_init - run late init for hardware IPs
3478  *
3479  * @adev: amdgpu_device pointer
3480  *
3481  * Late initialization pass for hardware IPs.  The list of all the hardware
3482  * IPs that make up the asic is walked and the late_init callbacks are run.
3483  * late_init covers any special initialization that an IP requires
3484  * after all of them have been initialized or something that needs to happen
3485  * late in the init process.
3486  * Returns 0 on success, negative error code on failure.
3487  */
3488 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3489 {
3490 	struct amdgpu_gpu_instance *gpu_instance;
3491 	int i = 0, r;
3492 
3493 	for (i = 0; i < adev->num_ip_blocks; i++) {
3494 		if (!adev->ip_blocks[i].status.hw)
3495 			continue;
3496 		if (adev->ip_blocks[i].version->funcs->late_init) {
3497 			r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
3498 			if (r) {
3499 				dev_err(adev->dev,
3500 					"late_init of IP block <%s> failed %d\n",
3501 					adev->ip_blocks[i].version->funcs->name,
3502 					r);
3503 				return r;
3504 			}
3505 		}
3506 		adev->ip_blocks[i].status.late_initialized = true;
3507 	}
3508 
3509 	r = amdgpu_ras_late_init(adev);
3510 	if (r) {
3511 		dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
3512 		return r;
3513 	}
3514 
3515 	if (!amdgpu_reset_in_recovery(adev))
3516 		amdgpu_ras_set_error_query_ready(adev, true);
3517 
3518 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3519 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3520 
3521 	amdgpu_device_fill_reset_magic(adev);
3522 
3523 	r = amdgpu_device_enable_mgpu_fan_boost();
3524 	if (r)
3525 		dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
3526 
3527 	/* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
3528 	if (amdgpu_passthrough(adev) &&
3529 	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3530 	     adev->asic_type == CHIP_ALDEBARAN))
3531 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
3532 
3533 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3534 		mutex_lock(&mgpu_info.mutex);
3535 
3536 		/*
3537 		 * Reset device p-state to low as this was booted with high.
3538 		 *
3539 		 * This should be performed only after all devices from the same
3540 		 * hive get initialized.
3541 		 *
3542 		 * However, the number of devices in the hive is not known in
3543 		 * advance; it is counted up one by one as each device initializes.
3544 		 *
3545 		 * So we wait until all XGMI-interlinked devices are initialized.
3546 		 * This may add some delay, as those devices may come from
3547 		 * different hives, but that should be OK.
3548 		 */
3549 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3550 			for (i = 0; i < mgpu_info.num_gpu; i++) {
3551 				gpu_instance = &(mgpu_info.gpu_ins[i]);
3552 				if (gpu_instance->adev->flags & AMD_IS_APU)
3553 					continue;
3554 
3555 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3556 						AMDGPU_XGMI_PSTATE_MIN);
3557 				if (r) {
3558 					dev_err(adev->dev,
3559 						"pstate setting failed (%d).\n",
3560 						r);
3561 					break;
3562 				}
3563 			}
3564 		}
3565 
3566 		mutex_unlock(&mgpu_info.mutex);
3567 	}
3568 
3569 	return 0;
3570 }
3571 
3572 static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
3573 {
3574 	struct amdgpu_device *adev = ip_block->adev;
3575 	int r;
3576 
3577 	if (!ip_block->version->funcs->hw_fini) {
3578 		dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
3579 			ip_block->version->funcs->name);
3580 	} else {
3581 		r = ip_block->version->funcs->hw_fini(ip_block);
3582 		/* XXX handle errors */
3583 		if (r) {
3584 			dev_dbg(adev->dev,
3585 				"hw_fini of IP block <%s> failed %d\n",
3586 				ip_block->version->funcs->name, r);
3587 		}
3588 	}
3589 
3590 	ip_block->status.hw = false;
3591 }
3592 
3593 /**
3594  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3595  *
3596  * @adev: amdgpu_device pointer
3597  *
3598  * For ASICs that need to disable the SMC first
3599  */
3600 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3601 {
3602 	int i;
3603 
3604 	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3605 		return;
3606 
3607 	for (i = 0; i < adev->num_ip_blocks; i++) {
3608 		if (!adev->ip_blocks[i].status.hw)
3609 			continue;
3610 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3611 			amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
3612 			break;
3613 		}
3614 	}
3615 }
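
/*
 * Note (editor's illustration): the early return above makes this helper
 * a no-op on anything newer than GC 9.0.0, i.e. it is equivalent to
 * guarding the call site with:
 *
 *	if (amdgpu_ip_version(adev, GC_HWIP, 0) <= IP_VERSION(9, 0, 0))
 *		amdgpu_device_smu_fini_early(adev);
 *
 * Newer ASICs simply tear down the SMC in the normal reverse IP order.
 */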
3616 
3617 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3618 {
3619 	int i, r;
3620 
3621 	for (i = 0; i < adev->num_ip_blocks; i++) {
3622 		if (!adev->ip_blocks[i].version->funcs->early_fini)
3623 			continue;
3624 
3625 		r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
3626 		if (r) {
3627 			dev_dbg(adev->dev,
3628 				"early_fini of IP block <%s> failed %d\n",
3629 				adev->ip_blocks[i].version->funcs->name, r);
3630 		}
3631 	}
3632 
3633 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3634 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3635 
3636 	amdgpu_amdkfd_suspend(adev, true);
3637 	amdgpu_userq_suspend(adev);
3638 
3639 	/* Workaround for ASICs that need to disable the SMC first */
3640 	amdgpu_device_smu_fini_early(adev);
3641 
3642 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3643 		if (!adev->ip_blocks[i].status.hw)
3644 			continue;
3645 
3646 		amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
3647 	}
3648 
3649 	if (amdgpu_sriov_vf(adev)) {
3650 		if (amdgpu_virt_release_full_gpu(adev, false))
3651 			dev_err(adev->dev,
3652 				"failed to release exclusive mode on fini\n");
3653 	}
3654 
3655 	return 0;
3656 }
3657 
3658 /**
3659  * amdgpu_device_ip_fini - run fini for hardware IPs
3660  *
3661  * @adev: amdgpu_device pointer
3662  *
3663  * Main teardown pass for hardware IPs.  The list of all the hardware
3664  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3665  * are run.  hw_fini tears down the hardware associated with each IP
3666  * and sw_fini tears down any software state associated with each IP.
3667  * Returns 0 on success, negative error code on failure.
3668  */
3669 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3670 {
3671 	int i, r;
3672 
3673 	amdgpu_cper_fini(adev);
3674 
3675 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3676 		amdgpu_virt_release_ras_err_handler_data(adev);
3677 
3678 	if (adev->gmc.xgmi.num_physical_nodes > 1)
3679 		amdgpu_xgmi_remove_device(adev);
3680 
3681 	amdgpu_amdkfd_device_fini_sw(adev);
3682 
3683 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3684 		if (!adev->ip_blocks[i].status.sw)
3685 			continue;
3686 
3687 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3688 			amdgpu_ucode_free_bo(adev);
3689 			amdgpu_free_static_csa(&adev->virt.csa_obj);
3690 			amdgpu_device_wb_fini(adev);
3691 			amdgpu_device_mem_scratch_fini(adev);
3692 			amdgpu_ib_pool_fini(adev);
3693 			amdgpu_seq64_fini(adev);
3694 			amdgpu_doorbell_fini(adev);
3695 		}
3696 		if (adev->ip_blocks[i].version->funcs->sw_fini) {
3697 			r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
3698 			/* XXX handle errors */
3699 			if (r) {
3700 				dev_dbg(adev->dev,
3701 					"sw_fini of IP block <%s> failed %d\n",
3702 					adev->ip_blocks[i].version->funcs->name,
3703 					r);
3704 			}
3705 		}
3706 		adev->ip_blocks[i].status.sw = false;
3707 		adev->ip_blocks[i].status.valid = false;
3708 	}
3709 
3710 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3711 		if (!adev->ip_blocks[i].status.late_initialized)
3712 			continue;
3713 		if (adev->ip_blocks[i].version->funcs->late_fini)
3714 			adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
3715 		adev->ip_blocks[i].status.late_initialized = false;
3716 	}
3717 
3718 	amdgpu_ras_fini(adev);
3719 	amdgpu_uid_fini(adev);
3720 
3721 	return 0;
3722 }
3723 
3724 /**
3725  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3726  *
3727  * @work: work_struct.
3728  */
3729 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3730 {
3731 	struct amdgpu_device *adev =
3732 		container_of(work, struct amdgpu_device, delayed_init_work.work);
3733 	int r;
3734 
3735 	r = amdgpu_ib_ring_tests(adev);
3736 	if (r)
3737 		dev_err(adev->dev, "ib ring test failed (%d).\n", r);
3738 }
3739 
3740 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3741 {
3742 	struct amdgpu_device *adev =
3743 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3744 
3745 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
3746 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3747 
3748 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
3749 		adev->gfx.gfx_off_state = true;
3750 }
3751 
3752 /**
3753  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3754  *
3755  * @adev: amdgpu_device pointer
3756  *
3757  * First suspend pass for hardware IPs.  Clockgating and powergating
3758  * are disabled and the suspend callbacks are run for the display (DCE)
3759  * blocks only; the remaining IPs are handled in phase 2.  suspend puts
3760  * each display IP into a state suitable for suspend.
3761  * Returns 0 on success, negative error code on failure.
3762  */
3763 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3764 {
3765 	int i, r;
3766 
3767 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3768 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3769 
3770 	/*
3771 	 * Per the PMFW team's suggestion, the driver needs to handle gfxoff
3772 	 * and df cstate feature disablement for the gpu reset (e.g. Mode1Reset)
3773 	 * scenario. Add the missing df cstate disablement here.
3774 	 */
3775 	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3776 		dev_warn(adev->dev, "Failed to disallow df cstate");
3777 
3778 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3779 		if (!adev->ip_blocks[i].status.valid)
3780 			continue;
3781 
3782 		/* displays are handled separately */
3783 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3784 			continue;
3785 
3786 		/* XXX handle errors */
3787 		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3788 		if (r)
3789 			return r;
3790 	}
3791 
3792 	return 0;
3793 }
3794 
3795 /**
3796  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3797  *
3798  * @adev: amdgpu_device pointer
3799  *
3800  * Second suspend pass for hardware IPs.  The list of all the hardware
3801  * IPs that make up the asic is walked and the suspend callbacks are run
3802  * for every block except the displays, which were handled in phase 1.
3803  * suspend puts each IP into a state suitable for suspend.
3804  * Returns 0 on success, negative error code on failure.
3805  */
3806 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3807 {
3808 	int i, r;
3809 
3810 	if (adev->in_s0ix)
3811 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3812 
3813 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3814 		if (!adev->ip_blocks[i].status.valid)
3815 			continue;
3816 		/* displays are handled in phase1 */
3817 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3818 			continue;
3819 		/* PSP lost connection when err_event_athub occurs */
3820 		if (amdgpu_ras_intr_triggered() &&
3821 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3822 			adev->ip_blocks[i].status.hw = false;
3823 			continue;
3824 		}
3825 
3826 		/* skip unnecessary suspend if we have not initialized them yet */
3827 		if (!amdgpu_ip_member_of_hwini(
3828 			    adev, adev->ip_blocks[i].version->type))
3829 			continue;
3830 
3831 		/* Since we skip suspend for S0i3, we need to cancel the delayed
3832 		 * idle work here as the suspend callback never gets called.
3833 		 */
3834 		if (adev->in_s0ix &&
3835 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
3836 		    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
3837 			cancel_delayed_work_sync(&adev->gfx.idle_work);
3838 		/* skip suspend of gfx/mes and psp for S0ix;
3839 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3840 		 * like at runtime. PSP is also part of the always on hardware
3841 		 * so no need to suspend it.
3842 		 */
3843 		if (adev->in_s0ix &&
3844 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3845 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3846 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3847 			continue;
3848 
3849 		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3850 		if (adev->in_s0ix &&
3851 		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3852 		     IP_VERSION(5, 0, 0)) &&
3853 		    (adev->ip_blocks[i].version->type ==
3854 		     AMD_IP_BLOCK_TYPE_SDMA))
3855 			continue;
3856 
3857 		/* PSP provides the IMU and RLC FW binaries to TOS during cold boot.
3858 		 * These live in the TMR, so PSP-TOS is expected to reload them from
3859 		 * that location, and RLC autoload is likewise triggered from there
3860 		 * based on the PMFW -> PSP message during the re-init sequence.
3861 		 * Therefore, skip psp suspend & resume on IMU-enabled APU ASICs to
3862 		 * avoid destroying the TMR and reloading the FWs again.
3863 		 */
3864 		if (amdgpu_in_reset(adev) &&
3865 		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3866 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3867 			continue;
3868 
3869 		/* XXX handle errors */
3870 		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3871 		adev->ip_blocks[i].status.hw = false;
3872 
3873 		/* handle putting the SMC in the appropriate state */
3874 		if (!amdgpu_sriov_vf(adev)) {
3875 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3876 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3877 				if (r) {
3878 					dev_err(adev->dev,
3879 						"SMC failed to set mp1 state %d, %d\n",
3880 						adev->mp1_state, r);
3881 					return r;
3882 				}
3883 			}
3884 		}
3885 	}
3886 
3887 	return 0;
3888 }
3889 
3890 /**
3891  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3892  *
3893  * @adev: amdgpu_device pointer
3894  *
3895  * Main suspend function for hardware IPs.  The list of all the hardware
3896  * IPs that make up the asic is walked, clockgating is disabled and the
3897  * suspend callbacks are run.  suspend puts the hardware and software state
3898  * in each IP into a state suitable for suspend.
3899  * Returns 0 on success, negative error code on failure.
3900  */
3901 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3902 {
3903 	int r;
3904 
3905 	if (amdgpu_sriov_vf(adev)) {
3906 		amdgpu_virt_fini_data_exchange(adev);
3907 		amdgpu_virt_request_full_gpu(adev, false);
3908 	}
3909 
3910 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3911 
3912 	r = amdgpu_device_ip_suspend_phase1(adev);
3913 	if (r)
3914 		return r;
3915 	r = amdgpu_device_ip_suspend_phase2(adev);
3916 
3917 	if (amdgpu_sriov_vf(adev))
3918 		amdgpu_virt_release_full_gpu(adev, false);
3919 
3920 	return r;
3921 }
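
/*
 * Suspend sequencing at a glance (summary of the function above):
 *
 *	SR-IOV VF:  fini data exchange, request full GPU access
 *	disable TTM buffer funcs
 *	phase1: displays (DCE), after CG/PG ungate
 *	phase2: all remaining IP blocks
 *	SR-IOV VF:  release full GPU access
 */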
3922 
3923 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3924 {
3925 	int i, r;
3926 
3927 	static enum amd_ip_block_type ip_order[] = {
3928 		AMD_IP_BLOCK_TYPE_COMMON,
3929 		AMD_IP_BLOCK_TYPE_GMC,
3930 		AMD_IP_BLOCK_TYPE_PSP,
3931 		AMD_IP_BLOCK_TYPE_IH,
3932 	};
3933 
3934 	for (i = 0; i < adev->num_ip_blocks; i++) {
3935 		int j;
3936 		struct amdgpu_ip_block *block;
3937 
3938 		block = &adev->ip_blocks[i];
3939 		block->status.hw = false;
3940 
3941 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3942 
3943 			if (block->version->type != ip_order[j] ||
3944 				!block->status.valid)
3945 				continue;
3946 
3947 			r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
3948 			if (r) {
3949 				dev_err(adev->dev, "RE-INIT-early: %s failed\n",
3950 					 block->version->funcs->name);
3951 				return r;
3952 			}
3953 			block->status.hw = true;
3954 		}
3955 	}
3956 
3957 	return 0;
3958 }
3959 
3960 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3961 {
3962 	struct amdgpu_ip_block *block;
3963 	int i, r = 0;
3964 
3965 	static enum amd_ip_block_type ip_order[] = {
3966 		AMD_IP_BLOCK_TYPE_SMC,
3967 		AMD_IP_BLOCK_TYPE_DCE,
3968 		AMD_IP_BLOCK_TYPE_GFX,
3969 		AMD_IP_BLOCK_TYPE_SDMA,
3970 		AMD_IP_BLOCK_TYPE_MES,
3971 		AMD_IP_BLOCK_TYPE_UVD,
3972 		AMD_IP_BLOCK_TYPE_VCE,
3973 		AMD_IP_BLOCK_TYPE_VCN,
3974 		AMD_IP_BLOCK_TYPE_JPEG
3975 	};
3976 
3977 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3978 		block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
3979 
3980 		if (!block)
3981 			continue;
3982 
3983 		if (block->status.valid && !block->status.hw) {
3984 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
3985 				r = amdgpu_ip_block_resume(block);
3986 			} else {
3987 				r = block->version->funcs->hw_init(block);
3988 			}
3989 
3990 			if (r) {
3991 				dev_err(adev->dev, "RE-INIT-late: %s failed\n",
3992 					 block->version->funcs->name);
3993 				break;
3994 			}
3995 			block->status.hw = true;
3996 		}
3997 	}
3998 
3999 	return r;
4000 }
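
/*
 * Pattern sketch (editor's illustration): both SR-IOV re-init helpers
 * above drive hw_init from a fixed ip_order[] table so bring-up after a
 * VF reset happens in a deterministic order rather than the discovery
 * order of adev->ip_blocks. With a hypothetical order[] table:
 *
 *	for (i = 0; i < ARRAY_SIZE(order); i++) {
 *		block = amdgpu_device_ip_get_ip_block(adev, order[i]);
 *		if (block && block->status.valid && !block->status.hw) {
 *			r = block->version->funcs->hw_init(block);
 *			if (!r)
 *				block->status.hw = true;
 *		}
 *	}
 */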
4001 
4002 /**
4003  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
4004  *
4005  * @adev: amdgpu_device pointer
4006  *
4007  * First resume function for hardware IPs.  The list of all the hardware
4008  * IPs that make up the asic is walked and the resume callbacks are run for
4009  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
4010  * after a suspend and updates the software state as necessary.  This
4011  * function is also used for restoring the GPU after a GPU reset.
4012  * Returns 0 on success, negative error code on failure.
4013  */
4014 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
4015 {
4016 	int i, r;
4017 
4018 	for (i = 0; i < adev->num_ip_blocks; i++) {
4019 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
4020 			continue;
4021 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
4022 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
4023 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
4024 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
4025 
4026 			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
4027 			if (r)
4028 				return r;
4029 		}
4030 	}
4031 
4032 	return 0;
4033 }
4034 
4035 /**
4036  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
4037  *
4038  * @adev: amdgpu_device pointer
4039  *
4040  * Second resume function for hardware IPs.  The list of all the hardware
4041  * IPs that make up the asic is walked and the resume callbacks are run for
4042  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
4043  * functional state after a suspend and updates the software state as
4044  * necessary.  This function is also used for restoring the GPU after a GPU
4045  * reset.
4046  * Returns 0 on success, negative error code on failure.
4047  */
4048 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
4049 {
4050 	int i, r;
4051 
4052 	for (i = 0; i < adev->num_ip_blocks; i++) {
4053 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
4054 			continue;
4055 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
4056 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
4057 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
4058 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
4059 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
4060 			continue;
4061 		r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
4062 		if (r)
4063 			return r;
4064 	}
4065 
4066 	return 0;
4067 }
4068 
4069 /**
4070  * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
4071  *
4072  * @adev: amdgpu_device pointer
4073  *
4074  * Third resume function for hardware IPs.  The list of all the hardware
4075  * IPs that make up the asic is walked and the resume callbacks are run for
4076  * all DCE.  resume puts the hardware into a functional state after a suspend
4077  * and updates the software state as necessary.  This function is also used
4078  * for restoring the GPU after a GPU reset.
4079  *
4080  * Returns 0 on success, negative error code on failure.
4081  */
4082 static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
4083 {
4084 	int i, r;
4085 
4086 	for (i = 0; i < adev->num_ip_blocks; i++) {
4087 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
4088 			continue;
4089 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
4090 			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
4091 			if (r)
4092 				return r;
4093 		}
4094 	}
4095 
4096 	return 0;
4097 }
4098 
4099 /**
4100  * amdgpu_device_ip_resume - run resume for hardware IPs
4101  *
4102  * @adev: amdgpu_device pointer
4103  *
4104  * Main resume function for hardware IPs.  The hardware IPs
4105  * are split into multiple resume functions because they are
4106  * also used in recovering from a GPU reset and some additional
4107  * steps need to be taken between them.  In this case (S3/S4) they are
4108  * run sequentially.
4109  * Returns 0 on success, negative error code on failure.
4110  */
4111 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
4112 {
4113 	int r;
4114 
4115 	r = amdgpu_device_ip_resume_phase1(adev);
4116 	if (r)
4117 		return r;
4118 
4119 	r = amdgpu_device_fw_loading(adev);
4120 	if (r)
4121 		return r;
4122 
4123 	r = amdgpu_device_ip_resume_phase2(adev);
4124 
4125 	if (adev->mman.buffer_funcs_ring->sched.ready)
4126 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
4127 
4128 	if (r)
4129 		return r;
4130 
4131 	amdgpu_fence_driver_hw_init(adev);
4132 
4133 	r = amdgpu_device_ip_resume_phase3(adev);
4134 
4135 	return r;
4136 }
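
/*
 * Resume phases at a glance (summary of the helpers above):
 *
 *	phase1: COMMON, GMC, IH (plus PSP on SR-IOV VFs)
 *	firmware loading
 *	phase2: everything except COMMON/GMC/IH/DCE/PSP
 *	fence driver hw init
 *	phase3: DCE (displays)
 */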
4137 
4138 /**
4139  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
4140  *
4141  * @adev: amdgpu_device pointer
4142  *
4143  * Query the VBIOS data tables to determine if the board supports SR-IOV.
4144  */
4145 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
4146 {
4147 	if (amdgpu_sriov_vf(adev)) {
4148 		if (adev->is_atom_fw) {
4149 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
4150 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
4151 		} else {
4152 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
4153 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
4154 		}
4155 
4156 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
4157 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
4158 	}
4159 }
4160 
4161 /**
4162  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
4163  *
4164  * @pdev : pci device context
4165  * @asic_type: AMD asic type
4166  *
4167  * Check if there is DC (new modesetting infrastructure) support for an asic.
4168  * Returns true if DC has support, false if not.
4169  */
4170 bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
4171 				       enum amd_asic_type asic_type)
4172 {
4173 	switch (asic_type) {
4174 #ifdef CONFIG_DRM_AMDGPU_SI
4175 	case CHIP_HAINAN:
4176 #endif
4177 	case CHIP_TOPAZ:
4178 		/* chips with no display hardware */
4179 		return false;
4180 #if defined(CONFIG_DRM_AMD_DC)
4181 	case CHIP_TAHITI:
4182 	case CHIP_PITCAIRN:
4183 	case CHIP_VERDE:
4184 	case CHIP_OLAND:
4185 		/*
4186 		 * We have systems in the wild with these ASICs that require
4187 		 * LVDS and VGA support which is not supported with DC.
4188 		 *
4189 		 * Fallback to the non-DC driver here by default so as not to
4190 		 * cause regressions.
4191 		 */
4192 #if defined(CONFIG_DRM_AMD_DC_SI)
4193 		return amdgpu_dc > 0;
4194 #else
4195 		return false;
4196 #endif
4197 	case CHIP_BONAIRE:
4198 	case CHIP_KAVERI:
4199 	case CHIP_KABINI:
4200 	case CHIP_MULLINS:
4201 		/*
4202 		 * We have systems in the wild with these ASICs that require
4203 		 * VGA support which is not supported with DC.
4204 		 *
4205 		 * Fallback to the non-DC driver here by default so as not to
4206 		 * cause regressions.
4207 		 */
4208 		return amdgpu_dc > 0;
4209 	default:
4210 		return amdgpu_dc != 0;
4211 #else
4212 	default:
4213 		if (amdgpu_dc > 0)
4214 			dev_info_once(
4215 				&pdev->dev,
4216 				"Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
4217 		return false;
4218 #endif
4219 	}
4220 }
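
/*
 * Editor's note on the amdgpu_dc parameter semantics used above: with
 * the default of -1 the per-ASIC default is used, 0 forces the legacy
 * display path, and > 0 opts in to DC where the build supports it (the
 * SI/CIK chips listed require the explicit opt-in), e.g.:
 *
 *	modprobe amdgpu dc=1
 */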
4221 
4222 /**
4223  * amdgpu_device_has_dc_support - check if dc is supported
4224  *
4225  * @adev: amdgpu_device pointer
4226  *
4227  * Returns true for supported, false for not supported
4228  */
4229 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
4230 {
4231 	if (adev->enable_virtual_display ||
4232 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
4233 		return false;
4234 
4235 	return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
4236 }
4237 
4238 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
4239 {
4240 	struct amdgpu_device *adev =
4241 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
4242 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4243 
4244 	/* It's a bug to not have a hive within this function */
4245 	if (WARN_ON(!hive))
4246 		return;
4247 
4248 	/*
4249 	 * Use task barrier to synchronize all xgmi reset works across the
4250 	 * hive. task_barrier_enter and task_barrier_exit will block
4251 	 * until all the threads running the xgmi reset works reach
4252 	 * those points. task_barrier_full will do both blocks.
4253 	 */
4254 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
4255 
4256 		task_barrier_enter(&hive->tb);
4257 		adev->asic_reset_res = amdgpu_device_baco_enter(adev);
4258 
4259 		if (adev->asic_reset_res)
4260 			goto fail;
4261 
4262 		task_barrier_exit(&hive->tb);
4263 		adev->asic_reset_res = amdgpu_device_baco_exit(adev);
4264 
4265 		if (adev->asic_reset_res)
4266 			goto fail;
4267 
4268 		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
4269 	} else {
4270 
4271 		task_barrier_full(&hive->tb);
4272 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
4273 	}
4274 
4275 fail:
4276 	if (adev->asic_reset_res)
4277 		dev_warn(adev->dev,
4278 			 "ASIC reset failed with error, %d for drm dev, %s",
4279 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
4280 	amdgpu_put_xgmi_hive(hive);
4281 }
4282 
4283 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
4284 {
4285 	char *input = amdgpu_lockup_timeout;
4286 	char *timeout_setting = NULL;
4287 	int index = 0;
4288 	long timeout;
4289 	int ret = 0;
4290 
4291 	/*
4292 	 * By default timeout for jobs is 10 sec
4293 	 */
4294 	adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
4295 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
4296 
4297 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
4298 		while ((timeout_setting = strsep(&input, ",")) &&
4299 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
4300 			ret = kstrtol(timeout_setting, 0, &timeout);
4301 			if (ret)
4302 				return ret;
4303 
4304 			if (timeout == 0) {
4305 				index++;
4306 				continue;
4307 			} else if (timeout < 0) {
4308 				timeout = MAX_SCHEDULE_TIMEOUT;
4309 				dev_warn(adev->dev, "lockup timeout disabled");
4310 				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
4311 			} else {
4312 				timeout = msecs_to_jiffies(timeout);
4313 			}
4314 
4315 			switch (index++) {
4316 			case 0:
4317 				adev->gfx_timeout = timeout;
4318 				break;
4319 			case 1:
4320 				adev->compute_timeout = timeout;
4321 				break;
4322 			case 2:
4323 				adev->sdma_timeout = timeout;
4324 				break;
4325 			case 3:
4326 				adev->video_timeout = timeout;
4327 				break;
4328 			default:
4329 				break;
4330 			}
4331 		}
4332 		/*
4333 		 * There is only one value specified and
4334 		 * it should apply to all non-compute jobs.
4335 		 */
4336 		if (index == 1) {
4337 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
4338 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
4339 				adev->compute_timeout = adev->gfx_timeout;
4340 		}
4341 	}
4342 
4343 	return ret;
4344 }
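
/*
 * Editor's note on the format parsed above: lockup_timeout takes up to
 * four comma-separated millisecond values in the order gfx, compute,
 * sdma, video; 0 keeps the default and a negative value disables the
 * timeout entirely. For example:
 *
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 *
 * A single value applies to all non-compute queues, and also to compute
 * under SR-IOV or passthrough.
 */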
4345 
4346 /**
4347  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
4348  *
4349  * @adev: amdgpu_device pointer
4350  *
4351  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
4352  */
4353 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
4354 {
4355 	struct iommu_domain *domain;
4356 
4357 	domain = iommu_get_domain_for_dev(adev->dev);
4358 	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
4359 		adev->ram_is_direct_mapped = true;
4360 }
4361 
4362 #if defined(CONFIG_HSA_AMD_P2P)
4363 /**
4364  * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
4365  *
4366  * @adev: amdgpu_device pointer
4367  *
4368  * Returns true if the IOMMU remaps BAR addresses (DMA or DMA_FQ domain).
4369  */
4370 static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
4371 {
4372 	struct iommu_domain *domain;
4373 
4374 	domain = iommu_get_domain_for_dev(adev->dev);
4375 	if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
4376 		domain->type ==	IOMMU_DOMAIN_DMA_FQ))
4377 		return true;
4378 
4379 	return false;
4380 }
4381 #endif
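
/*
 * Condensed view of the two IOMMU checks above (editor's illustration):
 *
 *	domain = iommu_get_domain_for_dev(adev->dev);
 *	// NULL or IOMMU_DOMAIN_IDENTITY  -> RAM is direct mapped
 *	// IOMMU_DOMAIN_DMA / _DMA_FQ     -> addresses are remapped (P2P case)
 */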
4382 
4383 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
4384 {
4385 	if (amdgpu_mcbp == 1)
4386 		adev->gfx.mcbp = true;
4387 	else if (amdgpu_mcbp == 0)
4388 		adev->gfx.mcbp = false;
4389 
4390 	if (amdgpu_sriov_vf(adev))
4391 		adev->gfx.mcbp = true;
4392 
4393 	if (adev->gfx.mcbp)
4394 		dev_info(adev->dev, "MCBP is enabled\n");
4395 }
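
/*
 * Resulting MCBP (mid-command-buffer preemption) policy from the helper
 * above, summarized:
 *
 *	amdgpu_mcbp == 1   ->  enabled
 *	amdgpu_mcbp == 0   ->  disabled
 *	SR-IOV VF          ->  always enabled, overriding the parameter
 */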
4396 
4397 /**
4398  * amdgpu_device_init - initialize the driver
4399  *
4400  * @adev: amdgpu_device pointer
4401  * @flags: driver flags
4402  *
4403  * Initializes the driver info and hw (all asics).
4404  * Returns 0 for success or an error on failure.
4405  * Called at driver startup.
4406  */
4407 int amdgpu_device_init(struct amdgpu_device *adev,
4408 		       uint32_t flags)
4409 {
4410 	struct pci_dev *pdev = adev->pdev;
4411 	int r, i;
4412 	bool px = false;
4413 	u32 max_MBps;
4414 	int tmp;
4415 
4416 	adev->shutdown = false;
4417 	adev->flags = flags;
4418 
4419 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
4420 		adev->asic_type = amdgpu_force_asic_type;
4421 	else
4422 		adev->asic_type = flags & AMD_ASIC_MASK;
4423 
4424 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
4425 	if (amdgpu_emu_mode == 1)
4426 		adev->usec_timeout *= 10;
4427 	adev->gmc.gart_size = 512 * 1024 * 1024;
4428 	adev->accel_working = false;
4429 	adev->num_rings = 0;
4430 	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
4431 	adev->mman.buffer_funcs = NULL;
4432 	adev->mman.buffer_funcs_ring = NULL;
4433 	adev->vm_manager.vm_pte_funcs = NULL;
4434 	adev->vm_manager.vm_pte_num_scheds = 0;
4435 	adev->gmc.gmc_funcs = NULL;
4436 	adev->harvest_ip_mask = 0x0;
4437 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
4438 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
4439 
4440 	adev->smc_rreg = &amdgpu_invalid_rreg;
4441 	adev->smc_wreg = &amdgpu_invalid_wreg;
4442 	adev->pcie_rreg = &amdgpu_invalid_rreg;
4443 	adev->pcie_wreg = &amdgpu_invalid_wreg;
4444 	adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
4445 	adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
4446 	adev->pciep_rreg = &amdgpu_invalid_rreg;
4447 	adev->pciep_wreg = &amdgpu_invalid_wreg;
4448 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
4449 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
4450 	adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
4451 	adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
4452 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
4453 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
4454 	adev->didt_rreg = &amdgpu_invalid_rreg;
4455 	adev->didt_wreg = &amdgpu_invalid_wreg;
4456 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
4457 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
4458 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
4459 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
4460 
4461 	dev_info(
4462 		adev->dev,
4463 		"initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
4464 		amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
4465 		pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
4466 
4467 	/* mutex initializations are all done here so we
4468 	 * can recall function without having locking issues
4469 	 */
4470 	mutex_init(&adev->firmware.mutex);
4471 	mutex_init(&adev->pm.mutex);
4472 	mutex_init(&adev->gfx.gpu_clock_mutex);
4473 	mutex_init(&adev->srbm_mutex);
4474 	mutex_init(&adev->gfx.pipe_reserve_mutex);
4475 	mutex_init(&adev->gfx.gfx_off_mutex);
4476 	mutex_init(&adev->gfx.partition_mutex);
4477 	mutex_init(&adev->grbm_idx_mutex);
4478 	mutex_init(&adev->mn_lock);
4479 	mutex_init(&adev->virt.vf_errors.lock);
4480 	hash_init(adev->mn_hash);
4481 	mutex_init(&adev->psp.mutex);
4482 	mutex_init(&adev->notifier_lock);
4483 	mutex_init(&adev->pm.stable_pstate_ctx_lock);
4484 	mutex_init(&adev->benchmark_mutex);
4485 	mutex_init(&adev->gfx.reset_sem_mutex);
4486 	/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
4487 	mutex_init(&adev->enforce_isolation_mutex);
4488 	for (i = 0; i < MAX_XCP; ++i) {
4489 		adev->isolation[i].spearhead = dma_fence_get_stub();
4490 		amdgpu_sync_create(&adev->isolation[i].active);
4491 		amdgpu_sync_create(&adev->isolation[i].prev);
4492 	}
4493 	mutex_init(&adev->gfx.userq_sch_mutex);
4494 	mutex_init(&adev->gfx.workload_profile_mutex);
4495 	mutex_init(&adev->vcn.workload_profile_mutex);
4496 	mutex_init(&adev->userq_mutex);
4497 
4498 	amdgpu_device_init_apu_flags(adev);
4499 
4500 	r = amdgpu_device_check_arguments(adev);
4501 	if (r)
4502 		return r;
4503 
4504 	spin_lock_init(&adev->mmio_idx_lock);
4505 	spin_lock_init(&adev->smc_idx_lock);
4506 	spin_lock_init(&adev->pcie_idx_lock);
4507 	spin_lock_init(&adev->uvd_ctx_idx_lock);
4508 	spin_lock_init(&adev->didt_idx_lock);
4509 	spin_lock_init(&adev->gc_cac_idx_lock);
4510 	spin_lock_init(&adev->se_cac_idx_lock);
4511 	spin_lock_init(&adev->audio_endpt_idx_lock);
4512 	spin_lock_init(&adev->mm_stats.lock);
4513 	spin_lock_init(&adev->virt.rlcg_reg_lock);
4514 	spin_lock_init(&adev->wb.lock);
4515 
4516 	xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ);
4517 
4518 	INIT_LIST_HEAD(&adev->reset_list);
4519 
4520 	INIT_LIST_HEAD(&adev->ras_list);
4521 
4522 	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
4523 
4524 	INIT_LIST_HEAD(&adev->userq_mgr_list);
4525 
4526 	INIT_DELAYED_WORK(&adev->delayed_init_work,
4527 			  amdgpu_device_delayed_init_work_handler);
4528 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
4529 			  amdgpu_device_delay_enable_gfx_off);
4530 	/*
4531 	 * Initialize the enforce_isolation work structures for each XCP
4532 	 * partition.  This work handler is responsible for enforcing shader
4533 	 * isolation on AMD GPUs.  It counts the number of emitted fences for
4534 	 * each GFX and compute ring.  If there are any fences, it schedules
4535 	 * the `enforce_isolation_work` to be run after a delay.  If there are
4536 	 * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
4537 	 * runqueue.
4538 	 */
4539 	for (i = 0; i < MAX_XCP; i++) {
4540 		INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
4541 				  amdgpu_gfx_enforce_isolation_handler);
4542 		adev->gfx.enforce_isolation[i].adev = adev;
4543 		adev->gfx.enforce_isolation[i].xcp_id = i;
4544 	}
4545 
4546 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
4547 
4548 	adev->gfx.gfx_off_req_count = 1;
4549 	adev->gfx.gfx_off_residency = 0;
4550 	adev->gfx.gfx_off_entrycount = 0;
4551 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
4552 
4553 	atomic_set(&adev->throttling_logging_enabled, 1);
4554 	/*
4555 	 * If throttling continues, logging will be performed every minute
4556 	 * to avoid log flooding. "-1" is subtracted since the thermal
4557 	 * throttling interrupt comes every second. Thus, the total logging
4558 	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
4559 	 * for the throttling interrupt) = 60 seconds.
4560 	 */
4561 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4562 
4563 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4564 
4565 	/* Registers mapping */
4566 	/* TODO: block userspace mapping of io register */
4567 	if (adev->asic_type >= CHIP_BONAIRE) {
4568 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4569 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4570 	} else {
4571 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4572 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4573 	}
4574 
4575 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4576 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4577 
4578 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4579 	if (!adev->rmmio)
4580 		return -ENOMEM;
4581 
4582 	dev_info(adev->dev, "register mmio base: 0x%08X\n",
4583 		 (uint32_t)adev->rmmio_base);
4584 	dev_info(adev->dev, "register mmio size: %u\n",
4585 		 (unsigned int)adev->rmmio_size);
4586 
4587 	/*
4588 	 * The reset domain needs to be present early, before the XGMI hive
4589 	 * (if any) is discovered and initialized, so that the reset sem and
4590 	 * in_gpu_reset flag can be used during init and before any RREG32 call.
4591 	 */
4592 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4593 	if (!adev->reset_domain)
4594 		return -ENOMEM;
4595 
4596 	/* detect hw virtualization here */
4597 	amdgpu_virt_init(adev);
4598 
4599 	amdgpu_device_get_pcie_info(adev);
4600 
4601 	r = amdgpu_device_get_job_timeout_settings(adev);
4602 	if (r) {
4603 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4604 		return r;
4605 	}
4606 
4607 	amdgpu_device_set_mcbp(adev);
4608 
4609 	/*
4610 	 * By default, use the default mode where all blocks are expected to be
4611 	 * initialized. At present, 'swinit' of the blocks must complete before
4612 	 * the need for a different level can be detected.
4613 	 */
4614 	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
4615 	/* early init functions */
4616 	r = amdgpu_device_ip_early_init(adev);
4617 	if (r)
4618 		return r;
4619 
4620 	/*
4621 	 * No need to remove conflicting FBs for non-display class devices.
4622 	 * This prevents the sysfb from being freed accidentally.
4623 	 */
4624 	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
4625 	    (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
4626 		/* Get rid of things like offb */
4627 		r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
4628 		if (r)
4629 			return r;
4630 	}
4631 
4632 	/* Enable TMZ based on IP_VERSION */
4633 	amdgpu_gmc_tmz_set(adev);
4634 
4635 	if (amdgpu_sriov_vf(adev) &&
4636 	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
4637 		/* VF MMIO access (except mailbox range) from CPU
4638 		 * will be blocked during sriov runtime
4639 		 */
4640 		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
4641 
4642 	amdgpu_gmc_noretry_set(adev);
4643 	/* Need to get xgmi info early to decide the reset behavior */
4644 	if (adev->gmc.xgmi.supported) {
4645 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
4646 		if (r)
4647 			return r;
4648 	}
4649 
4650 	/* enable PCIE atomic ops */
4651 	if (amdgpu_sriov_vf(adev)) {
4652 		if (adev->virt.fw_reserve.p_pf2vf)
4653 			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4654 						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4655 				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4656 	/* APUs with gfx9 onwards don't rely on PCIe atomics; rather, an
4657 	 * internal path natively supports atomics, so set have_atomics_support to true.
4658 	 */
4659 	} else if ((adev->flags & AMD_IS_APU) &&
4660 		   (amdgpu_ip_version(adev, GC_HWIP, 0) >
4661 		    IP_VERSION(9, 0, 0))) {
4662 		adev->have_atomics_support = true;
4663 	} else {
4664 		adev->have_atomics_support =
4665 			!pci_enable_atomic_ops_to_root(adev->pdev,
4666 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4667 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4668 	}
4669 
4670 	if (!adev->have_atomics_support)
4671 		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
4672 
4673 	/* doorbell bar mapping and doorbell index init */
4674 	amdgpu_doorbell_init(adev);
4675 
4676 	if (amdgpu_emu_mode == 1) {
4677 		/* post the asic on emulation mode */
4678 		emu_soc_asic_init(adev);
4679 		goto fence_driver_init;
4680 	}
4681 
4682 	amdgpu_reset_init(adev);
4683 
4684 	/* detect if we are with an SRIOV vbios */
4685 	if (adev->bios)
4686 		amdgpu_device_detect_sriov_bios(adev);
4687 
4688 	/* check if we need to reset the asic
4689 	 *  E.g., driver was not cleanly unloaded previously, etc.
4690 	 */
4691 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4692 		if (adev->gmc.xgmi.num_physical_nodes) {
4693 			dev_info(adev->dev, "Pending hive reset.\n");
4694 			amdgpu_set_init_level(adev,
4695 					      AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
4696 		} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
4697 				   !amdgpu_device_has_display_hardware(adev)) {
4698 					r = psp_gpu_reset(adev);
4699 		} else {
4700 				tmp = amdgpu_reset_method;
4701 				/* It should do a default reset when loading or reloading the driver,
4702 				 * regardless of the module parameter reset_method.
4703 				 */
4704 				amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4705 				r = amdgpu_asic_reset(adev);
4706 				amdgpu_reset_method = tmp;
4707 		}
4708 
4709 		if (r) {
4710 		  dev_err(adev->dev, "asic reset on init failed\n");
4711 		  goto failed;
4712 		}
4713 	}
4714 
4715 	/* Post card if necessary */
4716 	if (amdgpu_device_need_post(adev)) {
4717 		if (!adev->bios) {
4718 			dev_err(adev->dev, "no vBIOS found\n");
4719 			r = -EINVAL;
4720 			goto failed;
4721 		}
4722 		dev_info(adev->dev, "GPU posting now...\n");
4723 		r = amdgpu_device_asic_init(adev);
4724 		if (r) {
4725 			dev_err(adev->dev, "gpu post error!\n");
4726 			goto failed;
4727 		}
4728 	}
4729 
4730 	if (adev->bios) {
4731 		if (adev->is_atom_fw) {
4732 			/* Initialize clocks */
4733 			r = amdgpu_atomfirmware_get_clock_info(adev);
4734 			if (r) {
4735 				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4736 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4737 				goto failed;
4738 			}
4739 		} else {
4740 			/* Initialize clocks */
4741 			r = amdgpu_atombios_get_clock_info(adev);
4742 			if (r) {
4743 				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4744 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4745 				goto failed;
4746 			}
4747 			/* init i2c buses */
4748 			amdgpu_i2c_init(adev);
4749 		}
4750 	}
4751 
4752 fence_driver_init:
4753 	/* Fence driver */
4754 	r = amdgpu_fence_driver_sw_init(adev);
4755 	if (r) {
4756 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4757 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4758 		goto failed;
4759 	}
4760 
4761 	/* init the mode config */
4762 	drm_mode_config_init(adev_to_drm(adev));
4763 
4764 	r = amdgpu_device_ip_init(adev);
4765 	if (r) {
4766 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4767 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4768 		goto release_ras_con;
4769 	}
4770 
4771 	amdgpu_fence_driver_hw_init(adev);
4772 
4773 	dev_info(adev->dev,
4774 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4775 			adev->gfx.config.max_shader_engines,
4776 			adev->gfx.config.max_sh_per_se,
4777 			adev->gfx.config.max_cu_per_sh,
4778 			adev->gfx.cu_info.number);
4779 
4780 	adev->accel_working = true;
4781 
4782 	amdgpu_vm_check_compute_bug(adev);
4783 
4784 	/* Initialize the buffer migration limit. */
4785 	if (amdgpu_moverate >= 0)
4786 		max_MBps = amdgpu_moverate;
4787 	else
4788 		max_MBps = 8; /* Allow 8 MB/s. */
4789 	/* Get a log2 for easy divisions. */
4790 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4791 
4792 	/*
4793 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4794 	 * Otherwise the mgpu fan boost feature will be skipped because the
4795 	 * gpu instance count would be too low.
4796 	 */
4797 	amdgpu_register_gpu_instance(adev);
4798 
4799 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4800 	 * explicit gating rather than handling it automatically.
4801 	 */
4802 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4803 		r = amdgpu_device_ip_late_init(adev);
4804 		if (r) {
4805 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4806 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4807 			goto release_ras_con;
4808 		}
4809 		/* must succeed. */
4810 		amdgpu_ras_resume(adev);
4811 		queue_delayed_work(system_wq, &adev->delayed_init_work,
4812 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4813 	}
4814 
4815 	if (amdgpu_sriov_vf(adev)) {
4816 		amdgpu_virt_release_full_gpu(adev, true);
4817 		flush_delayed_work(&adev->delayed_init_work);
4818 	}
4819 
4820 	/*
4821 	 * Place the sysfs registrations after `late_init`, as some of the
4822 	 * operations performed in `late_init` might affect the creation of
4823 	 * the sysfs interfaces.
4824 	 */
4825 	r = amdgpu_atombios_sysfs_init(adev);
4826 	if (r)
4827 		drm_err(&adev->ddev,
4828 			"registering atombios sysfs failed (%d).\n", r);
4829 
4830 	r = amdgpu_pm_sysfs_init(adev);
4831 	if (r)
4832 		dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
4833 
4834 	r = amdgpu_ucode_sysfs_init(adev);
4835 	if (r) {
4836 		adev->ucode_sysfs_en = false;
4837 		dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
4838 	} else
4839 		adev->ucode_sysfs_en = true;
4840 
4841 	r = amdgpu_device_attr_sysfs_init(adev);
4842 	if (r)
4843 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
4844 
4845 	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4846 	if (r)
4847 		dev_err(adev->dev,
4848 			"Could not create amdgpu board attributes\n");
4849 
4850 	amdgpu_fru_sysfs_init(adev);
4851 	amdgpu_reg_state_sysfs_init(adev);
4852 	amdgpu_xcp_sysfs_init(adev);
4853 
4854 	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4855 		r = amdgpu_pmu_init(adev);
4856 		if (r)
4857 			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
4858 
4859 	/* Keep the stored PCI config space at hand for restore after a sudden PCI error */
4860 	if (amdgpu_device_cache_pci_state(adev->pdev))
4861 		pci_restore_state(pdev);
4862 
4863 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4864 	/* this will fail for cards that aren't VGA class devices, just
4865 	 * ignore it
4866 	 */
4867 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4868 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4869 
4870 	px = amdgpu_device_supports_px(adev);
4871 
4872 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4873 				apple_gmux_detect(NULL, NULL)))
4874 		vga_switcheroo_register_client(adev->pdev,
4875 					       &amdgpu_switcheroo_ops, px);
4876 
4877 	if (px)
4878 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4879 
4880 	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
4881 		amdgpu_xgmi_reset_on_init(adev);
4882 
4883 	amdgpu_device_check_iommu_direct_map(adev);
4884 
4885 	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
4886 	r = register_pm_notifier(&adev->pm_nb);
4887 	if (r)
4888 		goto failed;
4889 
4890 	return 0;
4891 
4892 release_ras_con:
4893 	if (amdgpu_sriov_vf(adev))
4894 		amdgpu_virt_release_full_gpu(adev, true);
4895 
4896 	/* failed in exclusive mode due to timeout */
4897 	if (amdgpu_sriov_vf(adev) &&
4898 		!amdgpu_sriov_runtime(adev) &&
4899 		amdgpu_virt_mmio_blocked(adev) &&
4900 		!amdgpu_virt_wait_reset(adev)) {
4901 		dev_err(adev->dev, "VF exclusive mode timeout\n");
4902 		/* Don't send request since VF is inactive. */
4903 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4904 		adev->virt.ops = NULL;
4905 		r = -EAGAIN;
4906 	}
4907 	amdgpu_release_ras_context(adev);
4908 
4909 failed:
4910 	amdgpu_vf_error_trans_all(adev);
4911 
4912 	return r;
4913 }
4914 
4915 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4916 {
4917 
4918 	/* Clear all CPU mappings pointing to this device */
4919 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4920 
4921 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4922 	amdgpu_doorbell_fini(adev);
4923 
4924 	iounmap(adev->rmmio);
4925 	adev->rmmio = NULL;
4926 	if (adev->mman.aper_base_kaddr)
4927 		iounmap(adev->mman.aper_base_kaddr);
4928 	adev->mman.aper_base_kaddr = NULL;
4929 
4930 	/* Memory manager related */
4931 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4932 		arch_phys_wc_del(adev->gmc.vram_mtrr);
4933 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4934 	}
4935 }
4936 
4937 /**
4938  * amdgpu_device_fini_hw - tear down the driver
4939  *
4940  * @adev: amdgpu_device pointer
4941  *
4942  * Tear down the driver info (all asics).
4943  * Called at driver shutdown.
4944  */
4945 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4946 {
4947 	dev_info(adev->dev, "amdgpu: finishing device.\n");
4948 	flush_delayed_work(&adev->delayed_init_work);
4949 
4950 	if (adev->mman.initialized)
4951 		drain_workqueue(adev->mman.bdev.wq);
4952 	adev->shutdown = true;
4953 
4954 	unregister_pm_notifier(&adev->pm_nb);
4955 
4956 	/* make sure IB test finished before entering exclusive mode
4957 	 * to avoid preemption on IB test
4958 	 */
4959 	if (amdgpu_sriov_vf(adev)) {
4960 		amdgpu_virt_request_full_gpu(adev, false);
4961 		amdgpu_virt_fini_data_exchange(adev);
4962 	}
4963 
4964 	/* disable all interrupts */
4965 	amdgpu_irq_disable_all(adev);
4966 	if (adev->mode_info.mode_config_initialized) {
4967 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4968 			drm_helper_force_disable_all(adev_to_drm(adev));
4969 		else
4970 			drm_atomic_helper_shutdown(adev_to_drm(adev));
4971 	}
4972 	amdgpu_fence_driver_hw_fini(adev);
4973 
4974 	if (adev->pm.sysfs_initialized)
4975 		amdgpu_pm_sysfs_fini(adev);
4976 	if (adev->ucode_sysfs_en)
4977 		amdgpu_ucode_sysfs_fini(adev);
4978 	amdgpu_device_attr_sysfs_fini(adev);
4979 	amdgpu_fru_sysfs_fini(adev);
4980 
4981 	amdgpu_reg_state_sysfs_fini(adev);
4982 	amdgpu_xcp_sysfs_fini(adev);
4983 
4984 	/* disable ras feature must before hw fini */
4985 	amdgpu_ras_pre_fini(adev);
4986 
4987 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4988 
4989 	amdgpu_device_ip_fini_early(adev);
4990 
4991 	amdgpu_irq_fini_hw(adev);
4992 
4993 	if (adev->mman.initialized)
4994 		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4995 
4996 	amdgpu_gart_dummy_page_fini(adev);
4997 
4998 	if (drm_dev_is_unplugged(adev_to_drm(adev)))
4999 		amdgpu_device_unmap_mmio(adev);
5000 
5001 }
5002 
5003 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
5004 {
5005 	int i, idx;
5006 	bool px;
5007 
5008 	amdgpu_device_ip_fini(adev);
5009 	amdgpu_fence_driver_sw_fini(adev);
5010 	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
5011 	adev->accel_working = false;
5012 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
5013 	for (i = 0; i < MAX_XCP; ++i) {
5014 		dma_fence_put(adev->isolation[i].spearhead);
5015 		amdgpu_sync_free(&adev->isolation[i].active);
5016 		amdgpu_sync_free(&adev->isolation[i].prev);
5017 	}
5018 
5019 	amdgpu_reset_fini(adev);
5020 
5021 	/* free i2c buses */
5022 	amdgpu_i2c_fini(adev);
5023 
5024 	if (adev->bios) {
5025 		if (amdgpu_emu_mode != 1)
5026 			amdgpu_atombios_fini(adev);
5027 		amdgpu_bios_release(adev);
5028 	}
5029 
5030 	kfree(adev->fru_info);
5031 	adev->fru_info = NULL;
5032 
5033 	kfree(adev->xcp_mgr);
5034 	adev->xcp_mgr = NULL;
5035 
5036 	px = amdgpu_device_supports_px(adev);
5037 
5038 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
5039 				apple_gmux_detect(NULL, NULL)))
5040 		vga_switcheroo_unregister_client(adev->pdev);
5041 
5042 	if (px)
5043 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
5044 
5045 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
5046 		vga_client_unregister(adev->pdev);
5047 
5048 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
5049 
5050 		iounmap(adev->rmmio);
5051 		adev->rmmio = NULL;
5052 		drm_dev_exit(idx);
5053 	}
5054 
5055 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
5056 		amdgpu_pmu_fini(adev);
5057 	if (adev->mman.discovery_bin)
5058 		amdgpu_discovery_fini(adev);
5059 
5060 	amdgpu_reset_put_reset_domain(adev->reset_domain);
5061 	adev->reset_domain = NULL;
5062 
5063 	kfree(adev->pci_state);
5064 	kfree(adev->pcie_reset_ctx.swds_pcistate);
5065 	kfree(adev->pcie_reset_ctx.swus_pcistate);
5066 }
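
/*
 * Illustrative sketch, not part of the driver: how the two-stage teardown
 * above is typically driven. The hw stage quiesces interrupts, fences and
 * MMIO early (e.g. on unplug), while the sw stage frees IP-block state once
 * no users remain. example_remove() is a hypothetical caller; the real call
 * sites live in amdgpu_drv.c.
 *
 *	static void example_remove(struct pci_dev *pdev)
 *	{
 *		struct drm_device *dev = pci_get_drvdata(pdev);
 *		struct amdgpu_device *adev = drm_to_adev(dev);
 *
 *		amdgpu_device_fini_hw(adev);	// quiesce hw, irqs, MMIO
 *		// ... drm device unplug/refcount drop happens in between ...
 *		amdgpu_device_fini_sw(adev);	// free sw state, IP blocks
 *	}
 */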
5067 
5068 /**
5069  * amdgpu_device_evict_resources - evict device resources
5070  * @adev: amdgpu device object
5071  *
5072  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
5073  * of the vram memory type. Mainly used for evicting device resources
5074  * at suspend time.
5075  *
5076  */
5077 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
5078 {
5079 	int ret;
5080 
5081 	/* No need to evict vram on APUs unless going to S4 */
5082 	if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
5083 		return 0;
5084 
5085 	/* No need to evict when going to S5 through S4 callbacks */
5086 	if (system_state == SYSTEM_POWER_OFF)
5087 		return 0;
5088 
5089 	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
5090 	if (ret) {
5091 		dev_warn(adev->dev, "evicting device resources failed\n");
5092 		return ret;
5093 	}
5094 
5095 	if (adev->in_s4) {
5096 		ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
5097 		if (ret)
5098 			dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
5099 	}
5100 	return ret;
5101 }
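
/*
 * Condensed decision table for the early-outs above (summary only, not
 * driver code):
 *
 *	APU and not entering S4           -> skip (carve-out is system RAM)
 *	S5 reached through S4 callbacks   -> skip (power is going away anyway)
 *	otherwise                         -> evict TTM_PL_VRAM; for S4 also
 *	                                     prepare TTM for hibernation
 */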
5102 
5103 /*
5104  * Suspend & resume.
5105  */
5106 /**
5107  * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
5108  * @nb: notifier block
5109  * @mode: suspend mode
5110  * @data: data
5111  *
5112  * This function is called when the system is about to suspend or hibernate.
5113  * It is used to set the appropriate flags so that eviction can be optimized
5114  * in the pm prepare callback.
5115  */
5116 static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
5117 				     void *data)
5118 {
5119 	struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
5120 
5121 	switch (mode) {
5122 	case PM_HIBERNATION_PREPARE:
5123 		adev->in_s4 = true;
5124 		break;
5125 	case PM_POST_HIBERNATION:
5126 		adev->in_s4 = false;
5127 		break;
5128 	}
5129 
5130 	return NOTIFY_DONE;
5131 }
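
/*
 * Illustrative trace, not driver code: the PM core brackets the whole
 * hibernate sequence with the notifier above, so adev->in_s4 is visible to
 * every suspend/resume callback that runs in between:
 *
 *	PM_HIBERNATION_PREPARE   ->  adev->in_s4 = true
 *	  ... freeze/thaw/poweroff callbacks observe in_s4 ...
 *	PM_POST_HIBERNATION      ->  adev->in_s4 = false
 */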
5132 
5133 /**
5134  * amdgpu_device_prepare - prepare for device suspend
5135  *
5136  * @dev: drm dev pointer
5137  *
5138  * Prepare to put the hw in the suspend state (all asics).
5139  * Returns 0 for success or an error on failure.
5140  * Called at driver suspend.
5141  */
5142 int amdgpu_device_prepare(struct drm_device *dev)
5143 {
5144 	struct amdgpu_device *adev = drm_to_adev(dev);
5145 	int i, r;
5146 
5147 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
5148 		return 0;
5149 
5150 	/* Evict the majority of BOs before starting suspend sequence */
5151 	r = amdgpu_device_evict_resources(adev);
5152 	if (r)
5153 		return r;
5154 
5155 	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
5156 
5157 	for (i = 0; i < adev->num_ip_blocks; i++) {
5158 		if (!adev->ip_blocks[i].status.valid)
5159 			continue;
5160 		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
5161 			continue;
5162 		r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
5163 		if (r)
5164 			return r;
5165 	}
5166 
5167 	return 0;
5168 }
5169 
5170 /**
5171  * amdgpu_device_complete - complete power state transition
5172  *
5173  * @dev: drm dev pointer
5174  *
5175  * Undo the changes from amdgpu_device_prepare. This will be
5176  * called on all resume transitions, including those that failed.
5177  */
5178 void amdgpu_device_complete(struct drm_device *dev)
5179 {
5180 	struct amdgpu_device *adev = drm_to_adev(dev);
5181 	int i;
5182 
5183 	for (i = 0; i < adev->num_ip_blocks; i++) {
5184 		if (!adev->ip_blocks[i].status.valid)
5185 			continue;
5186 		if (!adev->ip_blocks[i].version->funcs->complete)
5187 			continue;
5188 		adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
5189 	}
5190 }
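
/*
 * Illustrative sketch, not driver code: how prepare/complete pair up in a
 * dev_pm_ops table. The wiring below is hypothetical (the real one lives in
 * amdgpu_drv.c), and the example_pmops_* names are placeholders.
 *
 *	static int example_pmops_prepare(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_prepare(drm_dev);
 *	}
 *
 *	static void example_pmops_complete(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		amdgpu_device_complete(drm_dev);
 *	}
 */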
5191 
5192 /**
5193  * amdgpu_device_suspend - initiate device suspend
5194  *
5195  * @dev: drm dev pointer
5196  * @notify_clients: notify in-kernel DRM clients
5197  *
5198  * Puts the hw in the suspend state (all asics).
5199  * Returns 0 for success or an error on failure.
5200  * Called at driver suspend.
5201  */
5202 int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
5203 {
5204 	struct amdgpu_device *adev = drm_to_adev(dev);
5205 	int r = 0;
5206 
5207 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
5208 		return 0;
5209 
5210 	adev->in_suspend = true;
5211 
5212 	if (amdgpu_sriov_vf(adev)) {
5213 		if (!adev->in_runpm)
5214 			amdgpu_amdkfd_suspend_process(adev);
5215 		amdgpu_virt_fini_data_exchange(adev);
5216 		r = amdgpu_virt_request_full_gpu(adev, false);
5217 		if (r)
5218 			return r;
5219 	}
5220 
5221 	if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3))
5222 		dev_warn(adev->dev, "smart shift update failed\n");
5223 
5224 	if (notify_clients)
5225 		drm_client_dev_suspend(adev_to_drm(adev), false);
5226 
5227 	cancel_delayed_work_sync(&adev->delayed_init_work);
5228 
5229 	amdgpu_ras_suspend(adev);
5230 
5231 	amdgpu_device_ip_suspend_phase1(adev);
5232 
5233 	amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
5234 	amdgpu_userq_suspend(adev);
5235 
5236 	r = amdgpu_device_evict_resources(adev);
5237 	if (r)
5238 		return r;
5239 
5240 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
5241 
5242 	amdgpu_fence_driver_hw_fini(adev);
5243 
5244 	amdgpu_device_ip_suspend_phase2(adev);
5245 
5246 	if (amdgpu_sriov_vf(adev))
5247 		amdgpu_virt_release_full_gpu(adev, false);
5248 
5249 	return 0;
5250 }
5251 
5252 static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
5253 {
5254 	int r;
5255 	unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
5256 
5257 	/* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
5258 	 * may not work. The access could be blocked by nBIF protection as VF isn't in
5259 	 * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX
5260 	 * so that QEMU reprograms MSIX table.
5261 	 */
5262 	amdgpu_restore_msix(adev);
5263 
5264 	r = adev->gfxhub.funcs->get_xgmi_info(adev);
5265 	if (r)
5266 		return r;
5267 
5268 	dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
5269 		prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
5270 
5271 	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
5272 	adev->vm_manager.vram_base_offset +=
5273 		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
5274 
5275 	return 0;
5276 }
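
/*
 * Worked example of the vram_base_offset math above, with made-up numbers:
 * if get_mc_fb_offset() returns 0x8000000000, the XGMI node segment size is
 * 64 GiB (0x1000000000) and the VF landed on physical node 2 after
 * migration, then
 *
 *	vram_base_offset = 0x8000000000ULL	// mc_fb_offset (hypothetical)
 *			 + 2 * 0x1000000000ULL;	// node_id * segment_size
 *
 * i.e. the per-node window is stacked on top of the common FB offset.
 */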
5277 
5278 /**
5279  * amdgpu_device_resume - initiate device resume
5280  *
5281  * @dev: drm dev pointer
5282  * @notify_clients: notify in-kernel DRM clients
5283  *
5284  * Bring the hw back to operating state (all asics).
5285  * Returns 0 for success or an error on failure.
5286  * Called at driver resume.
5287  */
5288 int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
5289 {
5290 	struct amdgpu_device *adev = drm_to_adev(dev);
5291 	int r = 0;
5292 
5293 	if (amdgpu_sriov_vf(adev)) {
5294 		r = amdgpu_virt_request_full_gpu(adev, true);
5295 		if (r)
5296 			return r;
5297 	}
5298 
5299 	if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
5300 		r = amdgpu_virt_resume(adev);
5301 		if (r)
5302 			goto exit;
5303 	}
5304 
5305 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
5306 		return 0;
5307 
5308 	if (adev->in_s0ix)
5309 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
5310 
5311 	/* post card */
5312 	if (amdgpu_device_need_post(adev)) {
5313 		r = amdgpu_device_asic_init(adev);
5314 		if (r)
5315 			dev_err(adev->dev, "amdgpu asic init failed\n");
5316 	}
5317 
5318 	r = amdgpu_device_ip_resume(adev);
5319 
5320 	if (r) {
5321 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
5322 		goto exit;
5323 	}
5324 
5325 	r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
5326 	if (r)
5327 		goto exit;
5328 
5329 	r = amdgpu_userq_resume(adev);
5330 	if (r)
5331 		goto exit;
5332 
5333 	r = amdgpu_device_ip_late_init(adev);
5334 	if (r)
5335 		goto exit;
5336 
5337 	queue_delayed_work(system_wq, &adev->delayed_init_work,
5338 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
5339 exit:
5340 	if (amdgpu_sriov_vf(adev)) {
5341 		amdgpu_virt_init_data_exchange(adev);
5342 		amdgpu_virt_release_full_gpu(adev, true);
5343 
5344 		if (!r && !adev->in_runpm)
5345 			r = amdgpu_amdkfd_resume_process(adev);
5346 	}
5347 
5348 	if (r)
5349 		return r;
5350 
5351 	/* Make sure IB tests flushed */
5352 	flush_delayed_work(&adev->delayed_init_work);
5353 
5354 	if (notify_clients)
5355 		drm_client_dev_resume(adev_to_drm(adev), false);
5356 
5357 	amdgpu_ras_resume(adev);
5358 
5359 	if (adev->mode_info.num_crtc) {
5360 		/*
5361 		 * Most of the connector probing functions try to acquire runtime pm
5362 		 * refs to ensure that the GPU is powered on when connector polling is
5363 		 * performed. Since we're calling this from a runtime PM callback,
5364 		 * trying to acquire rpm refs will cause us to deadlock.
5365 		 *
5366 		 * Since we're guaranteed to be holding the rpm lock, it's safe to
5367 		 * temporarily disable the rpm helpers so this doesn't deadlock us.
5368 		 */
5369 #ifdef CONFIG_PM
5370 		dev->dev->power.disable_depth++;
5371 #endif
5372 		if (!adev->dc_enabled)
5373 			drm_helper_hpd_irq_event(dev);
5374 		else
5375 			drm_kms_helper_hotplug_event(dev);
5376 #ifdef CONFIG_PM
5377 		dev->dev->power.disable_depth--;
5378 #endif
5379 	}
5380 
5381 	amdgpu_vram_mgr_clear_reset_blocks(adev);
5382 	adev->in_suspend = false;
5383 
5384 	if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
5385 		dev_warn(adev->dev, "smart shift update failed\n");
5386 
5387 	return 0;
5388 }
5389 
5390 /**
5391  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
5392  *
5393  * @adev: amdgpu_device pointer
5394  *
5395  * The list of all the hardware IPs that make up the asic is walked and
5396  * the check_soft_reset callbacks are run.  check_soft_reset determines
5397  * if the asic is still hung or not.
5398  * Returns true if any of the IPs are still in a hung state, false if not.
5399  */
5400 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
5401 {
5402 	int i;
5403 	bool asic_hang = false;
5404 
5405 	if (amdgpu_sriov_vf(adev))
5406 		return true;
5407 
5408 	if (amdgpu_asic_need_full_reset(adev))
5409 		return true;
5410 
5411 	for (i = 0; i < adev->num_ip_blocks; i++) {
5412 		if (!adev->ip_blocks[i].status.valid)
5413 			continue;
5414 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
5415 			adev->ip_blocks[i].status.hang =
5416 				adev->ip_blocks[i].version->funcs->check_soft_reset(
5417 					&adev->ip_blocks[i]);
5418 		if (adev->ip_blocks[i].status.hang) {
5419 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
5420 			asic_hang = true;
5421 		}
5422 	}
5423 	return asic_hang;
5424 }
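
/*
 * Illustrative sketch, not driver code: what an IP block's check_soft_reset
 * callback looks like from this walker's point of view. The register and
 * mask below are hypothetical; real implementations read their own
 * hang-status registers (see the gfx/sdma IP code).
 *
 *	static bool example_ip_check_soft_reset(struct amdgpu_ip_block *ip_block)
 *	{
 *		struct amdgpu_device *adev = ip_block->adev;
 *
 *		// true while the block is still hung (hypothetical register)
 *		return !!(RREG32(EXAMPLE_IP_STATUS_REG) & EXAMPLE_IP_HANG_MASK);
 *	}
 */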
5425 
5426 /**
5427  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
5428  *
5429  * @adev: amdgpu_device pointer
5430  *
5431  * The list of all the hardware IPs that make up the asic is walked and the
5432  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
5433  * handles any IP specific hardware or software state changes that are
5434  * necessary for a soft reset to succeed.
5435  * Returns 0 on success, negative error code on failure.
5436  */
5437 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
5438 {
5439 	int i, r = 0;
5440 
5441 	for (i = 0; i < adev->num_ip_blocks; i++) {
5442 		if (!adev->ip_blocks[i].status.valid)
5443 			continue;
5444 		if (adev->ip_blocks[i].status.hang &&
5445 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
5446 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
5447 			if (r)
5448 				return r;
5449 		}
5450 	}
5451 
5452 	return 0;
5453 }
5454 
5455 /**
5456  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
5457  *
5458  * @adev: amdgpu_device pointer
5459  *
5460  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
5461  * reset is necessary to recover.
5462  * Returns true if a full asic reset is required, false if not.
5463  */
5464 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
5465 {
5466 	int i;
5467 
5468 	if (amdgpu_asic_need_full_reset(adev))
5469 		return true;
5470 
5471 	for (i = 0; i < adev->num_ip_blocks; i++) {
5472 		if (!adev->ip_blocks[i].status.valid)
5473 			continue;
5474 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
5475 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
5476 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
5477 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
5478 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
5479 			if (adev->ip_blocks[i].status.hang) {
5480 				dev_info(adev->dev, "Some block need full reset!\n");
5481 				return true;
5482 			}
5483 		}
5484 	}
5485 	return false;
5486 }
5487 
5488 /**
5489  * amdgpu_device_ip_soft_reset - do a soft reset
5490  *
5491  * @adev: amdgpu_device pointer
5492  *
5493  * The list of all the hardware IPs that make up the asic is walked and the
5494  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
5495  * IP specific hardware or software state changes that are necessary to soft
5496  * reset the IP.
5497  * Returns 0 on success, negative error code on failure.
5498  */
5499 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
5500 {
5501 	int i, r = 0;
5502 
5503 	for (i = 0; i < adev->num_ip_blocks; i++) {
5504 		if (!adev->ip_blocks[i].status.valid)
5505 			continue;
5506 		if (adev->ip_blocks[i].status.hang &&
5507 		    adev->ip_blocks[i].version->funcs->soft_reset) {
5508 			r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
5509 			if (r)
5510 				return r;
5511 		}
5512 	}
5513 
5514 	return 0;
5515 }
5516 
5517 /**
5518  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
5519  *
5520  * @adev: amdgpu_device pointer
5521  *
5522  * The list of all the hardware IPs that make up the asic is walked and the
5523  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
5524  * handles any IP specific hardware or software state changes that are
5525  * necessary after the IP has been soft reset.
5526  * Returns 0 on success, negative error code on failure.
5527  */
5528 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
5529 {
5530 	int i, r = 0;
5531 
5532 	for (i = 0; i < adev->num_ip_blocks; i++) {
5533 		if (!adev->ip_blocks[i].status.valid)
5534 			continue;
5535 		if (adev->ip_blocks[i].status.hang &&
5536 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
5537 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
5538 		if (r)
5539 			return r;
5540 	}
5541 
5542 	return 0;
5543 }
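
/*
 * The three soft-reset helpers above always run as a sequence; a condensed
 * sketch of the canonical order (this exact flow appears in
 * amdgpu_device_pre_asic_reset() further below):
 *
 *	if (amdgpu_device_ip_check_soft_reset(adev)) {
 *		amdgpu_device_ip_pre_soft_reset(adev);
 *		r = amdgpu_device_ip_soft_reset(adev);
 *		amdgpu_device_ip_post_soft_reset(adev);
 *		if (r || amdgpu_device_ip_check_soft_reset(adev))
 *			need_full_reset = true;	// still hung: escalate
 *	}
 */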
5544 
5545 /**
5546  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5547  *
5548  * @adev: amdgpu_device pointer
5549  * @reset_context: amdgpu reset context pointer
5550  *
5551  * Do a VF FLR and reinitialize the ASIC.
5552  * Returns 0 on success, otherwise an error.
5553  */
5554 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
5555 				     struct amdgpu_reset_context *reset_context)
5556 {
5557 	int r;
5558 	struct amdgpu_hive_info *hive = NULL;
5559 
5560 	if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
5561 		if (!amdgpu_ras_get_fed_status(adev))
5562 			amdgpu_virt_ready_to_reset(adev);
5563 		amdgpu_virt_wait_reset(adev);
5564 		clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
5565 		r = amdgpu_virt_request_full_gpu(adev, true);
5566 	} else {
5567 		r = amdgpu_virt_reset_gpu(adev);
5568 	}
5569 	if (r)
5570 		return r;
5571 
5572 	amdgpu_ras_clear_err_state(adev);
5573 	amdgpu_irq_gpu_reset_resume_helper(adev);
5574 
5575 	/* some sw cleanup the VF needs to do before recovery */
5576 	amdgpu_virt_post_reset(adev);
5577 
5578 	/* Resume IP prior to SMC */
5579 	r = amdgpu_device_ip_reinit_early_sriov(adev);
5580 	if (r)
5581 		return r;
5582 
5583 	amdgpu_virt_init_data_exchange(adev);
5584 
5585 	r = amdgpu_device_fw_loading(adev);
5586 	if (r)
5587 		return r;
5588 
5589 	/* now we are okay to resume SMC/CP/SDMA */
5590 	r = amdgpu_device_ip_reinit_late_sriov(adev);
5591 	if (r)
5592 		return r;
5593 
5594 	hive = amdgpu_get_xgmi_hive(adev);
5595 	/* Update PSP FW topology after reset */
5596 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
5597 		r = amdgpu_xgmi_update_topology(hive, adev);
5598 	if (hive)
5599 		amdgpu_put_xgmi_hive(hive);
5600 	if (r)
5601 		return r;
5602 
5603 	r = amdgpu_ib_ring_tests(adev);
5604 	if (r)
5605 		return r;
5606 
5607 	if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
5608 		amdgpu_inc_vram_lost(adev);
5609 
5610 	/* needs to be called during full access, so we can't do it later like
5611 	 * bare-metal does.
5612 	 */
5613 	amdgpu_amdkfd_post_reset(adev);
5614 	amdgpu_virt_release_full_gpu(adev, true);
5615 
5616 	/* Aldebaran and gfx_11_0_3 support ras in SRIOV, so we need to resume ras during reset */
5617 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
5618 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
5619 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
5620 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
5621 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5622 		amdgpu_ras_resume(adev);
5623 
5624 	amdgpu_virt_ras_telemetry_post_reset(adev);
5625 
5626 	return 0;
5627 }
5628 
5629 /**
5630  * amdgpu_device_has_job_running - check if there is any unfinished job
5631  *
5632  * @adev: amdgpu_device pointer
5633  *
5634  * Check if there is any job running on the device when the guest driver
5635  * receives an FLR notification from the host driver. If jobs are still
5636  * running, the guest driver will not respond to the FLR reset; instead, the
5637  * job is left to time out, and the guest driver then issues the reset request.
5638  */
5639 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5640 {
5641 	int i;
5642 
5643 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5644 		struct amdgpu_ring *ring = adev->rings[i];
5645 
5646 		if (!amdgpu_ring_sched_ready(ring))
5647 			continue;
5648 
5649 		if (amdgpu_fence_count_emitted(ring))
5650 			return true;
5651 	}
5652 	return false;
5653 }
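
/*
 * Illustrative sketch, not driver code: how a virtualization FLR handler
 * might consult the check above (the handler itself is hypothetical):
 *
 *	if (amdgpu_device_has_job_running(adev))
 *		return;	// don't ack the FLR; let the job timeout drive reset
 *	// ... otherwise proceed with the host-initiated FLR flow ...
 */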
5654 
5655 /**
5656  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5657  *
5658  * @adev: amdgpu_device pointer
5659  *
5660  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5661  * a hung GPU.
5662  */
5663 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5664 {
5665 
5666 	if (amdgpu_gpu_recovery == 0)
5667 		goto disabled;
5668 
5669 	/* Skip soft reset check in fatal error mode */
5670 	if (!amdgpu_ras_is_poison_mode_supported(adev))
5671 		return true;
5672 
5673 	if (amdgpu_sriov_vf(adev))
5674 		return true;
5675 
5676 	if (amdgpu_gpu_recovery == -1) {
5677 		switch (adev->asic_type) {
5678 #ifdef CONFIG_DRM_AMDGPU_SI
5679 		case CHIP_VERDE:
5680 		case CHIP_TAHITI:
5681 		case CHIP_PITCAIRN:
5682 		case CHIP_OLAND:
5683 		case CHIP_HAINAN:
5684 #endif
5685 #ifdef CONFIG_DRM_AMDGPU_CIK
5686 		case CHIP_KAVERI:
5687 		case CHIP_KABINI:
5688 		case CHIP_MULLINS:
5689 #endif
5690 		case CHIP_CARRIZO:
5691 		case CHIP_STONEY:
5692 		case CHIP_CYAN_SKILLFISH:
5693 			goto disabled;
5694 		default:
5695 			break;
5696 		}
5697 	}
5698 
5699 	return true;
5700 
5701 disabled:
5702 		dev_info(adev->dev, "GPU recovery disabled.\n");
5703 		return false;
5704 }
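
/*
 * Illustrative usage, not driver code: callers gate recovery on the check
 * above, e.g. from a hypothetical hang handler:
 *
 *	if (amdgpu_device_should_recover_gpu(adev))
 *		r = amdgpu_device_gpu_recover(adev, job, &reset_context);
 */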
5705 
5706 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5707 {
5708 	u32 i;
5709 	int ret = 0;
5710 
5711 	if (adev->bios)
5712 		amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5713 
5714 	dev_info(adev->dev, "GPU mode1 reset\n");
5715 
5716 	/* Cache the state before bus master disable. The saved config space
5717 	 * values are used in other cases like restore after mode-2 reset.
5718 	 */
5719 	amdgpu_device_cache_pci_state(adev->pdev);
5720 
5721 	/* disable BM */
5722 	pci_clear_master(adev->pdev);
5723 
5724 	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5725 		dev_info(adev->dev, "GPU smu mode1 reset\n");
5726 		ret = amdgpu_dpm_mode1_reset(adev);
5727 	} else {
5728 		dev_info(adev->dev, "GPU psp mode1 reset\n");
5729 		ret = psp_gpu_reset(adev);
5730 	}
5731 
5732 	if (ret)
5733 		goto mode1_reset_failed;
5734 
5735 	amdgpu_device_load_pci_state(adev->pdev);
5736 	ret = amdgpu_psp_wait_for_bootloader(adev);
5737 	if (ret)
5738 		goto mode1_reset_failed;
5739 
5740 	/* wait for asic to come out of reset */
5741 	for (i = 0; i < adev->usec_timeout; i++) {
5742 		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5743 
5744 		if (memsize != 0xffffffff)
5745 			break;
5746 		udelay(1);
5747 	}
5748 
5749 	if (i >= adev->usec_timeout) {
5750 		ret = -ETIMEDOUT;
5751 		goto mode1_reset_failed;
5752 	}
5753 
5754 	if (adev->bios)
5755 		amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5756 
5757 	return 0;
5758 
5759 mode1_reset_failed:
5760 	dev_err(adev->dev, "GPU mode1 reset failed\n");
5761 	return ret;
5762 }
5763 
5764 int amdgpu_device_link_reset(struct amdgpu_device *adev)
5765 {
5766 	int ret = 0;
5767 
5768 	dev_info(adev->dev, "GPU link reset\n");
5769 
5770 	if (!amdgpu_reset_in_dpc(adev))
5771 		ret = amdgpu_dpm_link_reset(adev);
5772 
5773 	if (ret)
5774 		goto link_reset_failed;
5775 
5776 	ret = amdgpu_psp_wait_for_bootloader(adev);
5777 	if (ret)
5778 		goto link_reset_failed;
5779 
5780 	return 0;
5781 
5782 link_reset_failed:
5783 	dev_err(adev->dev, "GPU link reset failed\n");
5784 	return ret;
5785 }
5786 
5787 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5788 				 struct amdgpu_reset_context *reset_context)
5789 {
5790 	int i, r = 0;
5791 	struct amdgpu_job *job = NULL;
5792 	struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
5793 	bool need_full_reset =
5794 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5795 
5796 	if (reset_context->reset_req_dev == adev)
5797 		job = reset_context->job;
5798 
5799 	if (amdgpu_sriov_vf(adev))
5800 		amdgpu_virt_pre_reset(adev);
5801 
5802 	amdgpu_fence_driver_isr_toggle(adev, true);
5803 
5804 	/* block all schedulers and reset given job's ring */
5805 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5806 		struct amdgpu_ring *ring = adev->rings[i];
5807 
5808 		if (!amdgpu_ring_sched_ready(ring))
5809 			continue;
5810 
5811 		/* Clear job fences from the fence drv so that force_completion
5812 		 * doesn't leave NULL and vm flush fences in the fence drv
5813 		 */
5814 		amdgpu_fence_driver_clear_job_fences(ring);
5815 
5816 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5817 		amdgpu_fence_driver_force_completion(ring);
5818 	}
5819 
5820 	amdgpu_fence_driver_isr_toggle(adev, false);
5821 
5822 	if (job && job->vm)
5823 		drm_sched_increase_karma(&job->base);
5824 
5825 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5826 	/* If reset handler not implemented, continue; otherwise return */
5827 	if (r == -EOPNOTSUPP)
5828 		r = 0;
5829 	else
5830 		return r;
5831 
5832 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5833 	if (!amdgpu_sriov_vf(adev)) {
5834 
5835 		if (!need_full_reset)
5836 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5837 
5838 		if (!need_full_reset && amdgpu_gpu_recovery &&
5839 		    amdgpu_device_ip_check_soft_reset(adev)) {
5840 			amdgpu_device_ip_pre_soft_reset(adev);
5841 			r = amdgpu_device_ip_soft_reset(adev);
5842 			amdgpu_device_ip_post_soft_reset(adev);
5843 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5844 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5845 				need_full_reset = true;
5846 			}
5847 		}
5848 
5849 		if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
5850 			dev_info(tmp_adev->dev, "Dumping IP State\n");
5851 			/* Trigger ip dump before we reset the asic */
5852 			for (i = 0; i < tmp_adev->num_ip_blocks; i++)
5853 				if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
5854 					tmp_adev->ip_blocks[i].version->funcs
5855 						->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
5856 			dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
5857 		}
5858 
5859 		if (need_full_reset)
5860 			r = amdgpu_device_ip_suspend(adev);
5861 		if (need_full_reset)
5862 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5863 		else
5864 			clear_bit(AMDGPU_NEED_FULL_RESET,
5865 				  &reset_context->flags);
5866 	}
5867 
5868 	return r;
5869 }
5870 
5871 int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
5872 {
5873 	struct list_head *device_list_handle;
5874 	bool full_reset, vram_lost = false;
5875 	struct amdgpu_device *tmp_adev;
5876 	int r, init_level;
5877 
5878 	device_list_handle = reset_context->reset_device_list;
5879 
5880 	if (!device_list_handle)
5881 		return -EINVAL;
5882 
5883 	full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5884 
5885 	/*
5886 	 * If it's a reset on init, use the default init level; otherwise keep
5887 	 * the level as the recovery level.
5888 	 */
5889 	if (reset_context->method == AMD_RESET_METHOD_ON_INIT)
5890 		init_level = AMDGPU_INIT_LEVEL_DEFAULT;
5891 	else
5892 		init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY;
5893 
5894 	r = 0;
5895 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5896 		amdgpu_set_init_level(tmp_adev, init_level);
5897 		if (full_reset) {
5898 			/* post card */
5899 			amdgpu_reset_set_dpc_status(tmp_adev, false);
5900 			amdgpu_ras_clear_err_state(tmp_adev);
5901 			r = amdgpu_device_asic_init(tmp_adev);
5902 			if (r) {
5903 				dev_warn(tmp_adev->dev, "asic atom init failed!");
5904 			} else {
5905 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5906 
5907 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5908 				if (r)
5909 					goto out;
5910 
5911 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5912 
5913 				if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
5914 					amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
5915 
5916 				if (vram_lost) {
5917 					dev_info(
5918 						tmp_adev->dev,
5919 						"VRAM is lost due to GPU reset!\n");
5920 					amdgpu_inc_vram_lost(tmp_adev);
5921 				}
5922 
5923 				r = amdgpu_device_fw_loading(tmp_adev);
5924 				if (r)
5925 					return r;
5926 
5927 				r = amdgpu_xcp_restore_partition_mode(
5928 					tmp_adev->xcp_mgr);
5929 				if (r)
5930 					goto out;
5931 
5932 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5933 				if (r)
5934 					goto out;
5935 
5936 				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
5937 					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5938 
5939 				r = amdgpu_device_ip_resume_phase3(tmp_adev);
5940 				if (r)
5941 					goto out;
5942 
5943 				if (vram_lost)
5944 					amdgpu_device_fill_reset_magic(tmp_adev);
5945 
5946 				/*
5947 				 * Add this ASIC back as tracked, as the reset
5948 				 * already completed successfully.
5949 				 */
5950 				amdgpu_register_gpu_instance(tmp_adev);
5951 
5952 				if (!reset_context->hive &&
5953 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5954 					amdgpu_xgmi_add_device(tmp_adev);
5955 
5956 				r = amdgpu_device_ip_late_init(tmp_adev);
5957 				if (r)
5958 					goto out;
5959 
5960 				drm_client_dev_resume(adev_to_drm(tmp_adev), false);
5961 
5962 				/*
5963 				 * The GPU enters a bad state once the number
5964 				 * of faulty pages caught by ECC reaches the
5965 				 * threshold, and ras recovery is scheduled
5966 				 * next. So check here to break recovery if the
5967 				 * bad page threshold is indeed exceeded, and
5968 				 * remind the user to retire this GPU or set a
5969 				 * bigger bad_page_threshold value the next
5970 				 * time the driver is probed.
5971 				 */
5972 				if (!amdgpu_ras_is_rma(tmp_adev)) {
5973 					/* must succeed. */
5974 					amdgpu_ras_resume(tmp_adev);
5975 				} else {
5976 					r = -EINVAL;
5977 					goto out;
5978 				}
5979 
5980 				/* Update PSP FW topology after reset */
5981 				if (reset_context->hive &&
5982 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5983 					r = amdgpu_xgmi_update_topology(
5984 						reset_context->hive, tmp_adev);
5985 			}
5986 		}
5987 
5988 out:
5989 		if (!r) {
5990 			/* IP init is complete now, set level as default */
5991 			amdgpu_set_init_level(tmp_adev,
5992 					      AMDGPU_INIT_LEVEL_DEFAULT);
5993 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5994 			r = amdgpu_ib_ring_tests(tmp_adev);
5995 			if (r) {
5996 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5997 				r = -EAGAIN;
5998 				goto end;
5999 			}
6000 		}
6001 
6002 		if (r)
6003 			tmp_adev->asic_reset_res = r;
6004 	}
6005 
6006 end:
6007 	return r;
6008 }
6009 
6010 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
6011 			 struct amdgpu_reset_context *reset_context)
6012 {
6013 	struct amdgpu_device *tmp_adev = NULL;
6014 	bool need_full_reset, skip_hw_reset;
6015 	int r = 0;
6016 
6017 	/* Try reset handler method first */
6018 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
6019 				    reset_list);
6020 
6021 	reset_context->reset_device_list = device_list_handle;
6022 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
6023 	/* If reset handler not implemented, continue; otherwise return */
6024 	if (r == -EOPNOTSUPP)
6025 		r = 0;
6026 	else
6027 		return r;
6028 
6029 	/* Reset handler not implemented, use the default method */
6030 	need_full_reset =
6031 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
6032 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
6033 
6034 	/*
6035 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
6036 	 * to allow proper link negotiation in FW (within 1 sec)
6037 	 */
6038 	if (!skip_hw_reset && need_full_reset) {
6039 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
6040 			/* For XGMI run all resets in parallel to speed up the process */
6041 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
6042 				if (!queue_work(system_unbound_wq,
6043 						&tmp_adev->xgmi_reset_work))
6044 					r = -EALREADY;
6045 			} else
6046 				r = amdgpu_asic_reset(tmp_adev);
6047 
6048 			if (r) {
6049 				dev_err(tmp_adev->dev,
6050 					"ASIC reset failed with error, %d for drm dev, %s",
6051 					r, adev_to_drm(tmp_adev)->unique);
6052 				goto out;
6053 			}
6054 		}
6055 
6056 		/* For XGMI, wait for all resets to complete before proceeding */
6057 		if (!r) {
6058 			list_for_each_entry(tmp_adev, device_list_handle,
6059 					    reset_list) {
6060 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
6061 					flush_work(&tmp_adev->xgmi_reset_work);
6062 					r = tmp_adev->asic_reset_res;
6063 					if (r)
6064 						break;
6065 				}
6066 			}
6067 		}
6068 	}
6069 
6070 	if (!r && amdgpu_ras_intr_triggered()) {
6071 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
6072 			amdgpu_ras_reset_error_count(tmp_adev,
6073 						     AMDGPU_RAS_BLOCK__MMHUB);
6074 		}
6075 
6076 		amdgpu_ras_intr_cleared();
6077 	}
6078 
6079 	r = amdgpu_device_reinit_after_reset(reset_context);
6080 	if (r == -EAGAIN)
6081 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
6082 	else
6083 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
6084 
6085 out:
6086 	return r;
6087 }
6088 
6089 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
6090 {
6091 
6092 	switch (amdgpu_asic_reset_method(adev)) {
6093 	case AMD_RESET_METHOD_MODE1:
6094 	case AMD_RESET_METHOD_LINK:
6095 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
6096 		break;
6097 	case AMD_RESET_METHOD_MODE2:
6098 		adev->mp1_state = PP_MP1_STATE_RESET;
6099 		break;
6100 	default:
6101 		adev->mp1_state = PP_MP1_STATE_NONE;
6102 		break;
6103 	}
6104 }
6105 
6106 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
6107 {
6108 	amdgpu_vf_error_trans_all(adev);
6109 	adev->mp1_state = PP_MP1_STATE_NONE;
6110 }
6111 
6112 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
6113 {
6114 	struct pci_dev *p = NULL;
6115 
6116 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
6117 			adev->pdev->bus->number, 1);
6118 	if (p) {
6119 		pm_runtime_enable(&(p->dev));
6120 		pm_runtime_resume(&(p->dev));
6121 	}
6122 
6123 	pci_dev_put(p);
6124 }
6125 
6126 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
6127 {
6128 	enum amd_reset_method reset_method;
6129 	struct pci_dev *p = NULL;
6130 	u64 expires;
6131 
6132 	/*
6133 	 * For now, only BACO and mode1 reset are confirmed to suffer
6134 	 * the audio issue if the audio device is not properly suspended.
6135 	 */
6136 	reset_method = amdgpu_asic_reset_method(adev);
6137 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
6138 	     (reset_method != AMD_RESET_METHOD_MODE1))
6139 		return -EINVAL;
6140 
6141 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
6142 			adev->pdev->bus->number, 1);
6143 	if (!p)
6144 		return -ENODEV;
6145 
6146 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
6147 	if (!expires)
6148 		/*
6149 		 * If we cannot get the audio device autosuspend delay,
6150 		 * a fixed 4s interval is used. Since 3s is the audio
6151 		 * controller's default autosuspend delay setting, the 4s
6152 		 * used here is guaranteed to cover that.
6153 		 */
6154 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
6155 
6156 	while (!pm_runtime_status_suspended(&(p->dev))) {
6157 		if (!pm_runtime_suspend(&(p->dev)))
6158 			break;
6159 
6160 		if (expires < ktime_get_mono_fast_ns()) {
6161 			dev_warn(adev->dev, "failed to suspend display audio\n");
6162 			pci_dev_put(p);
6163 			/* TODO: abort the succeeding gpu reset? */
6164 			return -ETIMEDOUT;
6165 		}
6166 	}
6167 
6168 	pm_runtime_disable(&(p->dev));
6169 
6170 	pci_dev_put(p);
6171 	return 0;
6172 }
6173 
6174 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
6175 {
6176 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
6177 
6178 #if defined(CONFIG_DEBUG_FS)
6179 	if (!amdgpu_sriov_vf(adev))
6180 		cancel_work(&adev->reset_work);
6181 #endif
6182 
6183 	if (adev->kfd.dev)
6184 		cancel_work(&adev->kfd.reset_work);
6185 
6186 	if (amdgpu_sriov_vf(adev))
6187 		cancel_work(&adev->virt.flr_work);
6188 
6189 	if (con && adev->ras_enabled)
6190 		cancel_work(&con->recovery_work);
6191 
6192 }
6193 
6194 static int amdgpu_device_health_check(struct list_head *device_list_handle)
6195 {
6196 	struct amdgpu_device *tmp_adev;
6197 	int ret = 0;
6198 
6199 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
6200 		ret |= amdgpu_device_bus_status_check(tmp_adev);
6201 	}
6202 
6203 	return ret;
6204 }
6205 
6206 static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
6207 					  struct list_head *device_list,
6208 					  struct amdgpu_hive_info *hive)
6209 {
6210 	struct amdgpu_device *tmp_adev = NULL;
6211 
6212 	/*
6213 	 * Build list of devices to reset.
6214 	 * In case we are in XGMI hive mode, resort the device list
6215 	 * to put adev in the 1st position.
6216 	 */
6217 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
6218 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
6219 			list_add_tail(&tmp_adev->reset_list, device_list);
6220 			if (adev->shutdown)
6221 				tmp_adev->shutdown = true;
6222 			if (amdgpu_reset_in_dpc(adev))
6223 				tmp_adev->pcie_reset_ctx.in_link_reset = true;
6224 		}
6225 		if (!list_is_first(&adev->reset_list, device_list))
6226 			list_rotate_to_front(&adev->reset_list, device_list);
6227 	} else {
6228 		list_add_tail(&adev->reset_list, device_list);
6229 	}
6230 }
6231 
6232 static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
6233 						  struct list_head *device_list)
6234 {
6235 	struct amdgpu_device *tmp_adev = NULL;
6236 
6237 	if (list_empty(device_list))
6238 		return;
6239 	tmp_adev =
6240 		list_first_entry(device_list, struct amdgpu_device, reset_list);
6241 	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
6242 }
6243 
6244 static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
6245 						  struct list_head *device_list)
6246 {
6247 	struct amdgpu_device *tmp_adev = NULL;
6248 
6249 	if (list_empty(device_list))
6250 		return;
6251 	tmp_adev =
6252 		list_first_entry(device_list, struct amdgpu_device, reset_list);
6253 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
6254 }
6255 
6256 static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
6257 					  struct amdgpu_job *job,
6258 					  struct amdgpu_reset_context *reset_context,
6259 					  struct list_head *device_list,
6260 					  struct amdgpu_hive_info *hive,
6261 					  bool need_emergency_restart)
6262 {
6263 	struct amdgpu_device *tmp_adev = NULL;
6264 	int i;
6265 
6266 	/* block all schedulers and reset given job's ring */
6267 	list_for_each_entry(tmp_adev, device_list, reset_list) {
6268 		amdgpu_device_set_mp1_state(tmp_adev);
6269 
6270 		/*
6271 		 * Try to put the audio codec into the suspend state
6272 		 * before the gpu reset starts.
6273 		 *
6274 		 * The power domain of the graphics device is shared
6275 		 * with the AZ power domain, so without this we may
6276 		 * change the audio hardware from behind the audio
6277 		 * driver's back. That would trigger audio codec
6278 		 * errors.
6279 		 */
6280 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
6281 			tmp_adev->pcie_reset_ctx.audio_suspended = true;
6282 
6283 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
6284 
6285 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
6286 
6287 		amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
6288 
6289 		/*
6290 		 * Mark these ASICs to be reset as untracked first,
6291 		 * and add them back after the reset completes.
6292 		 */
6293 		amdgpu_unregister_gpu_instance(tmp_adev);
6294 
6295 		drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
6296 
6297 		/* disable ras on ALL IPs */
6298 		if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
6299 		    amdgpu_device_ip_need_full_reset(tmp_adev))
6300 			amdgpu_ras_suspend(tmp_adev);
6301 
6302 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6303 			struct amdgpu_ring *ring = tmp_adev->rings[i];
6304 
6305 			if (!amdgpu_ring_sched_ready(ring))
6306 				continue;
6307 
6308 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
6309 
6310 			if (need_emergency_restart)
6311 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
6312 		}
6313 		atomic_inc(&tmp_adev->gpu_reset_counter);
6314 	}
6315 }
6316 
6317 static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
6318 			      struct list_head *device_list,
6319 			      struct amdgpu_reset_context *reset_context)
6320 {
6321 	struct amdgpu_device *tmp_adev = NULL;
6322 	int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
6323 	int r = 0;
6324 
6325 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
6326 	list_for_each_entry(tmp_adev, device_list, reset_list) {
6327 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
6328 		/* TODO: Should we stop? */
6329 		if (r) {
6330 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
6331 				  r, adev_to_drm(tmp_adev)->unique);
6332 			tmp_adev->asic_reset_res = r;
6333 		}
6334 	}
6335 
6336 	/* Actual ASIC resets if needed.*/
6337 	/* Host driver will handle XGMI hive reset for SRIOV */
6338 	if (amdgpu_sriov_vf(adev)) {
6339 
6340 		/* Bail out of reset early */
6341 		if (amdgpu_ras_is_rma(adev))
6342 			return -ENODEV;
6343 
6344 		if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
6345 			dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
6346 			amdgpu_ras_set_fed(adev, true);
6347 			set_bit(AMDGPU_HOST_FLR, &reset_context->flags);
6348 		}
6349 
6350 		r = amdgpu_device_reset_sriov(adev, reset_context);
6351 		if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
6352 			amdgpu_virt_release_full_gpu(adev, true);
6353 			goto retry;
6354 		}
6355 		if (r)
6356 			adev->asic_reset_res = r;
6357 	} else {
6358 		r = amdgpu_do_asic_reset(device_list, reset_context);
6359 		if (r && r == -EAGAIN)
6360 			goto retry;
6361 	}
6362 
6363 	list_for_each_entry(tmp_adev, device_list, reset_list) {
6364 		/*
6365 		 * Drop any pending non scheduler resets queued before reset is done.
6366 		 * Any reset scheduled after this point would be valid. Scheduler resets
6367 		 * were already dropped during drm_sched_stop and no new ones can come
6368 		 * in before drm_sched_start.
6369 		 */
6370 		amdgpu_device_stop_pending_resets(tmp_adev);
6371 	}
6372 
6373 	return r;
6374 }
6375 
6376 static int amdgpu_device_sched_resume(struct list_head *device_list,
6377 			      struct amdgpu_reset_context *reset_context,
6378 			      bool   job_signaled)
6379 {
6380 	struct amdgpu_device *tmp_adev = NULL;
6381 	int i, r = 0;
6382 
6383 	/* Post ASIC reset for all devs. */
6384 	list_for_each_entry(tmp_adev, device_list, reset_list) {
6385 
6386 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6387 			struct amdgpu_ring *ring = tmp_adev->rings[i];
6388 
6389 			if (!amdgpu_ring_sched_ready(ring))
6390 				continue;
6391 
6392 			drm_sched_start(&ring->sched, 0);
6393 		}
6394 
6395 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
6396 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
6397 
6398 		if (tmp_adev->asic_reset_res) {
6399 			/* bad news, how do we tell it to userspace?
6400 			 * For a ras error, we should report GPU bad status
6401 			 * instead of reset failure.
6402 			 */
6403 			if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
6404 			    !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
6405 				dev_info(
6406 					tmp_adev->dev,
6407 					"GPU reset(%d) failed with error %d \n",
6408 					atomic_read(
6409 						&tmp_adev->gpu_reset_counter),
6410 					tmp_adev->asic_reset_res);
6411 			amdgpu_vf_error_put(tmp_adev,
6412 					    AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
6413 					    tmp_adev->asic_reset_res);
6414 			if (!r)
6415 				r = tmp_adev->asic_reset_res;
6416 			tmp_adev->asic_reset_res = 0;
6417 		} else {
6418 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
6419 				 atomic_read(&tmp_adev->gpu_reset_counter));
6420 			if (amdgpu_acpi_smart_shift_update(tmp_adev,
6421 							   AMDGPU_SS_DEV_D0))
6422 				dev_warn(tmp_adev->dev,
6423 					 "smart shift update failed\n");
6424 		}
6425 	}
6426 
6427 	return r;
6428 }
6429 
6430 static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
6431 			      struct list_head *device_list,
6432 			      bool   need_emergency_restart)
6433 {
6434 	struct amdgpu_device *tmp_adev = NULL;
6435 
6436 	list_for_each_entry(tmp_adev, device_list, reset_list) {
6437 		/* unlock kfd: SRIOV would do it separately */
6438 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
6439 			amdgpu_amdkfd_post_reset(tmp_adev);
6440 
6441 		/* kfd_post_reset will do nothing if the kfd device is not
6442 		 * initialized, so bring up kfd here if it wasn't initialized before
6443 		 */
6444 		if (!adev->kfd.init_complete)
6445 			amdgpu_amdkfd_device_init(adev);
6446 
6447 		if (tmp_adev->pcie_reset_ctx.audio_suspended)
6448 			amdgpu_device_resume_display_audio(tmp_adev);
6449 
6450 		amdgpu_device_unset_mp1_state(tmp_adev);
6451 
6452 		amdgpu_ras_set_error_query_ready(tmp_adev, true);
6453 
6454 	}
6455 }
6456 
6457 
6458 /**
6459  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
6460  *
6461  * @adev: amdgpu_device pointer
6462  * @job: which job trigger hang
6463  * @reset_context: amdgpu reset context pointer
6464  *
6465  * Attempt to reset the GPU if it has hung (all asics).
6466  * Attempt a soft reset or a full reset and reinitialize the ASIC.
6467  * Returns 0 for success or an error on failure.
6468  */
6469 
6470 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
6471 			      struct amdgpu_job *job,
6472 			      struct amdgpu_reset_context *reset_context)
6473 {
6474 	struct list_head device_list;
6475 	bool job_signaled = false;
6476 	struct amdgpu_hive_info *hive = NULL;
6477 	int r = 0;
6478 	bool need_emergency_restart = false;
6479 
6480 	/*
6481 	 * If it reaches here because of hang/timeout and a RAS error is
6482 	 * detected at the same time, let RAS recovery take care of it.
6483 	 */
6484 	if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
6485 	    !amdgpu_sriov_vf(adev) &&
6486 	    reset_context->src != AMDGPU_RESET_SRC_RAS) {
6487 		dev_dbg(adev->dev,
6488 			"Gpu recovery from source: %d yielding to RAS error recovery handling",
6489 			reset_context->src);
6490 		return 0;
6491 	}
6492 
6493 	/*
6494 	 * Special case: RAS triggered and full reset isn't supported
6495 	 */
6496 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
6497 
6498 	/*
6499 	 * Flush RAM to disk so that after reboot
6500 	 * the user can read the log and see why the system rebooted.
6501 	 */
6502 	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
6503 		amdgpu_ras_get_context(adev)->reboot) {
6504 		dev_warn(adev->dev, "Emergency reboot.");
6505 
6506 		ksys_sync_helper();
6507 		emergency_restart();
6508 	}
6509 
6510 	dev_info(adev->dev, "GPU %s begin!. Source:  %d\n",
6511 		 need_emergency_restart ? "jobs stop" : "reset",
6512 		 reset_context->src);
6513 
6514 	if (!amdgpu_sriov_vf(adev))
6515 		hive = amdgpu_get_xgmi_hive(adev);
6516 	if (hive)
6517 		mutex_lock(&hive->hive_lock);
6518 
6519 	reset_context->job = job;
6520 	reset_context->hive = hive;
6521 	INIT_LIST_HEAD(&device_list);
6522 
6523 	amdgpu_device_recovery_prepare(adev, &device_list, hive);
6524 
6525 	if (!amdgpu_sriov_vf(adev)) {
6526 		r = amdgpu_device_health_check(&device_list);
6527 		if (r)
6528 			goto end_reset;
6529 	}
6530 
6531 	/* We need to lock reset domain only once both for XGMI and single device */
6532 	amdgpu_device_recovery_get_reset_lock(adev, &device_list);
6533 
6534 	amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
6535 				      hive, need_emergency_restart);
6536 	if (need_emergency_restart)
6537 		goto skip_sched_resume;
6538 	/*
6539 	 * Must check guilty signal here since after this point all old
6540 	 * HW fences are force signaled.
6541 	 *
6542 	 * job->base holds a reference to parent fence
6543 	 */
6544 	if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
6545 		job_signaled = true;
6546 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
6547 		goto skip_hw_reset;
6548 	}
6549 
6550 	r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
6551 	if (r)
6552 		goto reset_unlock;
6553 skip_hw_reset:
6554 	r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
6555 	if (r)
6556 		goto reset_unlock;
6557 skip_sched_resume:
6558 	amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
6559 reset_unlock:
6560 	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
6561 end_reset:
6562 	if (hive) {
6563 		mutex_unlock(&hive->hive_lock);
6564 		amdgpu_put_xgmi_hive(hive);
6565 	}
6566 
6567 	if (r)
6568 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
6569 
6570 	atomic_set(&adev->reset_domain->reset_res, r);
6571 
6572 	if (!r) {
6573 		struct amdgpu_task_info *ti = NULL;
6574 
6575 		if (job)
6576 			ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
6577 
6578 		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
6579 				     ti ? &ti->task : NULL);
6580 
6581 		amdgpu_vm_put_task_info(ti);
6582 	}
6583 
6584 	return r;
6585 }
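
/*
 * Illustrative sketch, not driver code: driving a recovery from a
 * hypothetical hang handler. The fields set below mirror what this file
 * itself consults (reset_req_dev, src, flags); treat the exact values as
 * assumptions.
 *
 *	struct amdgpu_reset_context reset_context = { 0 };
 *
 *	reset_context.method = AMD_RESET_METHOD_NONE;	// let the core choose
 *	reset_context.reset_req_dev = adev;
 *	reset_context.src = AMDGPU_RESET_SRC_JOB;	// assumed source tag
 *	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 *
 *	r = amdgpu_device_gpu_recover(adev, job, &reset_context);
 */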
6586 
6587 /**
6588  * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
6589  *
6590  * @adev: amdgpu_device pointer
6591  * @speed: pointer to the speed of the link
6592  * @width: pointer to the width of the link
6593  *
6594  * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
6595  * first physical partner to an AMD dGPU.
6596  * This will exclude any virtual switches and links.
6597  */
6598 static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
6599 					    enum pci_bus_speed *speed,
6600 					    enum pcie_link_width *width)
6601 {
6602 	struct pci_dev *parent = adev->pdev;
6603 
6604 	if (!speed || !width)
6605 		return;
6606 
6607 	*speed = PCI_SPEED_UNKNOWN;
6608 	*width = PCIE_LNK_WIDTH_UNKNOWN;
6609 
6610 	if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
6611 		while ((parent = pci_upstream_bridge(parent))) {
6612 			/* skip upstream/downstream switches internal to dGPU*/
6613 			if (parent->vendor == PCI_VENDOR_ID_ATI)
6614 				continue;
6615 			*speed = pcie_get_speed_cap(parent);
6616 			*width = pcie_get_width_cap(parent);
6617 			break;
6618 		}
6619 	} else {
6620 		/* use the current speeds rather than max if switching is not supported */
6621 		pcie_bandwidth_available(adev->pdev, NULL, speed, width);
6622 	}
6623 }
6624 
6625 /**
6626  * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
6627  *
6628  * @adev: amdgpu_device pointer
6629  * @speed: pointer to the speed of the link
6630  * @width: pointer to the width of the link
6631  *
6632  * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
6633  * AMD dGPU which may be a virtual upstream bridge.
6634  */
6635 static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
6636 					enum pci_bus_speed *speed,
6637 					enum pcie_link_width *width)
6638 {
6639 	struct pci_dev *parent = adev->pdev;
6640 
6641 	if (!speed || !width)
6642 		return;
6643 
6644 	parent = pci_upstream_bridge(parent);
6645 	if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
6646 		/* use the upstream/downstream switches internal to dGPU */
6647 		*speed = pcie_get_speed_cap(parent);
6648 		*width = pcie_get_width_cap(parent);
6649 		while ((parent = pci_upstream_bridge(parent))) {
6650 			if (parent->vendor == PCI_VENDOR_ID_ATI) {
6651 				/* use the upstream/downstream switches internal to dGPU */
6652 				*speed = pcie_get_speed_cap(parent);
6653 				*width = pcie_get_width_cap(parent);
6654 			}
6655 		}
6656 	} else {
6657 		/* use the device itself */
6658 		*speed = pcie_get_speed_cap(adev->pdev);
6659 		*width = pcie_get_width_cap(adev->pdev);
6660 	}
6661 }
6662 
6663 /**
6664  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
6665  *
6666  * @adev: amdgpu_device pointer
6667  *
6668  * Fetches and stores in the driver the PCIE capabilities (gen speed
6669  * and lanes) of the slot the device is in. Handles APUs and
6670  * virtualized environments where PCIE config space may not be available.
6671  */
6672 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
6673 {
6674 	enum pci_bus_speed speed_cap, platform_speed_cap;
6675 	enum pcie_link_width platform_link_width, link_width;
6676 
6677 	if (amdgpu_pcie_gen_cap)
6678 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
6679 
6680 	if (amdgpu_pcie_lane_cap)
6681 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
6682 
6683 	/* covers APUs as well */
6684 	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
6685 		if (adev->pm.pcie_gen_mask == 0)
6686 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
6687 		if (adev->pm.pcie_mlw_mask == 0)
6688 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
6689 		return;
6690 	}
6691 
6692 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
6693 		return;
6694 
6695 	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
6696 					&platform_link_width);
6697 	amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
6698 
6699 	if (adev->pm.pcie_gen_mask == 0) {
6700 		/* asic caps */
6701 		if (speed_cap == PCI_SPEED_UNKNOWN) {
6702 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6703 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6704 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6705 		} else {
6706 			if (speed_cap == PCIE_SPEED_32_0GT)
6707 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6708 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6709 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6710 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6711 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
6712 			else if (speed_cap == PCIE_SPEED_16_0GT)
6713 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6714 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6715 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6716 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
6717 			else if (speed_cap == PCIE_SPEED_8_0GT)
6718 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6719 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6720 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6721 			else if (speed_cap == PCIE_SPEED_5_0GT)
6722 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6723 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
6724 			else
6725 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
6726 		}
6727 		/* platform caps */
6728 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
6729 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6730 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6731 		} else {
6732 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
6733 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6734 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6735 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6736 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6737 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
6738 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
6739 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6740 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6741 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6742 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
6743 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
6744 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6745 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6746 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
6747 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
6748 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6749 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6750 			else
6751 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
6752 
6753 		}
6754 	}
6755 	if (adev->pm.pcie_mlw_mask == 0) {
6756 		/* asic caps */
6757 		if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6758 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
6759 		} else {
6760 			switch (link_width) {
6761 			case PCIE_LNK_X32:
6762 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
6763 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6764 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6765 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6766 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6767 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6768 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6769 				break;
6770 			case PCIE_LNK_X16:
6771 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6772 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6773 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6774 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6775 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6776 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6777 				break;
6778 			case PCIE_LNK_X12:
6779 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6780 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6781 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6782 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6783 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6784 				break;
6785 			case PCIE_LNK_X8:
6786 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6787 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6788 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6789 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6790 				break;
6791 			case PCIE_LNK_X4:
6792 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6793 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6794 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6795 				break;
6796 			case PCIE_LNK_X2:
6797 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6798 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6799 				break;
6800 			case PCIE_LNK_X1:
6801 				adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
6802 				break;
6803 			default:
6804 				break;
6805 			}
6806 		}
6807 		/* platform caps */
6808 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6809 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
6810 		} else {
6811 			switch (platform_link_width) {
6812 			case PCIE_LNK_X32:
6813 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
6814 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6815 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6816 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6817 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6818 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6819 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6820 				break;
6821 			case PCIE_LNK_X16:
6822 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6823 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6824 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6825 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6826 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6827 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6828 				break;
6829 			case PCIE_LNK_X12:
6830 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6831 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6832 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6833 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6834 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6835 				break;
6836 			case PCIE_LNK_X8:
6837 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6838 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6839 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6840 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6841 				break;
6842 			case PCIE_LNK_X4:
6843 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6844 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6845 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6846 				break;
6847 			case PCIE_LNK_X2:
6848 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6849 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6850 				break;
6851 			case PCIE_LNK_X1:
6852 				adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
6853 				break;
6854 			default:
6855 				break;
6856 			}
6857 		}
6858 	}
6859 }
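
/*
 * Illustrative sketch, not part of the driver: one way a consumer could
 * decode the masks cached by amdgpu_device_get_pcie_info(). The helper
 * name is hypothetical; the CAIL_* masks come from amd_pcie.h.
 */
static inline bool amdgpu_example_link_supports_gen3(struct amdgpu_device *adev)
{
	/* both the platform and the ASIC must advertise Gen3 */
	return (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
	       (adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
}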
6860 
6861 /**
6862  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
6863  *
6864  * @adev: amdgpu_device pointer
6865  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
6866  *
6867  * Return true if @peer_adev can access (DMA) @adev through the PCIe
6868  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
6869  * @peer_adev.
6870  */
6871 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
6872 				      struct amdgpu_device *peer_adev)
6873 {
6874 #ifdef CONFIG_HSA_AMD_P2P
6875 	bool p2p_access =
6876 		!adev->gmc.xgmi.connected_to_cpu &&
6877 		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
6878 	if (!p2p_access)
6879 		dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
6880 			pci_name(peer_adev->pdev));
6881 
6882 	bool is_large_bar = adev->gmc.visible_vram_size &&
6883 		adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
6884 	bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
6885 
6886 	if (!p2p_addressable) {
6887 		uint64_t address_mask = peer_adev->dev->dma_mask ?
6888 			~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
6889 		resource_size_t aper_limit =
6890 			adev->gmc.aper_base + adev->gmc.aper_size - 1;
6891 
6892 		p2p_addressable = !(adev->gmc.aper_base & address_mask ||
6893 				     aper_limit & address_mask);
6894 	}
6895 	return pcie_p2p && is_large_bar && p2p_access && p2p_addressable;
6896 #else
6897 	return false;
6898 #endif
6899 }
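
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): P2P
 * DMA is only useful when peer access holds in both directions, so a
 * caller would typically check the pair symmetrically.
 */
static inline bool amdgpu_example_p2p_possible(struct amdgpu_device *a,
					       struct amdgpu_device *b)
{
	return amdgpu_device_is_peer_accessible(a, b) &&
	       amdgpu_device_is_peer_accessible(b, a);
}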
6900 
6901 int amdgpu_device_baco_enter(struct amdgpu_device *adev)
6902 {
6903 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6904 
6905 	if (!amdgpu_device_supports_baco(adev))
6906 		return -ENOTSUPP;
6907 
6908 	if (ras && adev->ras_enabled &&
6909 	    adev->nbio.funcs->enable_doorbell_interrupt)
6910 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6911 
6912 	return amdgpu_dpm_baco_enter(adev);
6913 }
6914 
6915 int amdgpu_device_baco_exit(struct amdgpu_device *adev)
6916 {
6917 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6918 	int ret = 0;
6919 
6920 	if (!amdgpu_device_supports_baco(adev))
6921 		return -ENOTSUPP;
6922 
6923 	ret = amdgpu_dpm_baco_exit(adev);
6924 	if (ret)
6925 		return ret;
6926 
6927 	if (ras && adev->ras_enabled &&
6928 	    adev->nbio.funcs->enable_doorbell_interrupt)
6929 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6930 
6931 	if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
6932 	    adev->nbio.funcs->clear_doorbell_interrupt)
6933 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6934 
6935 	return 0;
6936 }
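
/*
 * Illustrative sketch (hypothetical caller): BACO entry and exit are
 * paired; a failed entry must not be followed by an exit.
 */
static inline int amdgpu_example_baco_cycle(struct amdgpu_device *adev)
{
	int r = amdgpu_device_baco_enter(adev);

	if (r)
		return r;
	/* ... the device sits in BACO here ... */
	return amdgpu_device_baco_exit(adev);
}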
6937 
6938 /**
6939  * amdgpu_pci_error_detected - Called when a PCI error is detected.
6940  * @pdev: PCI device struct
6941  * @state: PCI channel state
6942  *
6943  * Description: Called when a PCI error is detected.
6944  *
6945  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6946  */
6947 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6948 {
6949 	struct drm_device *dev = pci_get_drvdata(pdev);
6950 	struct amdgpu_device *adev = drm_to_adev(dev);
6951 	struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
6952 		amdgpu_get_xgmi_hive(adev);
6953 	struct amdgpu_reset_context reset_context;
6954 	struct list_head device_list;
6955 
6956 	dev_info(adev->dev, "PCI error: detected callback!!\n");
6957 
6958 	adev->pci_channel_state = state;
6959 
6960 	switch (state) {
6961 	case pci_channel_io_normal:
6962 		dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
6963 		return PCI_ERS_RESULT_CAN_RECOVER;
6964 	case pci_channel_io_frozen:
6965 		/* Fatal error, prepare for slot reset */
6966 		dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
6967 		if (hive) {
6968 			/* Hive devices should be able to support FW-based
6969 			 * link reset on other devices; if not, return.
6970 			 */
6971 			if (!amdgpu_dpm_is_link_reset_supported(adev)) {
6972 				dev_warn(adev->dev,
6973 					 "No support for XGMI hive yet...\n");
6974 				return PCI_ERS_RESULT_DISCONNECT;
6975 			}
6976 			/* Set dpc status only if the device is part of a hive.
6977 			 * Non-hive devices should be able to recover after
6978 			 * link reset.
6979 			 */
6980 			amdgpu_reset_set_dpc_status(adev, true);
6981 
6982 			mutex_lock(&hive->hive_lock);
6983 		}
6984 		memset(&reset_context, 0, sizeof(reset_context));
6985 		INIT_LIST_HEAD(&device_list);
6986 
6987 		amdgpu_device_recovery_prepare(adev, &device_list, hive);
6988 		amdgpu_device_recovery_get_reset_lock(adev, &device_list);
6989 		amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
6990 					      hive, false);
6991 		if (hive)
6992 			mutex_unlock(&hive->hive_lock);
6993 		return PCI_ERS_RESULT_NEED_RESET;
6994 	case pci_channel_io_perm_failure:
6995 		/* Permanent error, prepare for device removal */
6996 		dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
6997 		return PCI_ERS_RESULT_DISCONNECT;
6998 	}
6999 
7000 	return PCI_ERS_RESULT_NEED_RESET;
7001 }
7002 
7003 /**
7004  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
7005  * @pdev: pointer to PCI device
7006  */
7007 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
7008 {
7009 	struct drm_device *dev = pci_get_drvdata(pdev);
7010 	struct amdgpu_device *adev = drm_to_adev(dev);
7011 
7012 	dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
7013 
7014 	/* TODO - dump whatever for debugging purposes */
7015 
7016 	/* This is called only if amdgpu_pci_error_detected returns
7017 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
7018 	 * works, no need to reset slot.
7019 	 */
7020 
7021 	return PCI_ERS_RESULT_RECOVERED;
7022 }
7023 
7024 /**
7025  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
7026  * @pdev: PCI device struct
7027  *
7028  * Description: This routine is called by the pci error recovery
7029  * code after the PCI slot has been reset, just before we
7030  * should resume normal operations.
7031  */
7032 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
7033 {
7034 	struct drm_device *dev = pci_get_drvdata(pdev);
7035 	struct amdgpu_device *adev = drm_to_adev(dev);
7036 	struct amdgpu_reset_context reset_context;
7037 	struct amdgpu_device *tmp_adev;
7038 	struct amdgpu_hive_info *hive;
7039 	struct list_head device_list;
7040 	struct pci_dev *link_dev;
7041 	int r = 0, i, timeout;
7042 	u32 memsize;
7043 	u16 status;
7044 
7045 	dev_info(adev->dev, "PCI error: slot reset callback!!\n");
7046 
7047 	memset(&reset_context, 0, sizeof(reset_context));
7048 
7049 	if (adev->pcie_reset_ctx.swus)
7050 		link_dev = adev->pcie_reset_ctx.swus;
7051 	else
7052 		link_dev = adev->pdev;
7053 	/* wait for asic to come out of reset, timeout = 10s */
7054 	timeout = 10000;
7055 	do {
7056 		usleep_range(10000, 10500);
7057 		r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
7058 		timeout -= 10;
7059 	} while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
7060 		 (status != PCI_VENDOR_ID_AMD));
7061 
7062 	if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
7063 		r = -ETIME;
7064 		goto out;
7065 	}
7066 
7067 	amdgpu_device_load_switch_state(adev);
7068 	/* Restore PCI confspace */
7069 	amdgpu_device_load_pci_state(pdev);
7070 
7071 	/* confirm ASIC came out of reset */
7072 	for (i = 0; i < adev->usec_timeout; i++) {
7073 		memsize = amdgpu_asic_get_config_memsize(adev);
7074 
7075 		if (memsize != 0xffffffff)
7076 			break;
7077 		udelay(1);
7078 	}
7079 	if (memsize == 0xffffffff) {
7080 		r = -ETIME;
7081 		goto out;
7082 	}
7083 
7084 	reset_context.method = AMD_RESET_METHOD_NONE;
7085 	reset_context.reset_req_dev = adev;
7086 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
7087 	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
7088 	INIT_LIST_HEAD(&device_list);
7089 
7090 	hive = amdgpu_get_xgmi_hive(adev);
7091 	if (hive) {
7092 		mutex_lock(&hive->hive_lock);
7093 		reset_context.hive = hive;
7094 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
7095 			tmp_adev->pcie_reset_ctx.in_link_reset = true;
7096 			list_add_tail(&tmp_adev->reset_list, &device_list);
7097 		}
7098 	} else {
7099 		set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
7100 		list_add_tail(&adev->reset_list, &device_list);
7101 	}
7102 
7103 	r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
7104 out:
7105 	if (!r) {
7106 		if (amdgpu_device_cache_pci_state(adev->pdev))
7107 			pci_restore_state(adev->pdev);
7108 		dev_info(adev->dev, "PCIe error recovery succeeded\n");
7109 	} else {
7110 		dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
7111 		if (hive) {
7112 			list_for_each_entry(tmp_adev, &device_list, reset_list)
7113 				amdgpu_device_unset_mp1_state(tmp_adev);
7114 		}
7115 		amdgpu_device_recovery_put_reset_lock(adev, &device_list);
7116 	}
7117 
7118 	if (hive) {
7119 		mutex_unlock(&hive->hive_lock);
7120 		amdgpu_put_xgmi_hive(hive);
7121 	}
7122 
7123 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
7124 }
7125 
7126 /**
7127  * amdgpu_pci_resume() - resume normal ops after PCI reset
7128  * @pdev: pointer to PCI device
7129  *
7130  * Called when the error recovery driver tells us that it's
7131  * OK to resume normal operation.
7132  */
7133 void amdgpu_pci_resume(struct pci_dev *pdev)
7134 {
7135 	struct drm_device *dev = pci_get_drvdata(pdev);
7136 	struct amdgpu_device *adev = drm_to_adev(dev);
7137 	struct list_head device_list;
7138 	struct amdgpu_hive_info *hive = NULL;
7139 	struct amdgpu_device *tmp_adev = NULL;
7140 
7141 	dev_info(adev->dev, "PCI error: resume callback!!\n");
7142 
7143 	/* Only continue execution for the case of pci_channel_io_frozen */
7144 	if (adev->pci_channel_state != pci_channel_io_frozen)
7145 		return;
7146 
7147 	INIT_LIST_HEAD(&device_list);
7148 
7149 	hive = amdgpu_get_xgmi_hive(adev);
7150 	if (hive) {
7151 		mutex_lock(&hive->hive_lock);
7152 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
7153 			tmp_adev->pcie_reset_ctx.in_link_reset = false;
7154 			list_add_tail(&tmp_adev->reset_list, &device_list);
7155 		}
7156 	} else
7157 		list_add_tail(&adev->reset_list, &device_list);
7158 
7159 	amdgpu_device_sched_resume(&device_list, NULL, NULL);
7160 	amdgpu_device_gpu_resume(adev, &device_list, false);
7161 	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
7162 
7163 	if (hive) {
7164 		mutex_unlock(&hive->hive_lock);
7165 		amdgpu_put_xgmi_hive(hive);
7166 	}
7167 }
7168 
7169 static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
7170 {
7171 	struct pci_dev *swus, *swds;
7172 	int r;
7173 
7174 	swds = pci_upstream_bridge(adev->pdev);
7175 	if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
7176 	    pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
7177 		return;
7178 	swus = pci_upstream_bridge(swds);
7179 	if (!swus ||
7180 	    (swus->vendor != PCI_VENDOR_ID_ATI &&
7181 	     swus->vendor != PCI_VENDOR_ID_AMD) ||
7182 	    pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
7183 		return;
7184 
7185 	/* If already saved, return */
7186 	if (adev->pcie_reset_ctx.swus)
7187 		return;
7188 	/* Upstream bridge is ATI, assume it's SWUS/DS architecture */
7189 	r = pci_save_state(swds);
7190 	if (r)
7191 		return;
7192 	adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
7193 
7194 	r = pci_save_state(swus);
7195 	if (r)
7196 		return;
7197 	adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
7198 
7199 	adev->pcie_reset_ctx.swus = swus;
7200 }
7201 
7202 static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
7203 {
7204 	struct pci_dev *pdev;
7205 	int r;
7206 
7207 	if (!adev->pcie_reset_ctx.swds_pcistate ||
7208 	    !adev->pcie_reset_ctx.swus_pcistate)
7209 		return;
7210 
7211 	pdev = adev->pcie_reset_ctx.swus;
7212 	r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
7213 	if (!r) {
7214 		pci_restore_state(pdev);
7215 	} else {
7216 		dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
7217 		return;
7218 	}
7219 
7220 	pdev = pci_upstream_bridge(adev->pdev);
7221 	r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
7222 	if (!r)
7223 		pci_restore_state(pdev);
7224 	else
7225 		dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
7226 }
7227 
7228 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
7229 {
7230 	struct drm_device *dev = pci_get_drvdata(pdev);
7231 	struct amdgpu_device *adev = drm_to_adev(dev);
7232 	int r;
7233 
7234 	if (amdgpu_sriov_vf(adev))
7235 		return false;
7236 
7237 	r = pci_save_state(pdev);
7238 	if (!r) {
7239 		kfree(adev->pci_state);
7240 
7241 		adev->pci_state = pci_store_saved_state(pdev);
7242 
7243 		if (!adev->pci_state) {
7244 			dev_err(adev->dev, "Failed to store PCI saved state");
7245 			return false;
7246 		}
7247 	} else {
7248 		dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
7249 		return false;
7250 	}
7251 
7252 	amdgpu_device_cache_switch_state(adev);
7253 
7254 	return true;
7255 }
7256 
7257 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
7258 {
7259 	struct drm_device *dev = pci_get_drvdata(pdev);
7260 	struct amdgpu_device *adev = drm_to_adev(dev);
7261 	int r;
7262 
7263 	if (!adev->pci_state)
7264 		return false;
7265 
7266 	r = pci_load_saved_state(pdev, adev->pci_state);
7267 
7268 	if (!r) {
7269 		pci_restore_state(pdev);
7270 	} else {
7271 		dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
7272 		return false;
7273 	}
7274 
7275 	return true;
7276 }
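
/*
 * Illustrative sketch (hypothetical caller): the cache/load pair above is
 * meant to bracket an ASIC reset so that PCI config space survives it.
 */
static inline void amdgpu_example_reset_with_pci_state(struct amdgpu_device *adev)
{
	if (!amdgpu_device_cache_pci_state(adev->pdev))
		return;
	/* ... trigger the ASIC reset here ... */
	amdgpu_device_load_pci_state(adev->pdev);
}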
7277 
7278 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
7279 		struct amdgpu_ring *ring)
7280 {
7281 #ifdef CONFIG_X86_64
7282 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
7283 		return;
7284 #endif
7285 	if (adev->gmc.xgmi.connected_to_cpu)
7286 		return;
7287 
7288 	if (ring && ring->funcs->emit_hdp_flush)
7289 		amdgpu_ring_emit_hdp_flush(ring);
7290 	else
7291 		amdgpu_asic_flush_hdp(adev, ring);
7292 }
7293 
7294 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
7295 		struct amdgpu_ring *ring)
7296 {
7297 #ifdef CONFIG_X86_64
7298 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
7299 		return;
7300 #endif
7301 	if (adev->gmc.xgmi.connected_to_cpu)
7302 		return;
7303 
7304 	amdgpu_asic_invalidate_hdp(adev, ring);
7305 }
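
/*
 * Illustrative sketch (hypothetical caller): a CPU write to VRAM through
 * the BAR is typically followed by an HDP flush so the GPU observes it.
 * The __iomem pointer stands in for an already-mapped VRAM location.
 */
static inline void amdgpu_example_write_then_flush(struct amdgpu_device *adev,
						   void __iomem *vram_addr,
						   u32 val)
{
	writel(val, vram_addr);
	amdgpu_device_flush_hdp(adev, NULL);
}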
7306 
7307 int amdgpu_in_reset(struct amdgpu_device *adev)
7308 {
7309 	return atomic_read(&adev->reset_domain->in_gpu_reset);
7310 }
7311 
7312 /**
7313  * amdgpu_device_halt() - bring hardware to some kind of halt state
7314  *
7315  * @adev: amdgpu_device pointer
7316  *
7317  * Bring hardware to some kind of halt state so that no one can touch it
7318  * any more. This helps to preserve the error context when an error
7319  * occurs. Compared to a simple hang, the system will remain stable at
7320  * least for SSH access. Then it should be trivial to inspect the
7321  * hardware state and see what's going on. Implemented as follows:
7322  *
7323  * 1. drm_dev_unplug() makes device inaccessible to user space (IOCTLs, etc.),
7324  *    clears all CPU mappings to device, disallows remappings through page faults
7325  * 2. amdgpu_irq_disable_all() disables all interrupts
7326  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
7327  * 4. set adev->no_hw_access to avoid potential crashes after step 5
7328  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
7329  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
7330  *    flush any in flight DMA operations
7331  */
7332 void amdgpu_device_halt(struct amdgpu_device *adev)
7333 {
7334 	struct pci_dev *pdev = adev->pdev;
7335 	struct drm_device *ddev = adev_to_drm(adev);
7336 
7337 	amdgpu_xcp_dev_unplug(adev);
7338 	drm_dev_unplug(ddev);
7339 
7340 	amdgpu_irq_disable_all(adev);
7341 
7342 	amdgpu_fence_driver_hw_fini(adev);
7343 
7344 	adev->no_hw_access = true;
7345 
7346 	amdgpu_device_unmap_mmio(adev);
7347 
7348 	pci_disable_device(pdev);
7349 	pci_wait_for_pending_transaction(pdev);
7350 }
7351 
7352 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
7353 				u32 reg)
7354 {
7355 	unsigned long flags, address, data;
7356 	u32 r;
7357 
7358 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
7359 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
7360 
7361 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
7362 	WREG32(address, reg * 4);
7363 	(void)RREG32(address);
7364 	r = RREG32(data);
7365 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
7366 	return r;
7367 }
7368 
7369 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
7370 				u32 reg, u32 v)
7371 {
7372 	unsigned long flags, address, data;
7373 
7374 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
7375 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
7376 
7377 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
7378 	WREG32(address, reg * 4);
7379 	(void)RREG32(address);
7380 	WREG32(data, v);
7381 	(void)RREG32(data);
7382 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
7383 }
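
/*
 * Illustrative sketch (hypothetical helper): a read-modify-write of a
 * PCIe port register via the indirect index/data accessors above.
 */
static inline void amdgpu_example_pcie_port_rmw(struct amdgpu_device *adev,
						u32 reg, u32 clear, u32 set)
{
	u32 v = amdgpu_device_pcie_port_rreg(adev, reg);

	v &= ~clear;
	v |= set;
	amdgpu_device_pcie_port_wreg(adev, reg, v);
}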
7384 
7385 /**
7386  * amdgpu_device_get_gang - return a reference to the current gang
7387  * @adev: amdgpu_device pointer
7388  *
7389  * Returns: A new reference to the current gang leader.
7390  */
7391 struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
7392 {
7393 	struct dma_fence *fence;
7394 
7395 	rcu_read_lock();
7396 	fence = dma_fence_get_rcu_safe(&adev->gang_submit);
7397 	rcu_read_unlock();
7398 	return fence;
7399 }
7400 
7401 /**
7402  * amdgpu_device_switch_gang - switch to a new gang
7403  * @adev: amdgpu_device pointer
7404  * @gang: the gang to switch to
7405  *
7406  * Try to switch to a new gang.
7407  * Returns: NULL if we switched to the new gang or a reference to the current
7408  * gang leader.
7409  */
7410 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
7411 					    struct dma_fence *gang)
7412 {
7413 	struct dma_fence *old = NULL;
7414 
7415 	dma_fence_get(gang);
7416 	do {
7417 		dma_fence_put(old);
7418 		old = amdgpu_device_get_gang(adev);
7419 		if (old == gang)
7420 			break;
7421 
7422 		if (!dma_fence_is_signaled(old)) {
7423 			dma_fence_put(gang);
7424 			return old;
7425 		}
7426 
7427 	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
7428 			 old, gang) != old);
7429 
7430 	/*
7431 	 * Drop it once for the exchanged reference in adev and once for the
7432 	 * thread local reference acquired in amdgpu_device_get_gang().
7433 	 */
7434 	dma_fence_put(old);
7435 	dma_fence_put(old);
7436 	return NULL;
7437 }
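
/*
 * Illustrative sketch (hypothetical caller): per the contract above, a
 * non-NULL return is the still-running gang leader, which must signal
 * before the switch can be retried.
 */
static void amdgpu_example_become_gang(struct amdgpu_device *adev,
				       struct dma_fence *gang)
{
	struct dma_fence *old;

	while ((old = amdgpu_device_switch_gang(adev, gang))) {
		dma_fence_wait(old, false);
		dma_fence_put(old);
	}
}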
7438 
7439 /**
7440  * amdgpu_device_enforce_isolation - enforce HW isolation
7441  * @adev: the amdgpu device pointer
7442  * @ring: the HW ring the job is supposed to run on
7443  * @job: the job which is about to be pushed to the HW ring
7444  *
7445  * Makes sure that only one client at a time can use the GFX block.
7446  * Returns: The dependency to wait on before the job can be pushed to the HW.
7447  * The function is called multiple times until NULL is returned.
7448  */
7449 struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
7450 						  struct amdgpu_ring *ring,
7451 						  struct amdgpu_job *job)
7452 {
7453 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
7454 	struct drm_sched_fence *f = job->base.s_fence;
7455 	struct dma_fence *dep;
7456 	void *owner;
7457 	int r;
7458 
7459 	/*
7460 	 * For now enforce isolation only for the GFX and compute blocks since
7461 	 * we only need the cleaner shader on those rings.
7462 	 */
7463 	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
7464 	    ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
7465 		return NULL;
7466 
7467 	/*
7468 	 * All submissions where enforce isolation is false are handled as if
7469 	 * they come from a single client. Use ~0l as the owner to distinguish it
7470 	 * from kernel submissions where the owner is NULL.
7471 	 */
7472 	owner = job->enforce_isolation ? f->owner : (void *)~0l;
7473 
7474 	mutex_lock(&adev->enforce_isolation_mutex);
7475 
7476 	/*
7477 	 * The "spearhead" submission is the first one which changes the
7478 	 * ownership to its client. We always need to wait for it to be
7479 	 * pushed to the HW before proceeding with anything.
7480 	 */
7481 	if (&f->scheduled != isolation->spearhead &&
7482 	    !dma_fence_is_signaled(isolation->spearhead)) {
7483 		dep = isolation->spearhead;
7484 		goto out_grab_ref;
7485 	}
7486 
7487 	if (isolation->owner != owner) {
7488 
7489 		/*
7490 		 * Wait for any gang to be assembled before switching to a
7491 		 * different owner or otherwise we could deadlock the
7492 		 * submissions.
7493 		 */
7494 		if (!job->gang_submit) {
7495 			dep = amdgpu_device_get_gang(adev);
7496 			if (!dma_fence_is_signaled(dep))
7497 				goto out_return_dep;
7498 			dma_fence_put(dep);
7499 		}
7500 
7501 		dma_fence_put(isolation->spearhead);
7502 		isolation->spearhead = dma_fence_get(&f->scheduled);
7503 		amdgpu_sync_move(&isolation->active, &isolation->prev);
7504 		trace_amdgpu_isolation(isolation->owner, owner);
7505 		isolation->owner = owner;
7506 	}
7507 
7508 	/*
7509 	 * Specifying the ring here helps to pipeline submissions even when
7510 	 * isolation is enabled. If that is not desired for testing NULL can be
7511 	 * used instead of the ring to enforce a CPU round trip while switching
7512 	 * between clients.
7513 	 */
7514 	dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
7515 	r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
7516 	if (r)
7517 		dev_warn(adev->dev, "OOM tracking isolation\n");
7518 
7519 out_grab_ref:
7520 	dma_fence_get(dep);
7521 out_return_dep:
7522 	mutex_unlock(&adev->enforce_isolation_mutex);
7523 	return dep;
7524 }
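
/*
 * Illustrative sketch (hypothetical caller): as documented above, the
 * scheduler asks for dependencies repeatedly and may only push the job
 * once NULL is returned.
 */
static void amdgpu_example_wait_isolation(struct amdgpu_device *adev,
					  struct amdgpu_ring *ring,
					  struct amdgpu_job *job)
{
	struct dma_fence *dep;

	while ((dep = amdgpu_device_enforce_isolation(adev, ring, job))) {
		dma_fence_wait(dep, false);
		dma_fence_put(dep);
	}
}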
7525 
7526 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
7527 {
7528 	switch (adev->asic_type) {
7529 #ifdef CONFIG_DRM_AMDGPU_SI
7530 	case CHIP_HAINAN:
7531 #endif
7532 	case CHIP_TOPAZ:
7533 		/* chips with no display hardware */
7534 		return false;
7535 #ifdef CONFIG_DRM_AMDGPU_SI
7536 	case CHIP_TAHITI:
7537 	case CHIP_PITCAIRN:
7538 	case CHIP_VERDE:
7539 	case CHIP_OLAND:
7540 #endif
7541 #ifdef CONFIG_DRM_AMDGPU_CIK
7542 	case CHIP_BONAIRE:
7543 	case CHIP_HAWAII:
7544 	case CHIP_KAVERI:
7545 	case CHIP_KABINI:
7546 	case CHIP_MULLINS:
7547 #endif
7548 	case CHIP_TONGA:
7549 	case CHIP_FIJI:
7550 	case CHIP_POLARIS10:
7551 	case CHIP_POLARIS11:
7552 	case CHIP_POLARIS12:
7553 	case CHIP_VEGAM:
7554 	case CHIP_CARRIZO:
7555 	case CHIP_STONEY:
7556 		/* chips with display hardware */
7557 		return true;
7558 	default:
7559 		/* IP discovery */
7560 		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
7561 		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
7562 			return false;
7563 		return true;
7564 	}
7565 }
7566 
7567 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
7568 		uint32_t inst, uint32_t reg_addr, char reg_name[],
7569 		uint32_t expected_value, uint32_t mask)
7570 {
7571 	uint32_t ret = 0;
7572 	uint32_t old_ = 0;
7573 	uint32_t tmp_ = RREG32(reg_addr);
7574 	uint32_t loop = adev->usec_timeout;
7575 
7576 	while ((tmp_ & (mask)) != (expected_value)) {
7577 		if (old_ != tmp_) {
7578 			loop = adev->usec_timeout;
7579 			old_ = tmp_;
7580 		} else
7581 			udelay(1);
7582 		tmp_ = RREG32(reg_addr);
7583 		loop--;
7584 		if (!loop) {
7585 			dev_warn(
7586 				adev->dev,
7587 				"Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
7588 				inst, reg_name, (uint32_t)expected_value,
7589 				(uint32_t)(tmp_ & (mask)));
7590 			ret = -ETIMEDOUT;
7591 			break;
7592 		}
7593 	}
7594 	return ret;
7595 }
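
/*
 * Illustrative sketch (hypothetical register, name and mask): poll a
 * status register until its busy bit clears, warning on timeout.
 */
static inline uint32_t amdgpu_example_wait_idle(struct amdgpu_device *adev,
						uint32_t status_reg)
{
	/* expect bit 0 (busy) to read back as 0 */
	return amdgpu_device_wait_on_rreg(adev, 0, status_reg,
					  "EXAMPLE_STATUS", 0, 0x1);
}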
7596 
7597 ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
7598 {
7599 	ssize_t size = 0;
7600 
7601 	if (!ring || !ring->adev)
7602 		return size;
7603 
7604 	if (amdgpu_device_should_recover_gpu(ring->adev))
7605 		size |= AMDGPU_RESET_TYPE_FULL;
7606 
7607 	if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
7608 	    !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
7609 		size |= AMDGPU_RESET_TYPE_SOFT_RESET;
7610 
7611 	return size;
7612 }
7613 
7614 ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
7615 {
7616 	ssize_t size = 0;
7617 
7618 	if (supported_reset == 0) {
7619 		size += sysfs_emit_at(buf, size, "unsupported");
7620 		size += sysfs_emit_at(buf, size, "\n");
7621 		return size;
7622 
7623 	}
7624 
7625 	if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
7626 		size += sysfs_emit_at(buf, size, "soft ");
7627 
7628 	if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
7629 		size += sysfs_emit_at(buf, size, "queue ");
7630 
7631 	if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
7632 		size += sysfs_emit_at(buf, size, "pipe ");
7633 
7634 	if (supported_reset & AMDGPU_RESET_TYPE_FULL)
7635 		size += sysfs_emit_at(buf, size, "full ");
7636 
7637 	size += sysfs_emit_at(buf, size, "\n");
7638 	return size;
7639 }
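
/*
 * Illustrative sketch of a sysfs show() callback (hypothetical attribute;
 * gfx_supported_reset is assumed to hold AMDGPU_RESET_TYPE_* bits): emit
 * the reset types a block supports.
 */
static ssize_t amdgpu_example_reset_mask_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
}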
7640 
7641 void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
7642 			   enum amdgpu_uid_type type, uint8_t inst,
7643 			   uint64_t uid)
7644 {
7645 	if (!uid_info)
7646 		return;
7647 
7648 	if (type >= AMDGPU_UID_TYPE_MAX) {
7649 		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
7650 			     type);
7651 		return;
7652 	}
7653 
7654 	if (inst >= AMDGPU_UID_INST_MAX) {
7655 		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
7656 			     inst);
7657 		return;
7658 	}
7659 
7660 	if (uid_info->uid[type][inst] != 0) {
7661 		dev_warn_once(
7662 			uid_info->adev->dev,
7663 			"Overwriting existing UID %llu for type %d instance %d\n",
7664 			uid_info->uid[type][inst], type, inst);
7665 	}
7666 
7667 	uid_info->uid[type][inst] = uid;
7668 }
7669 
7670 u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
7671 			  enum amdgpu_uid_type type, uint8_t inst)
7672 {
7673 	if (!uid_info)
7674 		return 0;
7675 
7676 	if (type >= AMDGPU_UID_TYPE_MAX) {
7677 		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
7678 			     type);
7679 		return 0;
7680 	}
7681 
7682 	if (inst >= AMDGPU_UID_INST_MAX) {
7683 		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
7684 			     inst);
7685 		return 0;
7686 	}
7687 
7688 	return uid_info->uid[type][inst];
7689 }
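
/*
 * Illustrative sketch (hypothetical helper and value): store a UID for a
 * given type/instance pair and read it back.
 */
static inline bool amdgpu_example_uid_roundtrip(struct amdgpu_uid *uid_info,
						enum amdgpu_uid_type type)
{
	amdgpu_device_set_uid(uid_info, type, 0, 0x1234ULL);
	return amdgpu_device_get_uid(uid_info, type, 0) == 0x1234ULL;
}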
7690