1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29 #include <linux/aperture.h>
30 #include <linux/power_supply.h>
31 #include <linux/kthread.h>
32 #include <linux/module.h>
33 #include <linux/console.h>
34 #include <linux/slab.h>
35 #include <linux/iommu.h>
36 #include <linux/pci.h>
37 #include <linux/pci-p2pdma.h>
38 #include <linux/apple-gmux.h>
39 #include <linux/nospec.h>
40
41 #include <drm/drm_atomic_helper.h>
42 #include <drm/drm_client_event.h>
43 #include <drm/drm_crtc_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/amdgpu_drm.h>
46 #include <linux/device.h>
47 #include <linux/vgaarb.h>
48 #include <linux/vga_switcheroo.h>
49 #include <linux/efi.h>
50 #include "amdgpu.h"
51 #include "amdgpu_trace.h"
52 #include "amdgpu_i2c.h"
53 #include "atom.h"
54 #include "amdgpu_atombios.h"
55 #include "amdgpu_atomfirmware.h"
56 #include "amd_pcie.h"
57 #ifdef CONFIG_DRM_AMDGPU_SI
58 #include "si.h"
59 #endif
60 #ifdef CONFIG_DRM_AMDGPU_CIK
61 #include "cik.h"
62 #endif
63 #include "vi.h"
64 #include "soc15.h"
65 #include "nv.h"
66 #include "bif/bif_4_1_d.h"
67 #include <linux/firmware.h>
68 #include "amdgpu_vf_error.h"
69
70 #include "amdgpu_amdkfd.h"
71 #include "amdgpu_pm.h"
72
73 #include "amdgpu_xgmi.h"
74 #include "amdgpu_ras.h"
75 #include "amdgpu_ras_mgr.h"
76 #include "amdgpu_pmu.h"
77 #include "amdgpu_fru_eeprom.h"
78 #include "amdgpu_reset.h"
79 #include "amdgpu_virt.h"
80 #include "amdgpu_dev_coredump.h"
81
82 #include <linux/suspend.h>
83 #include <drm/task_barrier.h>
84 #include <linux/pm_runtime.h>
85
86 #include <drm/drm_drv.h>
87
88 #if IS_ENABLED(CONFIG_X86)
89 #include <asm/intel-family.h>
90 #include <asm/cpu_device_id.h>
91 #endif
92
93 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
94 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
95 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
96 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
97 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
98 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
99 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
100 MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");
101
102 #define AMDGPU_RESUME_MS 2000
103 #define AMDGPU_MAX_RETRY_LIMIT 2
104 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
105 #define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
106 #define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
107 #define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
108
109 #define AMDGPU_VBIOS_SKIP (1U << 0)
110 #define AMDGPU_VBIOS_OPTIONAL (1U << 1)
111
112 static const struct drm_driver amdgpu_kms_driver;
113
114 const char *amdgpu_asic_name[] = {
115 "TAHITI",
116 "PITCAIRN",
117 "VERDE",
118 "OLAND",
119 "HAINAN",
120 "BONAIRE",
121 "KAVERI",
122 "KABINI",
123 "HAWAII",
124 "MULLINS",
125 "TOPAZ",
126 "TONGA",
127 "FIJI",
128 "CARRIZO",
129 "STONEY",
130 "POLARIS10",
131 "POLARIS11",
132 "POLARIS12",
133 "VEGAM",
134 "VEGA10",
135 "VEGA12",
136 "VEGA20",
137 "RAVEN",
138 "ARCTURUS",
139 "RENOIR",
140 "ALDEBARAN",
141 "NAVI10",
142 "CYAN_SKILLFISH",
143 "NAVI14",
144 "NAVI12",
145 "SIENNA_CICHLID",
146 "NAVY_FLOUNDER",
147 "VANGOGH",
148 "DIMGREY_CAVEFISH",
149 "BEIGE_GOBY",
150 "YELLOW_CARP",
151 "IP DISCOVERY",
152 "LAST",
153 };
154
155 #define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
156 /*
157 * Default init level where all blocks are expected to be initialized. This is
158 * the level of initialization expected by default and also after a full reset
159 * of the device.
160 */
161 struct amdgpu_init_level amdgpu_init_default = {
162 .level = AMDGPU_INIT_LEVEL_DEFAULT,
163 .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
164 };
165
166 struct amdgpu_init_level amdgpu_init_recovery = {
167 .level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
168 .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
169 };
170
171 /*
172 * Minimal blocks needed to be initialized before an XGMI hive can be reset. This
173 * is used for cases like reset on initialization where the entire hive needs to
174 * be reset before first use.
175 */
176 struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
177 .level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
178 .hwini_ip_block_mask =
179 BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
180 BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
181 BIT(AMD_IP_BLOCK_TYPE_PSP)
182 };
183
184 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev);
185 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev);
186 static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev);
187
188 static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
189
190 static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
191 enum amd_ip_block_type block)
192 {
193 return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
194 }
195
196 void amdgpu_set_init_level(struct amdgpu_device *adev,
197 enum amdgpu_init_lvl_id lvl)
198 {
199 switch (lvl) {
200 case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
201 adev->init_lvl = &amdgpu_init_minimal_xgmi;
202 break;
203 case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
204 adev->init_lvl = &amdgpu_init_recovery;
205 break;
206 case AMDGPU_INIT_LEVEL_DEFAULT:
207 fallthrough;
208 default:
209 adev->init_lvl = &amdgpu_init_default;
210 break;
211 }
212 }
213
214 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
215 static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
216 void *data);
217
218 /**
219 * DOC: pcie_replay_count
220 *
221 * The amdgpu driver provides a sysfs API for reporting the total number
222 * of PCIe replays (NAKs).
223 * The file pcie_replay_count is used for this and returns the total
224 * number of replays as a sum of the NAKs generated and NAKs received.
225 */
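/*
 * Example read via sysfs (the PCI address in the path is illustrative only;
 * it depends on where the card is plugged in):
 *
 *   $ cat /sys/bus/pci/devices/0000:03:00.0/pcie_replay_count
 *   0
 */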
226
227 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
228 struct device_attribute *attr, char *buf)
229 {
230 struct drm_device *ddev = dev_get_drvdata(dev);
231 struct amdgpu_device *adev = drm_to_adev(ddev);
232 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
233
234 return sysfs_emit(buf, "%llu\n", cnt);
235 }
236
237 static DEVICE_ATTR(pcie_replay_count, 0444,
238 amdgpu_device_get_pcie_replay_count, NULL);
239
240 static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
241 {
242 int ret = 0;
243
244 if (amdgpu_nbio_is_replay_cnt_supported(adev))
245 ret = sysfs_create_file(&adev->dev->kobj,
246 &dev_attr_pcie_replay_count.attr);
247
248 return ret;
249 }
250
251 static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
252 {
253 if (amdgpu_nbio_is_replay_cnt_supported(adev))
254 sysfs_remove_file(&adev->dev->kobj,
255 &dev_attr_pcie_replay_count.attr);
256 }
257
258 static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
259 const struct bin_attribute *attr, char *buf,
260 loff_t ppos, size_t count)
261 {
262 struct device *dev = kobj_to_dev(kobj);
263 struct drm_device *ddev = dev_get_drvdata(dev);
264 struct amdgpu_device *adev = drm_to_adev(ddev);
265 ssize_t bytes_read;
266
267 switch (ppos) {
268 case AMDGPU_SYS_REG_STATE_XGMI:
269 bytes_read = amdgpu_asic_get_reg_state(
270 adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
271 break;
272 case AMDGPU_SYS_REG_STATE_WAFL:
273 bytes_read = amdgpu_asic_get_reg_state(
274 adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
275 break;
276 case AMDGPU_SYS_REG_STATE_PCIE:
277 bytes_read = amdgpu_asic_get_reg_state(
278 adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
279 break;
280 case AMDGPU_SYS_REG_STATE_USR:
281 bytes_read = amdgpu_asic_get_reg_state(
282 adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
283 break;
284 case AMDGPU_SYS_REG_STATE_USR_1:
285 bytes_read = amdgpu_asic_get_reg_state(
286 adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
287 break;
288 default:
289 return -EINVAL;
290 }
291
292 return bytes_read;
293 }
294
295 static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
296 AMDGPU_SYS_REG_STATE_END);
297
298 int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
299 {
300 int ret;
301
302 if (!amdgpu_asic_get_reg_state_supported(adev))
303 return 0;
304
305 ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
306
307 return ret;
308 }
309
310 void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
311 {
312 if (!amdgpu_asic_get_reg_state_supported(adev))
313 return;
314 sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
315 }
316
317 /**
318 * DOC: board_info
319 *
320 * The amdgpu driver provides a sysfs API for giving board related information.
321 * It provides the form factor information in the format
322 *
323 * type : form factor
324 *
325 * Possible form factor values
326 *
327 * - "cem" - PCIE CEM card
328 * - "oam" - Open Compute Accelerator Module
329 * - "unknown" - Not known
330 *
331 */
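/*
 * Example read (output is illustrative; the reported form factor depends on
 * the board, and the PCI address in the path depends on the system):
 *
 *   $ cat /sys/bus/pci/devices/0000:03:00.0/board_info
 *   type : oam
 */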
332
333 static ssize_t amdgpu_device_get_board_info(struct device *dev,
334 struct device_attribute *attr,
335 char *buf)
336 {
337 struct drm_device *ddev = dev_get_drvdata(dev);
338 struct amdgpu_device *adev = drm_to_adev(ddev);
339 enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
340 const char *pkg;
341
342 if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
343 pkg_type = adev->smuio.funcs->get_pkg_type(adev);
344
345 switch (pkg_type) {
346 case AMDGPU_PKG_TYPE_CEM:
347 pkg = "cem";
348 break;
349 case AMDGPU_PKG_TYPE_OAM:
350 pkg = "oam";
351 break;
352 default:
353 pkg = "unknown";
354 break;
355 }
356
357 return sysfs_emit(buf, "%s : %s\n", "type", pkg);
358 }
359
360 static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);
361
362 static struct attribute *amdgpu_board_attrs[] = {
363 &dev_attr_board_info.attr,
364 NULL,
365 };
366
367 static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
368 struct attribute *attr, int n)
369 {
370 struct device *dev = kobj_to_dev(kobj);
371 struct drm_device *ddev = dev_get_drvdata(dev);
372 struct amdgpu_device *adev = drm_to_adev(ddev);
373
374 if (adev->flags & AMD_IS_APU)
375 return 0;
376
377 return attr->mode;
378 }
379
380 static const struct attribute_group amdgpu_board_attrs_group = {
381 .attrs = amdgpu_board_attrs,
382 .is_visible = amdgpu_board_attrs_is_visible
383 };
384
385 /**
386 * DOC: uma/carveout_options
387 *
388 * This is a read-only file that lists all available UMA allocation
389 * options and their corresponding indices. Example output::
390 *
391 * $ cat uma/carveout_options
392 * 0: Minimum (512 MB)
393 * 1: (1 GB)
394 * 2: (2 GB)
395 * 3: (4 GB)
396 * 4: (6 GB)
397 * 5: (8 GB)
398 * 6: (12 GB)
399 * 7: Medium (16 GB)
400 * 8: (24 GB)
401 * 9: High (32 GB)
402 */
403 static ssize_t carveout_options_show(struct device *dev,
404 struct device_attribute *attr,
405 char *buf)
406 {
407 struct drm_device *ddev = dev_get_drvdata(dev);
408 struct amdgpu_device *adev = drm_to_adev(ddev);
409 struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
410 uint32_t memory_carved;
411 ssize_t size = 0;
412
413 if (!uma_info || !uma_info->num_entries)
414 return -ENODEV;
415
416 for (int i = 0; i < uma_info->num_entries; i++) {
417 memory_carved = uma_info->entries[i].memory_carved_mb;
418 if (memory_carved >= SZ_1G/SZ_1M) {
419 size += sysfs_emit_at(buf, size, "%d: %s (%u GB)\n",
420 i,
421 uma_info->entries[i].name,
422 memory_carved >> 10);
423 } else {
424 size += sysfs_emit_at(buf, size, "%d: %s (%u MB)\n",
425 i,
426 uma_info->entries[i].name,
427 memory_carved);
428 }
429 }
430
431 return size;
432 }
433 static DEVICE_ATTR_RO(carveout_options);
434
435 /**
436 * DOC: uma/carveout
437 *
438 * This file is both readable and writable. When read, it shows the
439 * index of the current setting. Writing a valid index to this file
440 * allows users to change the UMA carveout size to the selected option
441 * on the next boot.
442 *
443 * The available options and their corresponding indices can be read
444 * from the uma/carveout_options file.
445 */
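/*
 * Example usage (index and PCI address are illustrative only): select entry 7
 * from uma/carveout_options so it takes effect on the next boot:
 *
 *   $ echo 7 > /sys/bus/pci/devices/0000:03:00.0/uma/carveout
 */
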
446 static ssize_t carveout_show(struct device *dev,
447 struct device_attribute *attr,
448 char *buf)
449 {
450 struct drm_device *ddev = dev_get_drvdata(dev);
451 struct amdgpu_device *adev = drm_to_adev(ddev);
452
453 return sysfs_emit(buf, "%u\n", adev->uma_info.uma_option_index);
454 }
455
456 static ssize_t carveout_store(struct device *dev,
457 struct device_attribute *attr,
458 const char *buf, size_t count)
459 {
460 struct drm_device *ddev = dev_get_drvdata(dev);
461 struct amdgpu_device *adev = drm_to_adev(ddev);
462 struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
463 struct amdgpu_uma_carveout_option *opt;
464 unsigned long val;
465 uint8_t flags;
466 int r;
467
468 r = kstrtoul(buf, 10, &val);
469 if (r)
470 return r;
471
472 if (val >= uma_info->num_entries)
473 return -EINVAL;
474
475 val = array_index_nospec(val, uma_info->num_entries);
476 opt = &uma_info->entries[val];
477
478 if (!(opt->flags & AMDGPU_UMA_FLAG_AUTO) &&
479 !(opt->flags & AMDGPU_UMA_FLAG_CUSTOM)) {
480 drm_err_once(ddev, "Option %lu not supported due to lack of Custom/Auto flag", val);
481 return -EINVAL;
482 }
483
484 flags = opt->flags;
485 flags &= ~((flags & AMDGPU_UMA_FLAG_AUTO) >> 1);
486
487 guard(mutex)(&uma_info->update_lock);
488
489 r = amdgpu_acpi_set_uma_allocation_size(adev, val, flags);
490 if (r)
491 return r;
492
493 uma_info->uma_option_index = val;
494
495 return count;
496 }
497 static DEVICE_ATTR_RW(carveout);
498
499 static struct attribute *amdgpu_uma_attrs[] = {
500 &dev_attr_carveout.attr,
501 &dev_attr_carveout_options.attr,
502 NULL
503 };
504
505 const struct attribute_group amdgpu_uma_attr_group = {
506 .name = "uma",
507 .attrs = amdgpu_uma_attrs
508 };
509
510 static void amdgpu_uma_sysfs_init(struct amdgpu_device *adev)
511 {
512 int rc;
513
514 if (!(adev->flags & AMD_IS_APU))
515 return;
516
517 if (!amdgpu_acpi_is_set_uma_allocation_size_supported())
518 return;
519
520 rc = amdgpu_atomfirmware_get_uma_carveout_info(adev, &adev->uma_info);
521 if (rc) {
522 drm_dbg(adev_to_drm(adev),
523 "Failed to parse UMA carveout info from VBIOS: %d\n", rc);
524 goto out_info;
525 }
526
527 mutex_init(&adev->uma_info.update_lock);
528
529 rc = devm_device_add_group(adev->dev, &amdgpu_uma_attr_group);
530 if (rc) {
531 drm_dbg(adev_to_drm(adev), "Failed to add UMA carveout sysfs interfaces %d\n", rc);
532 goto out_attr;
533 }
534
535 return;
536
537 out_attr:
538 mutex_destroy(&adev->uma_info.update_lock);
539 out_info:
540 return;
541 }
542
543 static void amdgpu_uma_sysfs_fini(struct amdgpu_device *adev)
544 {
545 struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
546
547 if (!amdgpu_acpi_is_set_uma_allocation_size_supported())
548 return;
549
550 mutex_destroy(&uma_info->update_lock);
551 uma_info->num_entries = 0;
552 }
553
554 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
555
556 /**
557 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
558 *
559 * @adev: amdgpu device pointer
560 *
561 * Returns true if the device is a dGPU with ATPX power control,
562 * otherwise return false.
563 */
564 bool amdgpu_device_supports_px(struct amdgpu_device *adev)
565 {
566 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
567 return true;
568 return false;
569 }
570
571 /**
572 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
573 *
574 * @adev: amdgpu device pointer
575 *
576 * Returns true if the device is a dGPU with ACPI power control,
577 * otherwise return false.
578 */
579 bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
580 {
581 if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
582 return false;
583
584 if (adev->has_pr3 ||
585 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
586 return true;
587 return false;
588 }
589
590 /**
591 * amdgpu_device_supports_baco - Does the device support BACO
592 *
593 * @adev: amdgpu device pointer
594 *
595 * Return:
596 * 1 if the device supports BACO;
597 * 3 if the device supports MACO (only works if BACO is supported)
598 * otherwise return 0.
599 */
600 int amdgpu_device_supports_baco(struct amdgpu_device *adev)
601 {
602 return amdgpu_asic_supports_baco(adev);
603 }
604
605 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
606 {
607 int bamaco_support;
608
609 adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
610 bamaco_support = amdgpu_device_supports_baco(adev);
611
612 switch (amdgpu_runtime_pm) {
613 case 2:
614 if (bamaco_support & MACO_SUPPORT) {
615 adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
616 dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
617 } else if (bamaco_support == BACO_SUPPORT) {
618 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
619 dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n");
620 }
621 break;
622 case 1:
623 if (bamaco_support & BACO_SUPPORT) {
624 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
625 dev_info(adev->dev, "Forcing BACO for runtime pm\n");
626 }
627 break;
628 case -1:
629 case -2:
630 if (amdgpu_device_supports_px(adev)) {
631 /* enable PX as runtime mode */
632 adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
633 dev_info(adev->dev, "Using ATPX for runtime pm\n");
634 } else if (amdgpu_device_supports_boco(adev)) {
635 /* enable boco as runtime mode */
636 adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
637 dev_info(adev->dev, "Using BOCO for runtime pm\n");
638 } else {
639 if (!bamaco_support)
640 goto no_runtime_pm;
641
642 switch (adev->asic_type) {
643 case CHIP_VEGA20:
644 case CHIP_ARCTURUS:
645 /* BACO is not supported on vega20 and arcturus */
646 break;
647 case CHIP_VEGA10:
648 /* enable BACO as runpm mode if noretry=0 */
649 if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
650 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
651 break;
652 default:
653 /* enable BACO as runpm mode on CI+ */
654 if (!amdgpu_passthrough(adev))
655 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
656 break;
657 }
658
659 if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
660 if (bamaco_support & MACO_SUPPORT) {
661 adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
662 dev_info(adev->dev, "Using BAMACO for runtime pm\n");
663 } else {
664 dev_info(adev->dev, "Using BACO for runtime pm\n");
665 }
666 }
667 }
668 break;
669 case 0:
670 dev_info(adev->dev, "runtime pm is manually disabled\n");
671 break;
672 default:
673 break;
674 }
675
676 no_runtime_pm:
677 if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
678 dev_info(adev->dev, "Runtime PM not available\n");
679 }
680 /**
681 * amdgpu_device_supports_smart_shift - Is the device dGPU with
682 * smart shift support
683 *
684 * @adev: amdgpu device pointer
685 *
686 * Returns true if the device is a dGPU with Smart Shift support,
687 * otherwise returns false.
688 */
689 bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
690 {
691 return (amdgpu_device_supports_boco(adev) &&
692 amdgpu_acpi_is_power_shift_control_supported());
693 }
694
695 /*
696 * VRAM access helper functions
697 */
698
699 /**
700 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
701 *
702 * @adev: amdgpu_device pointer
703 * @pos: offset of the buffer in vram
704 * @buf: virtual address of the buffer in system memory
705 * @size: read/write size, the buffer at @buf must be at least @size bytes
706 * @write: true - write to vram, otherwise - read from vram
707 */
708 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
709 void *buf, size_t size, bool write)
710 {
711 unsigned long flags;
712 uint32_t hi = ~0, tmp = 0;
713 uint32_t *data = buf;
714 uint64_t last;
715 int idx;
716
717 if (!drm_dev_enter(adev_to_drm(adev), &idx))
718 return;
719
720 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
721
722 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
723 for (last = pos + size; pos < last; pos += 4) {
724 tmp = pos >> 31;
725
726 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
727 if (tmp != hi) {
728 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
729 hi = tmp;
730 }
731 if (write)
732 WREG32_NO_KIQ(mmMM_DATA, *data++);
733 else
734 *data++ = RREG32_NO_KIQ(mmMM_DATA);
735 }
736
737 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
738 drm_dev_exit(idx);
739 }
740
741 /**
742 * amdgpu_device_aper_access - access vram by vram aperture
743 *
744 * @adev: amdgpu_device pointer
745 * @pos: offset of the buffer in vram
746 * @buf: virtual address of the buffer in system memory
747 * @size: read/write size, the buffer at @buf must be at least @size bytes
748 * @write: true - write to vram, otherwise - read from vram
749 *
750 * Returns the number of bytes that have been transferred.
751 */
752 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
753 void *buf, size_t size, bool write)
754 {
755 #ifdef CONFIG_64BIT
756 void __iomem *addr;
757 size_t count = 0;
758 uint64_t last;
759
760 if (!adev->mman.aper_base_kaddr)
761 return 0;
762
763 last = min(pos + size, adev->gmc.visible_vram_size);
764 if (last > pos) {
765 addr = adev->mman.aper_base_kaddr + pos;
766 count = last - pos;
767
768 if (write) {
769 memcpy_toio(addr, buf, count);
770 /* Make sure HDP write cache flush happens without any reordering
771 * after the system memory contents are sent over PCIe device
772 */
773 mb();
774 amdgpu_device_flush_hdp(adev, NULL);
775 } else {
776 amdgpu_device_invalidate_hdp(adev, NULL);
777 /* Make sure HDP read cache is invalidated before issuing a read
778 * to the PCIe device
779 */
780 mb();
781 memcpy_fromio(buf, addr, count);
782 }
783
784 }
785
786 return count;
787 #else
788 return 0;
789 #endif
790 }
791
792 /**
793 * amdgpu_device_vram_access - read/write a buffer in vram
794 *
795 * @adev: amdgpu_device pointer
796 * @pos: offset of the buffer in vram
797 * @buf: virtual address of the buffer in system memory
798 * @size: read/write size, the buffer at @buf must be at least @size bytes
799 * @write: true - write to vram, otherwise - read from vram
800 */
801 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
802 void *buf, size_t size, bool write)
803 {
804 size_t count;
805
806 /* try using the vram aperture to access vram first */
807 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
808 size -= count;
809 if (size) {
810 /* use MM_INDEX/MM_DATA to access the rest of vram */
811 pos += count;
812 buf += count;
813 amdgpu_device_mm_access(adev, pos, buf, size, write);
814 }
815 }
816
817 /*
818 * register access helper functions.
819 */
820
821 /* Check if hw access should be skipped because of hotplug or device error */
822 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
823 {
824 if (adev->no_hw_access)
825 return true;
826
827 #ifdef CONFIG_LOCKDEP
828 /*
829 * This is a bit complicated to understand, so worth a comment. What we assert
830 * here is that the GPU reset is not running on another thread in parallel.
831 *
832 * For this we trylock the read side of the reset semaphore, if that succeeds
833 * we know that the reset is not running in parallel.
834 *
835 * If the trylock fails we assert that we are either already holding the read
836 * side of the lock or are the reset thread itself and hold the write side of
837 * the lock.
838 */
839 if (in_task()) {
840 if (down_read_trylock(&adev->reset_domain->sem))
841 up_read(&adev->reset_domain->sem);
842 else
843 lockdep_assert_held(&adev->reset_domain->sem);
844 }
845 #endif
846 return false;
847 }
848
849 /**
850 * amdgpu_device_get_rev_id - query device rev_id
851 *
852 * @adev: amdgpu_device pointer
853 *
854 * Return device rev_id
855 */
856 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
857 {
858 return adev->nbio.funcs->get_rev_id(adev);
859 }
860
861 static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
862 {
863 if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
864 return AMDGPU_VBIOS_SKIP;
865
866 if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
867 return AMDGPU_VBIOS_OPTIONAL;
868
869 return 0;
870 }
871
872 /**
873 * amdgpu_device_asic_init - Wrapper for atom asic_init
874 *
875 * @adev: amdgpu_device pointer
876 *
877 * Does any asic specific work and then calls atom asic init.
878 */
879 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
880 {
881 uint32_t flags;
882 bool optional;
883 int ret;
884
885 amdgpu_asic_pre_asic_init(adev);
886 flags = amdgpu_device_get_vbios_flags(adev);
887 optional = !!(flags & (AMDGPU_VBIOS_OPTIONAL | AMDGPU_VBIOS_SKIP));
888
889 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
890 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
891 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
892 amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
893 amdgpu_psp_wait_for_bootloader(adev);
894 if (optional && !adev->bios)
895 return 0;
896
897 ret = amdgpu_atomfirmware_asic_init(adev, true);
898 return ret;
899 } else {
900 if (optional && !adev->bios)
901 return 0;
902
903 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
904 }
905
906 return 0;
907 }
908
909 /**
910 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
911 *
912 * @adev: amdgpu_device pointer
913 *
914 * Allocates a scratch page of VRAM for use by various things in the
915 * driver.
916 */
917 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
918 {
919 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
920 AMDGPU_GEM_DOMAIN_VRAM |
921 AMDGPU_GEM_DOMAIN_GTT,
922 &adev->mem_scratch.robj,
923 &adev->mem_scratch.gpu_addr,
924 (void **)&adev->mem_scratch.ptr);
925 }
926
927 /**
928 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
929 *
930 * @adev: amdgpu_device pointer
931 *
932 * Frees the VRAM scratch page.
933 */
934 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
935 {
936 amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
937 }
938
939 /**
940 * amdgpu_device_program_register_sequence - program an array of registers.
941 *
942 * @adev: amdgpu_device pointer
943 * @registers: pointer to the register array
944 * @array_size: size of the register array
945 *
946 * Programs an array of registers with AND/OR masks.
947 * This is a helper for setting golden registers.
948 */
949 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
950 const u32 *registers,
951 const u32 array_size)
952 {
953 u32 tmp, reg, and_mask, or_mask;
954 int i;
955
956 if (array_size % 3)
957 return;
958
959 for (i = 0; i < array_size; i += 3) {
960 reg = registers[i + 0];
961 and_mask = registers[i + 1];
962 or_mask = registers[i + 2];
963
964 if (and_mask == 0xffffffff) {
965 tmp = or_mask;
966 } else {
967 tmp = RREG32(reg);
968 tmp &= ~and_mask;
969 if (adev->family >= AMDGPU_FAMILY_AI)
970 tmp |= (or_mask & and_mask);
971 else
972 tmp |= or_mask;
973 }
974 WREG32(reg, tmp);
975 }
976 }
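
/*
 * The @registers array passed to amdgpu_device_program_register_sequence()
 * is consumed as {offset, and_mask, or_mask} triplets. A minimal, purely
 * illustrative table (the register name and values are hypothetical):
 *
 *   static const u32 example_golden_settings[] = {
 *           mmEXAMPLE_REG, 0x0000000f, 0x00000002,
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *                                           ARRAY_SIZE(example_golden_settings));
 */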
977
978 /**
979 * amdgpu_device_pci_config_reset - reset the GPU
980 *
981 * @adev: amdgpu_device pointer
982 *
983 * Resets the GPU using the pci config reset sequence.
984 * Only applicable to asics prior to vega10.
985 */
986 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
987 {
988 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
989 }
990
991 /**
992 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
993 *
994 * @adev: amdgpu_device pointer
995 *
996 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
997 */
998 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
999 {
1000 return pci_reset_function(adev->pdev);
1001 }
1002
1003 /*
1004 * amdgpu_device_wb_*()
1005 * Writeback is the method by which the GPU updates special pages in memory
1006 * with the status of certain GPU events (fences, ring pointers, etc.).
1007 */
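/*
 * Simplified sketch of how a writeback slot is typically used by ring/fence
 * code (error handling and the real consumer are omitted; @wb is a dword
 * offset into the writeback page):
 *
 *   u32 wb;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb)) {
 *           u64 wb_gpu_addr = adev->wb.gpu_addr + wb * 4;
 *           volatile u32 *wb_cpu_ptr = &adev->wb.wb[wb];
 *
 *           // point the engine at wb_gpu_addr, poll status via *wb_cpu_ptr
 *           amdgpu_device_wb_free(adev, wb);
 *   }
 */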
1008
1009 /**
1010 * amdgpu_device_wb_fini - Disable Writeback and free memory
1011 *
1012 * @adev: amdgpu_device pointer
1013 *
1014 * Disables Writeback and frees the Writeback memory (all asics).
1015 * Used at driver shutdown.
1016 */
1017 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1018 {
1019 if (adev->wb.wb_obj) {
1020 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1021 &adev->wb.gpu_addr,
1022 (void **)&adev->wb.wb);
1023 adev->wb.wb_obj = NULL;
1024 }
1025 }
1026
1027 /**
1028 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1029 *
1030 * @adev: amdgpu_device pointer
1031 *
1032 * Initializes writeback and allocates writeback memory (all asics).
1033 * Used at driver startup.
1034 * Returns 0 on success or an -error on failure.
1035 */
1036 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1037 {
1038 int r;
1039
1040 if (adev->wb.wb_obj == NULL) {
1041 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1042 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1043 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1044 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1045 (void **)&adev->wb.wb);
1046 if (r) {
1047 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1048 return r;
1049 }
1050
1051 adev->wb.num_wb = AMDGPU_MAX_WB;
1052 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1053
1054 /* clear wb memory */
1055 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1056 }
1057
1058 return 0;
1059 }
1060
1061 /**
1062 * amdgpu_device_wb_get - Allocate a wb entry
1063 *
1064 * @adev: amdgpu_device pointer
1065 * @wb: wb index
1066 *
1067 * Allocate a wb slot for use by the driver (all asics).
1068 * Returns 0 on success or -EINVAL on failure.
1069 */
1070 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1071 {
1072 unsigned long flags, offset;
1073
1074 spin_lock_irqsave(&adev->wb.lock, flags);
1075 offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1076 if (offset < adev->wb.num_wb) {
1077 __set_bit(offset, adev->wb.used);
1078 spin_unlock_irqrestore(&adev->wb.lock, flags);
1079 *wb = offset << 3; /* convert to dw offset */
1080 return 0;
1081 } else {
1082 spin_unlock_irqrestore(&adev->wb.lock, flags);
1083 return -EINVAL;
1084 }
1085 }
1086
1087 /**
1088 * amdgpu_device_wb_free - Free a wb entry
1089 *
1090 * @adev: amdgpu_device pointer
1091 * @wb: wb index
1092 *
1093 * Free a wb slot allocated for use by the driver (all asics)
1094 */
1095 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1096 {
1097 unsigned long flags;
1098
1099 wb >>= 3;
1100 spin_lock_irqsave(&adev->wb.lock, flags);
1101 if (wb < adev->wb.num_wb)
1102 __clear_bit(wb, adev->wb.used);
1103 spin_unlock_irqrestore(&adev->wb.lock, flags);
1104 }
1105
1106 /**
1107 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1108 *
1109 * @adev: amdgpu_device pointer
1110 *
1111 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1112 * to fail, but if any of the BARs is not accessible after the resize we abort
1113 * driver loading by returning -ENODEV.
1114 */
1115 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1116 {
1117 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1118 struct pci_bus *root;
1119 struct resource *res;
1120 int max_size, r;
1121 unsigned int i;
1122 u16 cmd;
1123
1124 if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1125 return 0;
1126
1127 /* Bypass for VF */
1128 if (amdgpu_sriov_vf(adev))
1129 return 0;
1130
1131 if (!amdgpu_rebar)
1132 return 0;
1133
1134 /* resizing on Dell G5 SE platforms causes problems with runtime pm */
1135 if ((amdgpu_runtime_pm != 0) &&
1136 adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
1137 adev->pdev->device == 0x731f &&
1138 adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
1139 return 0;
1140
1141 /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
1142 if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
1143 dev_warn(
1144 adev->dev,
1145 "System can't access extended configuration space, please check!!\n");
1146
1147 /* skip if the bios has already enabled large BAR */
1148 if (adev->gmc.real_vram_size &&
1149 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1150 return 0;
1151
1152 /* Check if the root BUS has 64bit memory resources */
1153 root = adev->pdev->bus;
1154 while (root->parent)
1155 root = root->parent;
1156
1157 pci_bus_for_each_resource(root, res, i) {
1158 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1159 res->start > 0x100000000ull)
1160 break;
1161 }
1162
1163 /* Trying to resize is pointless without a root hub window above 4GB */
1164 if (!res)
1165 return 0;
1166
1167 /* Limit the BAR size to what is available */
1168 max_size = pci_rebar_get_max_size(adev->pdev, 0);
1169 if (max_size < 0)
1170 return 0;
1171 rbar_size = min(max_size, rbar_size);
1172
1173 /* Disable memory decoding while we change the BAR addresses and size */
1174 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1175 pci_write_config_word(adev->pdev, PCI_COMMAND,
1176 cmd & ~PCI_COMMAND_MEMORY);
1177
1178 /* Tear down doorbell as resizing will release BARs */
1179 amdgpu_doorbell_fini(adev);
1180
1181 r = pci_resize_resource(adev->pdev, 0, rbar_size,
1182 (adev->asic_type >= CHIP_BONAIRE) ? 1 << 5
1183 : 1 << 2);
1184 if (r == -ENOSPC)
1185 dev_info(adev->dev,
1186 "Not enough PCI address space for a large BAR.");
1187 else if (r && r != -ENOTSUPP)
1188 dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
1189
1190 /* When the doorbell or fb BAR isn't available we have no chance of
1191 * using the device.
1192 */
1193 r = amdgpu_doorbell_init(adev);
1194 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1195 return -ENODEV;
1196
1197 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1198
1199 return 0;
1200 }
1201
1202 /*
1203 * GPU helpers function.
1204 */
1205 /**
1206 * amdgpu_device_need_post - check if the hw need post or not
1207 *
1208 * @adev: amdgpu_device pointer
1209 *
1210 * Check if the asic has been initialized (all asics) at driver startup
1211 * or post is needed if hw reset is performed.
1212 * Returns true if post is needed or false if not.
1213 */
1214 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1215 {
1216 uint32_t reg, flags;
1217
1218 if (amdgpu_sriov_vf(adev))
1219 return false;
1220
1221 flags = amdgpu_device_get_vbios_flags(adev);
1222 if (flags & AMDGPU_VBIOS_SKIP)
1223 return false;
1224 if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
1225 return false;
1226
1227 if (amdgpu_passthrough(adev)) {
1228 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
1229 * reboot some old SMC firmware still needs the driver to do a vPost,
1230 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1231 * this flaw, so force vPost for SMC versions below 22.15
1232 */
1233 if (adev->asic_type == CHIP_FIJI) {
1234 int err;
1235 uint32_t fw_ver;
1236
1237 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1238 /* force vPost if error occurred */
1239 if (err)
1240 return true;
1241
1242 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1243 release_firmware(adev->pm.fw);
1244 if (fw_ver < 0x00160e00)
1245 return true;
1246 }
1247 }
1248
1249 /* Don't post if we need to reset whole hive on init */
1250 if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
1251 return false;
1252
1253 if (adev->has_hw_reset) {
1254 adev->has_hw_reset = false;
1255 return true;
1256 }
1257
1258 /* bios scratch used on CIK+ */
1259 if (adev->asic_type >= CHIP_BONAIRE)
1260 return amdgpu_atombios_scratch_need_asic_init(adev);
1261
1262 /* check MEM_SIZE for older asics */
1263 reg = amdgpu_asic_get_config_memsize(adev);
1264
1265 if ((reg != 0) && (reg != 0xffffffff))
1266 return false;
1267
1268 return true;
1269 }
1270
1271 /*
1272 * Check whether seamless boot is supported.
1273 *
1274 * So far we only support seamless boot on DCE 3.0 or later.
1275 * If users report that it works on older ASICS as well, we may
1276 * loosen this.
1277 */
1278 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
1279 {
1280 switch (amdgpu_seamless) {
1281 case -1:
1282 break;
1283 case 1:
1284 return true;
1285 case 0:
1286 return false;
1287 default:
1288 dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
1289 amdgpu_seamless);
1290 return false;
1291 }
1292
1293 if (!(adev->flags & AMD_IS_APU))
1294 return false;
1295
1296 if (adev->mman.keep_stolen_vga_memory)
1297 return false;
1298
1299 return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
1300 }
1301
1302 /*
1303 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
1304 * don't support dynamic speed switching. Until we have confirmation from Intel
1305 * that a specific host supports it, it's safer that we keep it disabled for all.
1306 *
1307 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1308 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1309 */
1310 static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
1311 {
1312 #if IS_ENABLED(CONFIG_X86)
1313 struct cpuinfo_x86 *c = &cpu_data(0);
1314
1315 /* eGPU change speeds based on USB4 fabric conditions */
1316 if (dev_is_removable(adev->dev))
1317 return true;
1318
1319 if (c->x86_vendor == X86_VENDOR_INTEL)
1320 return false;
1321 #endif
1322 return true;
1323 }
1324
1325 static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
1326 {
1327 /* Enabling ASPM causes random hangs on Tahiti and Oland on Zen4.
1328 * It's unclear if this is a platform-specific or GPU-specific issue.
1329 * Disable ASPM on SI for the time being.
1330 */
1331 if (adev->family == AMDGPU_FAMILY_SI)
1332 return true;
1333
1334 #if IS_ENABLED(CONFIG_X86)
1335 struct cpuinfo_x86 *c = &cpu_data(0);
1336
1337 if (c->x86_vendor == X86_VENDOR_INTEL) {
1338 switch (c->x86_model) {
1339 case VFM_MODEL(INTEL_ALDERLAKE):
1340 case VFM_MODEL(INTEL_ALDERLAKE_L):
1341 case VFM_MODEL(INTEL_RAPTORLAKE):
1342 case VFM_MODEL(INTEL_RAPTORLAKE_P):
1343 case VFM_MODEL(INTEL_RAPTORLAKE_S):
1344 case VFM_MODEL(INTEL_TIGERLAKE):
1345 case VFM_MODEL(INTEL_TIGERLAKE_L):
1346 return true;
1347 default:
1348 return false;
1349 }
1350 } else {
1351 return false;
1352 }
1353 #else
1354 return false;
1355 #endif
1356 }
1357
1358 /**
1359 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1360 *
1361 * @adev: amdgpu_device pointer
1362 *
1363 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1364 * be set for this device.
1365 *
1366 * Returns true if it should be used or false if not.
1367 */
1368 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1369 {
1370 switch (amdgpu_aspm) {
1371 case -1:
1372 break;
1373 case 0:
1374 return false;
1375 case 1:
1376 return true;
1377 default:
1378 return false;
1379 }
1380 if (adev->flags & AMD_IS_APU)
1381 return false;
1382 if (amdgpu_device_aspm_support_quirk(adev))
1383 return false;
1384 return pcie_aspm_enabled(adev->pdev);
1385 }
1386
1387 /* if we get transitioned to only one device, take VGA back */
1388 /**
1389 * amdgpu_device_vga_set_decode - enable/disable vga decode
1390 *
1391 * @pdev: PCI device pointer
1392 * @state: enable/disable vga decode
1393 *
1394 * Enable/disable vga decode (all asics).
1395 * Returns VGA resource flags.
1396 */
1397 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1398 bool state)
1399 {
1400 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1401
1402 amdgpu_asic_set_vga_state(adev, state);
1403 if (state)
1404 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1405 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1406 else
1407 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1408 }
1409
1410 /**
1411 * amdgpu_device_check_block_size - validate the vm block size
1412 *
1413 * @adev: amdgpu_device pointer
1414 *
1415 * Validates the vm block size specified via module parameter.
1416 * The vm block size defines number of bits in page table versus page directory,
1417 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1418 * page table and the remaining bits are in the page directory.
1419 */
1420 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1421 {
1422 /* defines number of bits in page table versus page directory,
1423 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1424 * page table and the remaining bits are in the page directory
1425 */
1426 if (amdgpu_vm_block_size == -1)
1427 return;
1428
1429 if (amdgpu_vm_block_size < 9) {
1430 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1431 amdgpu_vm_block_size);
1432 amdgpu_vm_block_size = -1;
1433 }
1434 }
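
/*
 * Illustrative math only: with amdgpu_vm_block_size=9, a leaf page table
 * holds 2^9 = 512 entries and therefore maps 512 * 4KB = 2MB of address
 * space; the remaining virtual address bits are handled by the page
 * directory levels.
 */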
1435
1436 /**
1437 * amdgpu_device_check_vm_size - validate the vm size
1438 *
1439 * @adev: amdgpu_device pointer
1440 *
1441 * Validates the vm size in GB specified via module parameter.
1442 * The VM size is the size of the GPU virtual memory space in GB.
1443 */
1444 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1445 {
1446 /* no need to check the default value */
1447 if (amdgpu_vm_size == -1)
1448 return;
1449
1450 if (amdgpu_vm_size < 1) {
1451 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1452 amdgpu_vm_size);
1453 amdgpu_vm_size = -1;
1454 }
1455 }
1456
1457 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1458 {
1459 struct sysinfo si;
1460 bool is_os_64 = (sizeof(void *) == 8);
1461 uint64_t total_memory;
1462 uint64_t dram_size_seven_GB = 0x1B8000000;
1463 uint64_t dram_size_three_GB = 0xB8000000;
1464
1465 if (amdgpu_smu_memory_pool_size == 0)
1466 return;
1467
1468 if (!is_os_64) {
1469 dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
1470 goto def_value;
1471 }
1472 si_meminfo(&si);
1473 total_memory = (uint64_t)si.totalram * si.mem_unit;
1474
1475 if ((amdgpu_smu_memory_pool_size == 1) ||
1476 (amdgpu_smu_memory_pool_size == 2)) {
1477 if (total_memory < dram_size_three_GB)
1478 goto def_value1;
1479 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1480 (amdgpu_smu_memory_pool_size == 8)) {
1481 if (total_memory < dram_size_seven_GB)
1482 goto def_value1;
1483 } else {
1484 dev_warn(adev->dev, "Smu memory pool size not supported\n");
1485 goto def_value;
1486 }
1487 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1488
1489 return;
1490
1491 def_value1:
1492 dev_warn(adev->dev, "Not enough system memory\n");
1493 def_value:
1494 adev->pm.smu_prv_buffer_size = 0;
1495 }
1496
1497 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1498 {
1499 if (!(adev->flags & AMD_IS_APU) ||
1500 adev->asic_type < CHIP_RAVEN)
1501 return 0;
1502
1503 switch (adev->asic_type) {
1504 case CHIP_RAVEN:
1505 if (adev->pdev->device == 0x15dd)
1506 adev->apu_flags |= AMD_APU_IS_RAVEN;
1507 if (adev->pdev->device == 0x15d8)
1508 adev->apu_flags |= AMD_APU_IS_PICASSO;
1509 break;
1510 case CHIP_RENOIR:
1511 if ((adev->pdev->device == 0x1636) ||
1512 (adev->pdev->device == 0x164c))
1513 adev->apu_flags |= AMD_APU_IS_RENOIR;
1514 else
1515 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1516 break;
1517 case CHIP_VANGOGH:
1518 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1519 break;
1520 case CHIP_YELLOW_CARP:
1521 break;
1522 case CHIP_CYAN_SKILLFISH:
1523 if ((adev->pdev->device == 0x13FE) ||
1524 (adev->pdev->device == 0x143F))
1525 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1526 break;
1527 default:
1528 break;
1529 }
1530
1531 return 0;
1532 }
1533
1534 /**
1535 * amdgpu_device_check_arguments - validate module params
1536 *
1537 * @adev: amdgpu_device pointer
1538 *
1539 * Validates certain module parameters and updates
1540 * the associated values used by the driver (all asics).
1541 */
1542 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1543 {
1544 int i;
1545
1546 if (amdgpu_sched_jobs < 4) {
1547 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1548 amdgpu_sched_jobs);
1549 amdgpu_sched_jobs = 4;
1550 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1551 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1552 amdgpu_sched_jobs);
1553 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1554 }
1555
1556 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1557 /* gart size must be greater or equal to 32M */
1558 dev_warn(adev->dev, "gart size (%d) too small\n",
1559 amdgpu_gart_size);
1560 amdgpu_gart_size = -1;
1561 }
1562
1563 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1564 /* gtt size must be greater or equal to 32M */
1565 dev_warn(adev->dev, "gtt size (%d) too small\n",
1566 amdgpu_gtt_size);
1567 amdgpu_gtt_size = -1;
1568 }
1569
1570 /* valid range is between 4 and 9 inclusive */
1571 if (amdgpu_vm_fragment_size != -1 &&
1572 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1573 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1574 amdgpu_vm_fragment_size = -1;
1575 }
1576
1577 if (amdgpu_sched_hw_submission < 2) {
1578 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1579 amdgpu_sched_hw_submission);
1580 amdgpu_sched_hw_submission = 2;
1581 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1582 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1583 amdgpu_sched_hw_submission);
1584 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1585 }
1586
1587 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1588 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1589 amdgpu_reset_method = -1;
1590 }
1591
1592 amdgpu_device_check_smu_prv_buffer_size(adev);
1593
1594 amdgpu_device_check_vm_size(adev);
1595
1596 amdgpu_device_check_block_size(adev);
1597
1598 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1599
1600 for (i = 0; i < MAX_XCP; i++) {
1601 switch (amdgpu_enforce_isolation) {
1602 case -1:
1603 case 0:
1604 default:
1605 /* disable */
1606 adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
1607 break;
1608 case 1:
1609 /* enable */
1610 adev->enforce_isolation[i] =
1611 AMDGPU_ENFORCE_ISOLATION_ENABLE;
1612 break;
1613 case 2:
1614 /* enable legacy mode */
1615 adev->enforce_isolation[i] =
1616 AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
1617 break;
1618 case 3:
1619 /* enable only process isolation without submitting cleaner shader */
1620 adev->enforce_isolation[i] =
1621 AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
1622 break;
1623 }
1624 }
1625
1626 return 0;
1627 }
1628
1629 /**
1630 * amdgpu_switcheroo_set_state - set switcheroo state
1631 *
1632 * @pdev: pci dev pointer
1633 * @state: vga_switcheroo state
1634 *
1635 * Callback for the switcheroo driver. Suspends or resumes
1636 * the asics before or after it is powered up using ACPI methods.
1637 */
1638 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1639 enum vga_switcheroo_state state)
1640 {
1641 struct drm_device *dev = pci_get_drvdata(pdev);
1642 int r;
1643
1644 if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
1645 state == VGA_SWITCHEROO_OFF)
1646 return;
1647
1648 if (state == VGA_SWITCHEROO_ON) {
1649 pr_info("switched on\n");
1650 /* don't suspend or resume card normally */
1651 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1652
1653 pci_set_power_state(pdev, PCI_D0);
1654 amdgpu_device_load_pci_state(pdev);
1655 r = pci_enable_device(pdev);
1656 if (r)
1657 dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
1658 r);
1659 amdgpu_device_resume(dev, true);
1660
1661 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1662 } else {
1663 dev_info(&pdev->dev, "switched off\n");
1664 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1665 amdgpu_device_prepare(dev);
1666 amdgpu_device_suspend(dev, true);
1667 amdgpu_device_cache_pci_state(pdev);
1668 /* Shut down the device */
1669 pci_disable_device(pdev);
1670 pci_set_power_state(pdev, PCI_D3cold);
1671 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1672 }
1673 }
1674
1675 /**
1676 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1677 *
1678 * @pdev: pci dev pointer
1679 *
1680 * Callback for the switcheroo driver. Checks whether the switcheroo
1681 * state can be changed.
1682 * Returns true if the state can be changed, false if not.
1683 */
1684 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1685 {
1686 struct drm_device *dev = pci_get_drvdata(pdev);
1687
1688 /*
1689 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1690 * locking inversion with the driver load path. And the access here is
1691 * completely racy anyway. So don't bother with locking for now.
1692 */
1693 return atomic_read(&dev->open_count) == 0;
1694 }
1695
1696 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1697 .set_gpu_state = amdgpu_switcheroo_set_state,
1698 .reprobe = NULL,
1699 .can_switch = amdgpu_switcheroo_can_switch,
1700 };
1701
1702 /**
1703 * amdgpu_device_enable_virtual_display - enable virtual display feature
1704 *
1705 * @adev: amdgpu_device pointer
1706 *
1707 * Enables the virtual display feature if the user has enabled it via
1708 * the module parameter virtual_display. This feature provides a virtual
1709 * display hardware on headless boards or in virtualized environments.
1710 * This function parses and validates the configuration string specified by
1711 * the user and configures the virtual display configuration (number of
1712 * virtual connectors, crtcs, etc.) specified.
1713 */
1714 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1715 {
1716 adev->enable_virtual_display = false;
1717
1718 if (amdgpu_virtual_display) {
1719 const char *pci_address_name = pci_name(adev->pdev);
1720 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1721
1722 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1723 pciaddstr_tmp = pciaddstr;
1724 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1725 pciaddname = strsep(&pciaddname_tmp, ",");
1726 if (!strcmp("all", pciaddname)
1727 || !strcmp(pci_address_name, pciaddname)) {
1728 long num_crtc;
1729 int res = -1;
1730
1731 adev->enable_virtual_display = true;
1732
1733 if (pciaddname_tmp)
1734 res = kstrtol(pciaddname_tmp, 10,
1735 &num_crtc);
1736
1737 if (!res) {
1738 if (num_crtc < 1)
1739 num_crtc = 1;
1740 if (num_crtc > 6)
1741 num_crtc = 6;
1742 adev->mode_info.num_crtc = num_crtc;
1743 } else {
1744 adev->mode_info.num_crtc = 1;
1745 }
1746 break;
1747 }
1748 }
1749
1750 dev_info(
1751 adev->dev,
1752 "virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1753 amdgpu_virtual_display, pci_address_name,
1754 adev->enable_virtual_display, adev->mode_info.num_crtc);
1755
1756 kfree(pciaddstr);
1757 }
1758 }
1759
1760 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1761 {
1762 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1763 adev->mode_info.num_crtc = 1;
1764 adev->enable_virtual_display = true;
1765 dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
1766 adev->enable_virtual_display,
1767 adev->mode_info.num_crtc);
1768 }
1769 }
1770
1771 /**
1772 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1773 *
1774 * @adev: amdgpu_device pointer
1775 *
1776 * Parses the asic configuration parameters specified in the gpu info
1777 * firmware and makes them available to the driver for use in configuring
1778 * the asic.
1779 * Returns 0 on success, -EINVAL on failure.
1780 */
1781 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1782 {
1783 const char *chip_name;
1784 int err;
1785 const struct gpu_info_firmware_header_v1_0 *hdr;
1786
1787 adev->firmware.gpu_info_fw = NULL;
1788
1789 switch (adev->asic_type) {
1790 default:
1791 return 0;
1792 case CHIP_VEGA10:
1793 chip_name = "vega10";
1794 break;
1795 case CHIP_VEGA12:
1796 chip_name = "vega12";
1797 break;
1798 case CHIP_RAVEN:
1799 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1800 chip_name = "raven2";
1801 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1802 chip_name = "picasso";
1803 else
1804 chip_name = "raven";
1805 break;
1806 case CHIP_ARCTURUS:
1807 chip_name = "arcturus";
1808 break;
1809 case CHIP_NAVI12:
1810 if (adev->discovery.bin)
1811 return 0;
1812 chip_name = "navi12";
1813 break;
1814 case CHIP_CYAN_SKILLFISH:
1815 if (adev->discovery.bin)
1816 return 0;
1817 chip_name = "cyan_skillfish";
1818 break;
1819 }
1820
1821 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
1822 AMDGPU_UCODE_OPTIONAL,
1823 "amdgpu/%s_gpu_info.bin", chip_name);
1824 if (err) {
1825 dev_err(adev->dev,
1826 "Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
1827 chip_name);
1828 goto out;
1829 }
1830
1831 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1832 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1833
1834 switch (hdr->version_major) {
1835 case 1:
1836 {
1837 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1838 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1839 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1840
1841 /*
1842 * Should be dropped when DAL no longer needs it.
1843 */
1844 if (adev->asic_type == CHIP_NAVI12)
1845 goto parse_soc_bounding_box;
1846
1847 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1848 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1849 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1850 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1851 adev->gfx.config.max_texture_channel_caches =
1852 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1853 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1854 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1855 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1856 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1857 adev->gfx.config.double_offchip_lds_buf =
1858 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1859 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1860 adev->gfx.cu_info.max_waves_per_simd =
1861 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1862 adev->gfx.cu_info.max_scratch_slots_per_cu =
1863 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1864 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1865 if (hdr->version_minor >= 1) {
1866 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1867 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1868 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1869 adev->gfx.config.num_sc_per_sh =
1870 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1871 adev->gfx.config.num_packer_per_sc =
1872 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1873 }
1874
1875 parse_soc_bounding_box:
1876 /*
 * soc bounding box info is not integrated in the discovery table,
 * so we always need to parse it from the gpu info firmware if needed.
1879 */
1880 if (hdr->version_minor == 2) {
1881 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1882 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1883 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1884 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1885 }
1886 break;
1887 }
1888 default:
1889 dev_err(adev->dev,
1890 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1891 err = -EINVAL;
1892 goto out;
1893 }
1894 out:
1895 return err;
1896 }
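
/*
 * Version layout handled by amdgpu_device_parse_gpu_info_fw() above
 * (summary for reference): later minor versions extend the v1_0 payload,
 * which is why hdr->version_minor is checked before casting to the wider
 * structures:
 *
 *	v1_0: gc_* shader engine / CU / wavefront / LDS parameters
 *	v1_1: adds num_sc_per_sh and num_packer_per_sc
 *	v1_2: adds the soc_bounding_box consumed by DC (Navi12)
 */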
1897
static void amdgpu_uid_init(struct amdgpu_device *adev)
1899 {
1900 /* Initialize the UID for the device */
1901 adev->uid_info = kzalloc_obj(struct amdgpu_uid);
1902 if (!adev->uid_info) {
1903 dev_warn(adev->dev, "Failed to allocate memory for UID\n");
1904 return;
1905 }
1906 adev->uid_info->adev = adev;
1907 }
1908
static void amdgpu_uid_fini(struct amdgpu_device *adev)
1910 {
1911 /* Free the UID memory */
1912 kfree(adev->uid_info);
1913 adev->uid_info = NULL;
1914 }
1915
1916 /**
1917 * amdgpu_device_ip_early_init - run early init for hardware IPs
1918 *
1919 * @adev: amdgpu_device pointer
1920 *
1921 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
1923 * is the first stage in initializing the asic.
1924 * Returns 0 on success, negative error code on failure.
1925 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1927 {
1928 struct amdgpu_ip_block *ip_block;
1929 struct pci_dev *parent;
1930 bool total, skip_bios;
1931 uint32_t bios_flags;
1932 int i, r;
1933
1934 amdgpu_device_enable_virtual_display(adev);
1935
1936 if (amdgpu_sriov_vf(adev)) {
1937 r = amdgpu_virt_request_full_gpu(adev, true);
1938 if (r)
1939 return r;
1940
1941 r = amdgpu_virt_init_critical_region(adev);
1942 if (r)
1943 return r;
1944 }
1945
1946 switch (adev->asic_type) {
1947 #ifdef CONFIG_DRM_AMDGPU_SI
1948 case CHIP_VERDE:
1949 case CHIP_TAHITI:
1950 case CHIP_PITCAIRN:
1951 case CHIP_OLAND:
1952 case CHIP_HAINAN:
1953 adev->family = AMDGPU_FAMILY_SI;
1954 r = si_set_ip_blocks(adev);
1955 if (r)
1956 return r;
1957 break;
1958 #endif
1959 #ifdef CONFIG_DRM_AMDGPU_CIK
1960 case CHIP_BONAIRE:
1961 case CHIP_HAWAII:
1962 case CHIP_KAVERI:
1963 case CHIP_KABINI:
1964 case CHIP_MULLINS:
1965 if (adev->flags & AMD_IS_APU)
1966 adev->family = AMDGPU_FAMILY_KV;
1967 else
1968 adev->family = AMDGPU_FAMILY_CI;
1969
1970 r = cik_set_ip_blocks(adev);
1971 if (r)
1972 return r;
1973 break;
1974 #endif
1975 case CHIP_TOPAZ:
1976 case CHIP_TONGA:
1977 case CHIP_FIJI:
1978 case CHIP_POLARIS10:
1979 case CHIP_POLARIS11:
1980 case CHIP_POLARIS12:
1981 case CHIP_VEGAM:
1982 case CHIP_CARRIZO:
1983 case CHIP_STONEY:
1984 if (adev->flags & AMD_IS_APU)
1985 adev->family = AMDGPU_FAMILY_CZ;
1986 else
1987 adev->family = AMDGPU_FAMILY_VI;
1988
1989 r = vi_set_ip_blocks(adev);
1990 if (r)
1991 return r;
1992 break;
1993 default:
1994 r = amdgpu_discovery_set_ip_blocks(adev);
1995 if (r) {
1996 adev->num_ip_blocks = 0;
1997 return r;
1998 }
1999 break;
2000 }
2001
2002 /* Check for IP version 9.4.3 with A0 hardware */
2003 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
2004 !amdgpu_device_get_rev_id(adev)) {
2005 dev_err(adev->dev, "Unsupported A0 hardware\n");
2006 return -ENODEV; /* device unsupported - no device error */
2007 }
2008
2009 if (amdgpu_has_atpx() &&
2010 (amdgpu_is_atpx_hybrid() ||
2011 amdgpu_has_atpx_dgpu_power_cntl()) &&
2012 ((adev->flags & AMD_IS_APU) == 0) &&
2013 !dev_is_removable(&adev->pdev->dev))
2014 adev->flags |= AMD_IS_PX;
2015
2016 if (!(adev->flags & AMD_IS_APU)) {
2017 parent = pcie_find_root_port(adev->pdev);
2018 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2019 }
2020
2021 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2022 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2023 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2024 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2025 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2026 if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2027 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2028
2029 adev->virt.is_xgmi_node_migrate_enabled = false;
2030 if (amdgpu_sriov_vf(adev)) {
2031 adev->virt.is_xgmi_node_migrate_enabled =
2032 amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
2033 }
2034
2035 total = true;
2036 for (i = 0; i < adev->num_ip_blocks; i++) {
2037 ip_block = &adev->ip_blocks[i];
2038
2039 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2040 dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
2041 adev->ip_blocks[i].version->funcs->name);
2042 adev->ip_blocks[i].status.valid = false;
2043 } else if (ip_block->version->funcs->early_init) {
2044 r = ip_block->version->funcs->early_init(ip_block);
2045 if (r == -ENOENT) {
2046 adev->ip_blocks[i].status.valid = false;
2047 } else if (r) {
2048 dev_err(adev->dev,
2049 "early_init of IP block <%s> failed %d\n",
2050 adev->ip_blocks[i].version->funcs->name,
2051 r);
2052 total = false;
2053 } else {
2054 adev->ip_blocks[i].status.valid = true;
2055 }
2056 } else {
2057 adev->ip_blocks[i].status.valid = true;
2058 }
2059 /* get the vbios after the asic_funcs are set up */
2060 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2061 r = amdgpu_device_parse_gpu_info_fw(adev);
2062 if (r)
2063 return r;
2064
2065 bios_flags = amdgpu_device_get_vbios_flags(adev);
2066 skip_bios = !!(bios_flags & AMDGPU_VBIOS_SKIP);
2067 /* Read BIOS */
2068 if (!skip_bios) {
2069 bool optional =
2070 !!(bios_flags & AMDGPU_VBIOS_OPTIONAL);
2071 if (!amdgpu_get_bios(adev) && !optional)
2072 return -EINVAL;
2073
2074 if (optional && !adev->bios)
2075 dev_info(
2076 adev->dev,
2077 "VBIOS image optional, proceeding without VBIOS image");
2078
2079 if (adev->bios) {
2080 r = amdgpu_atombios_init(adev);
2081 if (r) {
2082 dev_err(adev->dev,
2083 "amdgpu_atombios_init failed\n");
2084 amdgpu_vf_error_put(
2085 adev,
2086 AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL,
2087 0, 0);
2088 return r;
2089 }
2090 }
2091 }
2092
			/* get pf2vf msg info at its earliest time */
2094 if (amdgpu_sriov_vf(adev))
2095 amdgpu_virt_init_data_exchange(adev);
2096
2097 }
2098 }
2099 if (!total)
2100 return -ENODEV;
2101
2102 if (adev->gmc.xgmi.supported)
2103 amdgpu_xgmi_early_init(adev);
2104
2105 if (amdgpu_is_multi_aid(adev))
2106 amdgpu_uid_init(adev);
2107 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
2108 if (ip_block->status.valid != false)
2109 amdgpu_amdkfd_device_probe(adev);
2110
2111 adev->cg_flags &= amdgpu_cg_mask;
2112 adev->pg_flags &= amdgpu_pg_mask;
2113
2114 return 0;
2115 }
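
/*
 * Illustrative example (not part of the original file): the ip_block_mask
 * module parameter checked in the loop above is a bitmask in which bit i
 * corresponds to adev->ip_blocks[i]. Clearing a bit marks that block as
 * invalid so its later sw_init/hw_init callbacks are skipped, e.g.
 *
 *	modprobe amdgpu ip_block_mask=0xfffffffd	// disable IP block index 1
 *
 * The block indices depend on the IP list discovered for the ASIC.
 */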
2116
static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2118 {
2119 int i, r;
2120
2121 for (i = 0; i < adev->num_ip_blocks; i++) {
2122 if (!adev->ip_blocks[i].status.sw)
2123 continue;
2124 if (adev->ip_blocks[i].status.hw)
2125 continue;
2126 if (!amdgpu_ip_member_of_hwini(
2127 adev, adev->ip_blocks[i].version->type))
2128 continue;
2129 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2130 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2131 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2132 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2133 if (r) {
2134 dev_err(adev->dev,
2135 "hw_init of IP block <%s> failed %d\n",
2136 adev->ip_blocks[i].version->funcs->name,
2137 r);
2138 return r;
2139 }
2140 adev->ip_blocks[i].status.hw = true;
2141 }
2142 }
2143
2144 return 0;
2145 }
2146
static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2148 {
2149 int i, r;
2150
2151 for (i = 0; i < adev->num_ip_blocks; i++) {
2152 if (!adev->ip_blocks[i].status.sw)
2153 continue;
2154 if (adev->ip_blocks[i].status.hw)
2155 continue;
2156 if (!amdgpu_ip_member_of_hwini(
2157 adev, adev->ip_blocks[i].version->type))
2158 continue;
2159 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2160 if (r) {
2161 dev_err(adev->dev,
2162 "hw_init of IP block <%s> failed %d\n",
2163 adev->ip_blocks[i].version->funcs->name, r);
2164 return r;
2165 }
2166 adev->ip_blocks[i].status.hw = true;
2167 }
2168
2169 return 0;
2170 }
2171
static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2173 {
2174 int r = 0;
2175 int i;
2176 uint32_t smu_version;
2177
2178 if (adev->asic_type >= CHIP_VEGA10) {
2179 for (i = 0; i < adev->num_ip_blocks; i++) {
2180 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2181 continue;
2182
2183 if (!amdgpu_ip_member_of_hwini(adev,
2184 AMD_IP_BLOCK_TYPE_PSP))
2185 break;
2186
2187 if (!adev->ip_blocks[i].status.sw)
2188 continue;
2189
			/* no need to do the fw loading again if already done */
2191 if (adev->ip_blocks[i].status.hw == true)
2192 break;
2193
2194 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2195 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
2196 if (r)
2197 return r;
2198 } else {
2199 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2200 if (r) {
2201 dev_err(adev->dev,
2202 "hw_init of IP block <%s> failed %d\n",
2203 adev->ip_blocks[i]
2204 .version->funcs->name,
2205 r);
2206 return r;
2207 }
2208 adev->ip_blocks[i].status.hw = true;
2209 }
2210 break;
2211 }
2212 }
2213
2214 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2215 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2216
2217 return r;
2218 }
2219
static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2221 {
2222 struct drm_sched_init_args args = {
2223 .ops = &amdgpu_sched_ops,
2224 .num_rqs = DRM_SCHED_PRIORITY_COUNT,
2225 .timeout_wq = adev->reset_domain->wq,
2226 .dev = adev->dev,
2227 };
2228 long timeout;
2229 int r, i;
2230
2231 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2232 struct amdgpu_ring *ring = adev->rings[i];
2233
2234 /* No need to setup the GPU scheduler for rings that don't need it */
2235 if (!ring || ring->no_scheduler)
2236 continue;
2237
2238 switch (ring->funcs->type) {
2239 case AMDGPU_RING_TYPE_GFX:
2240 timeout = adev->gfx_timeout;
2241 break;
2242 case AMDGPU_RING_TYPE_COMPUTE:
2243 timeout = adev->compute_timeout;
2244 break;
2245 case AMDGPU_RING_TYPE_SDMA:
2246 timeout = adev->sdma_timeout;
2247 break;
2248 default:
2249 timeout = adev->video_timeout;
2250 break;
2251 }
2252
2253 args.timeout = timeout;
2254 args.credit_limit = ring->num_hw_submission;
2255 args.score = ring->sched_score;
2256 args.name = ring->name;
2257
2258 r = drm_sched_init(&ring->sched, &args);
2259 if (r) {
2260 dev_err(adev->dev,
2261 "Failed to create scheduler on ring %s.\n",
2262 ring->name);
2263 return r;
2264 }
2265 r = amdgpu_uvd_entity_init(adev, ring);
2266 if (r) {
2267 dev_err(adev->dev,
2268 "Failed to create UVD scheduling entity on ring %s.\n",
2269 ring->name);
2270 return r;
2271 }
2272 r = amdgpu_vce_entity_init(adev, ring);
2273 if (r) {
2274 dev_err(adev->dev,
2275 "Failed to create VCE scheduling entity on ring %s.\n",
2276 ring->name);
2277 return r;
2278 }
2279 }
2280
2281 if (adev->xcp_mgr)
2282 amdgpu_xcp_update_partition_sched_list(adev);
2283
2284 return 0;
2285 }
2286
2287
2288 /**
2289 * amdgpu_device_ip_init - run init for hardware IPs
2290 *
2291 * @adev: amdgpu_device pointer
2292 *
2293 * Main initialization pass for hardware IPs. The list of all the hardware
2294 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2295 * are run. sw_init initializes the software state associated with each IP
2296 * and hw_init initializes the hardware associated with each IP.
2297 * Returns 0 on success, negative error code on failure.
2298 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2300 {
2301 bool init_badpage;
2302 int i, r;
2303
2304 r = amdgpu_ras_init(adev);
2305 if (r)
2306 return r;
2307
2308 for (i = 0; i < adev->num_ip_blocks; i++) {
2309 if (!adev->ip_blocks[i].status.valid)
2310 continue;
2311 if (adev->ip_blocks[i].version->funcs->sw_init) {
2312 r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
2313 if (r) {
2314 dev_err(adev->dev,
2315 "sw_init of IP block <%s> failed %d\n",
2316 adev->ip_blocks[i].version->funcs->name,
2317 r);
2318 goto init_failed;
2319 }
2320 }
2321 adev->ip_blocks[i].status.sw = true;
2322
2323 if (!amdgpu_ip_member_of_hwini(
2324 adev, adev->ip_blocks[i].version->type))
2325 continue;
2326
2327 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2328 /* need to do common hw init early so everything is set up for gmc */
2329 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2330 if (r) {
2331 dev_err(adev->dev, "hw_init %d failed %d\n", i,
2332 r);
2333 goto init_failed;
2334 }
2335 adev->ip_blocks[i].status.hw = true;
2336 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2337 /* need to do gmc hw init early so we can allocate gpu mem */
2338 /* Try to reserve bad pages early */
2339 if (amdgpu_sriov_vf(adev))
2340 amdgpu_virt_exchange_data(adev);
2341
2342 r = amdgpu_device_mem_scratch_init(adev);
2343 if (r) {
2344 dev_err(adev->dev,
2345 "amdgpu_mem_scratch_init failed %d\n",
2346 r);
2347 goto init_failed;
2348 }
2349 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2350 if (r) {
2351 dev_err(adev->dev, "hw_init %d failed %d\n", i,
2352 r);
2353 goto init_failed;
2354 }
2355 r = amdgpu_device_wb_init(adev);
2356 if (r) {
2357 dev_err(adev->dev,
2358 "amdgpu_device_wb_init failed %d\n", r);
2359 goto init_failed;
2360 }
2361 adev->ip_blocks[i].status.hw = true;
2362
2363 /* right after GMC hw init, we create CSA */
2364 if (adev->gfx.mcbp) {
2365 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2366 AMDGPU_GEM_DOMAIN_VRAM |
2367 AMDGPU_GEM_DOMAIN_GTT,
2368 AMDGPU_CSA_SIZE);
2369 if (r) {
2370 dev_err(adev->dev,
2371 "allocate CSA failed %d\n", r);
2372 goto init_failed;
2373 }
2374 }
2375
2376 r = amdgpu_seq64_init(adev);
2377 if (r) {
2378 dev_err(adev->dev, "allocate seq64 failed %d\n",
2379 r);
2380 goto init_failed;
2381 }
2382 }
2383 }
2384
2385 if (amdgpu_sriov_vf(adev))
2386 amdgpu_virt_init_data_exchange(adev);
2387
2388 r = amdgpu_ib_pool_init(adev);
2389 if (r) {
2390 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2391 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2392 goto init_failed;
2393 }
2394
2395 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2396 if (r)
2397 goto init_failed;
2398
2399 r = amdgpu_device_ip_hw_init_phase1(adev);
2400 if (r)
2401 goto init_failed;
2402
2403 r = amdgpu_device_fw_loading(adev);
2404 if (r)
2405 goto init_failed;
2406
2407 r = amdgpu_device_ip_hw_init_phase2(adev);
2408 if (r)
2409 goto init_failed;
2410
/*
 * Retired pages will be loaded from eeprom and reserved here; this
 * should be called after amdgpu_device_ip_hw_init_phase2 since, for
 * some ASICs, the RAS EEPROM code relies on the SMU being fully
 * functional for I2C communication, which is only true at this point.
 *
 * amdgpu_ras_recovery_init may fail, but the upper layers only care
 * about failures caused by a bad gpu situation and stop the amdgpu
 * init process accordingly. For other failure cases it still releases
 * all the resources and prints an error message, rather than returning
 * a negative value to the upper level.
 *
 * Note: theoretically, this should be called before all vram allocations
 * to protect retired pages from being abused.
 */
2426 init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
2427 r = amdgpu_ras_recovery_init(adev, init_badpage);
2428 if (r)
2429 goto init_failed;
2430
/*
 * In case of XGMI, grab an extra reference on the reset domain for this
 * device.
 */
2434 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2435 if (amdgpu_xgmi_add_device(adev) == 0) {
2436 if (!amdgpu_sriov_vf(adev)) {
2437 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2438
2439 if (WARN_ON(!hive)) {
2440 r = -ENOENT;
2441 goto init_failed;
2442 }
2443
2444 if (!hive->reset_domain ||
2445 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2446 r = -ENOENT;
2447 amdgpu_put_xgmi_hive(hive);
2448 goto init_failed;
2449 }
2450
2451 /* Drop the early temporary reset domain we created for device */
2452 amdgpu_reset_put_reset_domain(adev->reset_domain);
2453 adev->reset_domain = hive->reset_domain;
2454 amdgpu_put_xgmi_hive(hive);
2455 }
2456 }
2457 }
2458
2459 r = amdgpu_device_init_schedulers(adev);
2460 if (r)
2461 goto init_failed;
2462
2463 amdgpu_ttm_set_buffer_funcs_status(adev, true);
2464
2465 /* Don't init kfd if whole hive need to be reset during init */
2466 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
2467 amdgpu_amdkfd_device_init(adev);
2468 }
2469
2470 amdgpu_fru_get_product_info(adev);
2471
2472 r = amdgpu_cper_init(adev);
2473
2474 init_failed:
2475
2476 return r;
2477 }
2478
2479 /**
2480 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2481 *
2482 * @adev: amdgpu_device pointer
2483 *
2484 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2485 * this function before a GPU reset. If the value is retained after a
2486 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2487 */
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2489 {
2490 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2491 }
2492
2493 /**
2494 * amdgpu_device_check_vram_lost - check if vram is valid
2495 *
2496 * @adev: amdgpu_device pointer
2497 *
2498 * Checks the reset magic value written to the gart pointer in VRAM.
2499 * The driver calls this after a GPU reset to see if the contents of
 * VRAM have been lost or not.
 * Returns true if VRAM is lost, false if not.
2502 */
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2504 {
2505 if (memcmp(adev->gart.ptr, adev->reset_magic,
2506 AMDGPU_RESET_MAGIC_NUM))
2507 return true;
2508
2509 if (!amdgpu_in_reset(adev))
2510 return false;
2511
2512 /*
2513 * For all ASICs with baco/mode1 reset, the VRAM is
2514 * always assumed to be lost.
2515 */
2516 switch (amdgpu_asic_reset_method(adev)) {
2517 case AMD_RESET_METHOD_LEGACY:
2518 case AMD_RESET_METHOD_LINK:
2519 case AMD_RESET_METHOD_BACO:
2520 case AMD_RESET_METHOD_MODE1:
2521 return true;
2522 default:
2523 return false;
2524 }
2525 }
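
/*
 * Typical pairing of the two helpers above (illustrative sketch, not code
 * from this file): the magic is written before an ASIC reset and compared
 * afterwards to decide whether VRAM-backed buffers must be restored:
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	// ... perform the ASIC reset ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		amdgpu_recover_vram_contents(adev);	// hypothetical recovery step
 */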
2526
2527 /**
2528 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2529 *
2530 * @adev: amdgpu_device pointer
2531 * @state: clockgating state (gate or ungate)
2532 *
2533 * The list of all the hardware IPs that make up the asic is walked and the
2534 * set_clockgating_state callbacks are run.
 * On late init this pass enables clockgating for the hardware IPs; on
 * fini or suspend it disables clockgating again.
2537 * Returns 0 on success, negative error code on failure.
2538 */
2539
int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
			       enum amd_clockgating_state state)
2542 {
2543 int i, j, r;
2544
2545 if (amdgpu_emu_mode == 1)
2546 return 0;
2547
2548 for (j = 0; j < adev->num_ip_blocks; j++) {
2549 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2550 if (!adev->ip_blocks[i].status.late_initialized)
2551 continue;
2552 if (!adev->ip_blocks[i].version)
2553 continue;
2554 /* skip CG for GFX, SDMA on S0ix */
2555 if (adev->in_s0ix &&
2556 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2557 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2558 continue;
2559 /* skip CG for VCE/UVD, it's handled specially */
2560 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2561 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2562 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2563 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2564 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2565 /* enable clockgating to save power */
2566 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
2567 state);
2568 if (r) {
2569 dev_err(adev->dev,
2570 "set_clockgating_state(gate) of IP block <%s> failed %d\n",
2571 adev->ip_blocks[i].version->funcs->name,
2572 r);
2573 return r;
2574 }
2575 }
2576 }
2577
2578 return 0;
2579 }
2580
int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
			       enum amd_powergating_state state)
2583 {
2584 int i, j, r;
2585
2586 if (amdgpu_emu_mode == 1)
2587 return 0;
2588
2589 for (j = 0; j < adev->num_ip_blocks; j++) {
2590 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2591 if (!adev->ip_blocks[i].status.late_initialized)
2592 continue;
2593 if (!adev->ip_blocks[i].version)
2594 continue;
2595 /* skip PG for GFX, SDMA on S0ix */
2596 if (adev->in_s0ix &&
2597 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2598 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2599 continue;
		/* skip PG for VCE/UVD, it's handled specially */
2601 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2602 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2603 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2604 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2605 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2606 /* enable powergating to save power */
2607 r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
2608 state);
2609 if (r) {
2610 dev_err(adev->dev,
2611 "set_powergating_state(gate) of IP block <%s> failed %d\n",
2612 adev->ip_blocks[i].version->funcs->name,
2613 r);
2614 return r;
2615 }
2616 }
2617 }
2618 return 0;
2619 }
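
/*
 * Ordering note (illustrative, not from the original source): both helpers
 * above walk the IP block list in forward order when gating and in reverse
 * order when ungating, via the index mapping
 *
 *	i = (state == AMD_CG_STATE_GATE) ? j : adev->num_ip_blocks - j - 1;
 *
 * which mirrors the usual init/fini ordering of the IP blocks.
 */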
2620
static int amdgpu_device_enable_mgpu_fan_boost(void)
2622 {
2623 struct amdgpu_gpu_instance *gpu_ins;
2624 struct amdgpu_device *adev;
2625 int i, ret = 0;
2626
2627 mutex_lock(&mgpu_info.mutex);
2628
2629 /*
2630 * MGPU fan boost feature should be enabled
2631 * only when there are two or more dGPUs in
2632 * the system
2633 */
2634 if (mgpu_info.num_dgpu < 2)
2635 goto out;
2636
2637 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2638 gpu_ins = &(mgpu_info.gpu_ins[i]);
2639 adev = gpu_ins->adev;
2640 if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
2641 !gpu_ins->mgpu_fan_enabled) {
2642 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2643 if (ret)
2644 break;
2645
2646 gpu_ins->mgpu_fan_enabled = 1;
2647 }
2648 }
2649
2650 out:
2651 mutex_unlock(&mgpu_info.mutex);
2652
2653 return ret;
2654 }
2655
2656 /**
2657 * amdgpu_device_ip_late_init - run late init for hardware IPs
2658 *
2659 * @adev: amdgpu_device pointer
2660 *
2661 * Late initialization pass for hardware IPs. The list of all the hardware
2662 * IPs that make up the asic is walked and the late_init callbacks are run.
2663 * late_init covers any special initialization that an IP requires
 * after all of them have been initialized or something that needs to happen
2665 * late in the init process.
2666 * Returns 0 on success, negative error code on failure.
2667 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2669 {
2670 struct amdgpu_gpu_instance *gpu_instance;
2671 int i = 0, r;
2672
2673 for (i = 0; i < adev->num_ip_blocks; i++) {
2674 if (!adev->ip_blocks[i].status.hw)
2675 continue;
2676 if (adev->ip_blocks[i].version->funcs->late_init) {
2677 r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
2678 if (r) {
2679 dev_err(adev->dev,
2680 "late_init of IP block <%s> failed %d\n",
2681 adev->ip_blocks[i].version->funcs->name,
2682 r);
2683 return r;
2684 }
2685 }
2686 adev->ip_blocks[i].status.late_initialized = true;
2687 }
2688
2689 r = amdgpu_ras_late_init(adev);
2690 if (r) {
2691 dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
2692 return r;
2693 }
2694
2695 if (!amdgpu_reset_in_recovery(adev))
2696 amdgpu_ras_set_error_query_ready(adev, true);
2697
2698 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2699 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2700
2701 amdgpu_device_fill_reset_magic(adev);
2702
2703 r = amdgpu_device_enable_mgpu_fan_boost();
2704 if (r)
2705 dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
2706
2707 /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
2708 if (amdgpu_passthrough(adev) &&
2709 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2710 adev->asic_type == CHIP_ALDEBARAN))
2711 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2712
2713 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2714 mutex_lock(&mgpu_info.mutex);
2715
2716 /*
2717 * Reset device p-state to low as this was booted with high.
2718 *
2719 * This should be performed only after all devices from the same
2720 * hive get initialized.
2721 *
 * However, it's not known in advance how many devices are in the
 * hive, as this is counted one by one during device initialization.
 *
 * So, we wait for all XGMI interlinked devices to be initialized.
 * This may bring some delay as those devices may come from
2727 * different hives. But that should be OK.
2728 */
2729 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2730 for (i = 0; i < mgpu_info.num_gpu; i++) {
2731 gpu_instance = &(mgpu_info.gpu_ins[i]);
2732 if (gpu_instance->adev->flags & AMD_IS_APU)
2733 continue;
2734
2735 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2736 AMDGPU_XGMI_PSTATE_MIN);
2737 if (r) {
2738 dev_err(adev->dev,
2739 "pstate setting failed (%d).\n",
2740 r);
2741 break;
2742 }
2743 }
2744 }
2745
2746 mutex_unlock(&mgpu_info.mutex);
2747 }
2748
2749 return 0;
2750 }
2751
static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
2753 {
2754 struct amdgpu_device *adev = ip_block->adev;
2755 int r;
2756
2757 if (!ip_block->version->funcs->hw_fini) {
2758 dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
2759 ip_block->version->funcs->name);
2760 } else {
2761 r = ip_block->version->funcs->hw_fini(ip_block);
2762 /* XXX handle errors */
2763 if (r) {
2764 dev_dbg(adev->dev,
2765 "hw_fini of IP block <%s> failed %d\n",
2766 ip_block->version->funcs->name, r);
2767 }
2768 }
2769
2770 ip_block->status.hw = false;
2771 }
2772
2773 /**
2774 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2775 *
2776 * @adev: amdgpu_device pointer
2777 *
 * For ASICs that need to disable the SMC first.
2779 */
static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2781 {
2782 int i;
2783
2784 if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
2785 return;
2786
2787 for (i = 0; i < adev->num_ip_blocks; i++) {
2788 if (!adev->ip_blocks[i].status.hw)
2789 continue;
2790 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2791 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
2792 break;
2793 }
2794 }
2795 }
2796
static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2798 {
2799 int i, r;
2800
2801 for (i = 0; i < adev->num_ip_blocks; i++) {
2802 if (!adev->ip_blocks[i].version)
2803 continue;
2804 if (!adev->ip_blocks[i].version->funcs->early_fini)
2805 continue;
2806
2807 r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
2808 if (r) {
2809 dev_dbg(adev->dev,
2810 "early_fini of IP block <%s> failed %d\n",
2811 adev->ip_blocks[i].version->funcs->name, r);
2812 }
2813 }
2814
2815 amdgpu_amdkfd_suspend(adev, true);
2816 amdgpu_amdkfd_teardown_processes(adev);
2817 amdgpu_userq_suspend(adev);
2818
2819 /* Workaround for ASICs need to disable SMC first */
2820 amdgpu_device_smu_fini_early(adev);
2821
2822 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2823 if (!adev->ip_blocks[i].status.hw)
2824 continue;
2825
2826 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
2827 }
2828
2829 if (amdgpu_sriov_vf(adev)) {
2830 if (amdgpu_virt_release_full_gpu(adev, false))
2831 dev_err(adev->dev,
2832 "failed to release exclusive mode on fini\n");
2833 }
2834
2835 /*
2836 * Driver reload on the APU can fail due to firmware validation because
2837 * the PSP is always running, as it is shared across the whole SoC.
2838 * This same issue does not occur on dGPU because it has a mechanism
2839 * that checks whether the PSP is running. A solution for those issues
2840 * in the APU is to trigger a GPU reset, but this should be done during
2841 * the unload phase to avoid adding boot latency and screen flicker.
2842 */
2843 if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) {
2844 r = amdgpu_asic_reset(adev);
2845 if (r)
2846 dev_err(adev->dev, "asic reset on %s failed\n", __func__);
2847 }
2848
2849 return 0;
2850 }
2851
2852 /**
2853 * amdgpu_device_ip_fini - run fini for hardware IPs
2854 *
2855 * @adev: amdgpu_device pointer
2856 *
2857 * Main teardown pass for hardware IPs. The list of all the hardware
2858 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2859 * are run. hw_fini tears down the hardware associated with each IP
2860 * and sw_fini tears down any software state associated with each IP.
2861 * Returns 0 on success, negative error code on failure.
2862 */
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2864 {
2865 int i, r;
2866
2867 amdgpu_cper_fini(adev);
2868
2869 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2870 amdgpu_virt_release_ras_err_handler_data(adev);
2871
2872 if (adev->gmc.xgmi.num_physical_nodes > 1)
2873 amdgpu_xgmi_remove_device(adev);
2874
2875 amdgpu_amdkfd_device_fini_sw(adev);
2876
2877 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2878 if (!adev->ip_blocks[i].status.sw)
2879 continue;
2880
2881 if (!adev->ip_blocks[i].version)
2882 continue;
2883 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2884 amdgpu_ucode_free_bo(adev);
2885 amdgpu_free_static_csa(&adev->virt.csa_obj);
2886 amdgpu_device_wb_fini(adev);
2887 amdgpu_device_mem_scratch_fini(adev);
2888 amdgpu_ib_pool_fini(adev);
2889 amdgpu_seq64_fini(adev);
2890 amdgpu_doorbell_fini(adev);
2891 }
2892 if (adev->ip_blocks[i].version->funcs->sw_fini) {
2893 r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
2894 /* XXX handle errors */
2895 if (r) {
2896 dev_dbg(adev->dev,
2897 "sw_fini of IP block <%s> failed %d\n",
2898 adev->ip_blocks[i].version->funcs->name,
2899 r);
2900 }
2901 }
2902 adev->ip_blocks[i].status.sw = false;
2903 adev->ip_blocks[i].status.valid = false;
2904 }
2905
2906 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2907 if (!adev->ip_blocks[i].status.late_initialized)
2908 continue;
2909 if (!adev->ip_blocks[i].version)
2910 continue;
2911 if (adev->ip_blocks[i].version->funcs->late_fini)
2912 adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
2913 adev->ip_blocks[i].status.late_initialized = false;
2914 }
2915
2916 amdgpu_ras_fini(adev);
2917 amdgpu_uid_fini(adev);
2918
2919 return 0;
2920 }
2921
2922 /**
2923 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2924 *
2925 * @work: work_struct.
2926 */
static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2928 {
2929 struct amdgpu_device *adev =
2930 container_of(work, struct amdgpu_device, delayed_init_work.work);
2931 int r;
2932
2933 r = amdgpu_ib_ring_tests(adev);
2934 if (r)
2935 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2936 }
2937
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2939 {
2940 struct amdgpu_device *adev =
2941 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2942
2943 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2944 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2945
2946 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
2947 adev->gfx.gfx_off_state = true;
2948 }
2949
2950 /**
2951 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2952 *
2953 * @adev: amdgpu_device pointer
2954 *
2955 * Main suspend function for hardware IPs. The list of all the hardware
2956 * IPs that make up the asic is walked, clockgating is disabled and the
2957 * suspend callbacks are run. suspend puts the hardware and software state
2958 * in each IP into a state suitable for suspend.
2959 * Returns 0 on success, negative error code on failure.
2960 */
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2962 {
2963 int i, r, rec;
2964
2965 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2966 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2967
2968 /*
 * Per the PMFW team's suggestion, the driver needs to handle gfxoff
 * and df cstate feature disablement for gpu reset (e.g. Mode1Reset)
 * scenarios. Add the missing df cstate disablement here.
2972 */
2973 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2974 dev_warn(adev->dev, "Failed to disallow df cstate");
2975
2976 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2977 if (!adev->ip_blocks[i].status.valid)
2978 continue;
2979
2980 /* displays are handled separately */
2981 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2982 continue;
2983
2984 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
2985 if (r)
2986 goto unwind;
2987 }
2988
2989 return 0;
2990 unwind:
2991 rec = amdgpu_device_ip_resume_phase3(adev);
2992 if (rec)
2993 dev_err(adev->dev,
2994 "amdgpu_device_ip_resume_phase3 failed during unwind: %d\n",
2995 rec);
2996
2997 amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
2998
2999 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3000 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3001
3002 return r;
3003 }
3004
3005 /**
3006 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3007 *
3008 * @adev: amdgpu_device pointer
3009 *
3010 * Main suspend function for hardware IPs. The list of all the hardware
3011 * IPs that make up the asic is walked, clockgating is disabled and the
3012 * suspend callbacks are run. suspend puts the hardware and software state
3013 * in each IP into a state suitable for suspend.
3014 * Returns 0 on success, negative error code on failure.
3015 */
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3017 {
3018 int i, r, rec;
3019
3020 if (adev->in_s0ix)
3021 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3022
3023 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3024 if (!adev->ip_blocks[i].status.valid)
3025 continue;
3026 /* displays are handled in phase1 */
3027 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3028 continue;
3029 /* PSP lost connection when err_event_athub occurs */
3030 if (amdgpu_ras_intr_triggered() &&
3031 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3032 adev->ip_blocks[i].status.hw = false;
3033 continue;
3034 }
3035
		/* skip unnecessary suspend if we have not initialized them yet */
3037 if (!amdgpu_ip_member_of_hwini(
3038 adev, adev->ip_blocks[i].version->type))
3039 continue;
3040
3041 /* Since we skip suspend for S0i3, we need to cancel the delayed
3042 * idle work here as the suspend callback never gets called.
3043 */
3044 if (adev->in_s0ix &&
3045 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
3046 amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
3047 cancel_delayed_work_sync(&adev->gfx.idle_work);
3048 /* skip suspend of gfx/mes and psp for S0ix
3049 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3050 * like at runtime. PSP is also part of the always on hardware
3051 * so no need to suspend it.
3052 */
3053 if (adev->in_s0ix &&
3054 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3055 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3056 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3057 continue;
3058
3059 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3060 if (adev->in_s0ix &&
3061 (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3062 IP_VERSION(5, 0, 0)) &&
3063 (adev->ip_blocks[i].version->type ==
3064 AMD_IP_BLOCK_TYPE_SDMA))
3065 continue;
3066
		/* swPSP provides the IMU and RLC FW binaries to the TOS during
		 * cold boot. These live in the TMR and are expected to be reused
		 * by PSP-TOS to reload from this location; RLC autoload is also
		 * loaded from here based on the PMFW -> PSP message during the
		 * re-init sequence. Therefore, psp suspend & resume should be
		 * skipped to avoid destroying the TMR and reloading the FWs again
		 * for IMU-enabled APU ASICs.
3073 */
3074 if (amdgpu_in_reset(adev) &&
3075 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3076 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3077 continue;
3078
3079 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3080 if (r)
3081 goto unwind;
3082
3083 /* handle putting the SMC in the appropriate state */
3084 if (!amdgpu_sriov_vf(adev)) {
3085 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3086 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3087 if (r) {
3088 dev_err(adev->dev,
3089 "SMC failed to set mp1 state %d, %d\n",
3090 adev->mp1_state, r);
3091 goto unwind;
3092 }
3093 }
3094 }
3095 }
3096
3097 return 0;
3098 unwind:
	/* unwinding suspend phase 2 = resume phase 1 + fw loading + resume phase 2 */
3100 rec = amdgpu_device_ip_resume_phase1(adev);
3101 if (rec) {
3102 dev_err(adev->dev,
3103 "amdgpu_device_ip_resume_phase1 failed during unwind: %d\n",
3104 rec);
3105 return r;
3106 }
3107
3108 rec = amdgpu_device_fw_loading(adev);
3109 if (rec) {
3110 dev_err(adev->dev,
3111 "amdgpu_device_fw_loading failed during unwind: %d\n",
3112 rec);
3113 return r;
3114 }
3115
3116 rec = amdgpu_device_ip_resume_phase2(adev);
3117 if (rec) {
3118 dev_err(adev->dev,
3119 "amdgpu_device_ip_resume_phase2 failed during unwind: %d\n",
3120 rec);
3121 return r;
3122 }
3123
3124 return r;
3125 }
3126
3127 /**
3128 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3129 *
3130 * @adev: amdgpu_device pointer
3131 *
3132 * Main suspend function for hardware IPs. The list of all the hardware
3133 * IPs that make up the asic is walked, clockgating is disabled and the
3134 * suspend callbacks are run. suspend puts the hardware and software state
3135 * in each IP into a state suitable for suspend.
3136 * Returns 0 on success, negative error code on failure.
3137 */
static int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3139 {
3140 int r;
3141
3142 if (amdgpu_sriov_vf(adev)) {
3143 amdgpu_virt_fini_data_exchange(adev);
3144 amdgpu_virt_request_full_gpu(adev, false);
3145 }
3146
3147 amdgpu_ttm_set_buffer_funcs_status(adev, false);
3148
3149 r = amdgpu_device_ip_suspend_phase1(adev);
3150 if (r)
3151 return r;
3152 r = amdgpu_device_ip_suspend_phase2(adev);
3153
3154 if (amdgpu_sriov_vf(adev))
3155 amdgpu_virt_release_full_gpu(adev, false);
3156
3157 return r;
3158 }
3159
static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3161 {
3162 int i, r;
3163
3164 static enum amd_ip_block_type ip_order[] = {
3165 AMD_IP_BLOCK_TYPE_COMMON,
3166 AMD_IP_BLOCK_TYPE_GMC,
3167 AMD_IP_BLOCK_TYPE_PSP,
3168 AMD_IP_BLOCK_TYPE_IH,
3169 };
3170
3171 for (i = 0; i < adev->num_ip_blocks; i++) {
3172 int j;
3173 struct amdgpu_ip_block *block;
3174
3175 block = &adev->ip_blocks[i];
3176 block->status.hw = false;
3177
3178 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3179
3180 if (block->version->type != ip_order[j] ||
3181 !block->status.valid)
3182 continue;
3183
3184 r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
3185 if (r) {
3186 dev_err(adev->dev, "RE-INIT-early: %s failed\n",
3187 block->version->funcs->name);
3188 return r;
3189 }
3190 block->status.hw = true;
3191 }
3192 }
3193
3194 return 0;
3195 }
3196
static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3198 {
3199 struct amdgpu_ip_block *block;
3200 int i, r = 0;
3201
3202 static enum amd_ip_block_type ip_order[] = {
3203 AMD_IP_BLOCK_TYPE_SMC,
3204 AMD_IP_BLOCK_TYPE_DCE,
3205 AMD_IP_BLOCK_TYPE_GFX,
3206 AMD_IP_BLOCK_TYPE_SDMA,
3207 AMD_IP_BLOCK_TYPE_MES,
3208 AMD_IP_BLOCK_TYPE_UVD,
3209 AMD_IP_BLOCK_TYPE_VCE,
3210 AMD_IP_BLOCK_TYPE_VCN,
3211 AMD_IP_BLOCK_TYPE_JPEG
3212 };
3213
3214 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3215 block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
3216
3217 if (!block)
3218 continue;
3219
3220 if (block->status.valid && !block->status.hw) {
3221 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
3222 r = amdgpu_ip_block_resume(block);
3223 } else {
3224 r = block->version->funcs->hw_init(block);
3225 }
3226
3227 if (r) {
3228 dev_err(adev->dev, "RE-INIT-late: %s failed\n",
3229 block->version->funcs->name);
3230 break;
3231 }
3232 block->status.hw = true;
3233 }
3234 }
3235
3236 return r;
3237 }
3238
3239 /**
3240 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3241 *
3242 * @adev: amdgpu_device pointer
3243 *
3244 * First resume function for hardware IPs. The list of all the hardware
3245 * IPs that make up the asic is walked and the resume callbacks are run for
3246 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3247 * after a suspend and updates the software state as necessary. This
3248 * function is also used for restoring the GPU after a GPU reset.
3249 * Returns 0 on success, negative error code on failure.
3250 */
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3252 {
3253 int i, r;
3254
3255 for (i = 0; i < adev->num_ip_blocks; i++) {
3256 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3257 continue;
3258 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3259 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3260 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3261 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3262
3263 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3264 if (r)
3265 return r;
3266 }
3267 }
3268
3269 return 0;
3270 }
3271
3272 /**
3273 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3274 *
3275 * @adev: amdgpu_device pointer
3276 *
3277 * Second resume function for hardware IPs. The list of all the hardware
3278 * IPs that make up the asic is walked and the resume callbacks are run for
3279 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3280 * functional state after a suspend and updates the software state as
3281 * necessary. This function is also used for restoring the GPU after a GPU
3282 * reset.
3283 * Returns 0 on success, negative error code on failure.
3284 */
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3286 {
3287 int i, r;
3288
3289 for (i = 0; i < adev->num_ip_blocks; i++) {
3290 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3291 continue;
3292 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3293 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3294 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3295 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
3296 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3297 continue;
3298 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3299 if (r)
3300 return r;
3301 }
3302
3303 return 0;
3304 }
3305
3306 /**
3307 * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
3308 *
3309 * @adev: amdgpu_device pointer
3310 *
3311 * Third resume function for hardware IPs. The list of all the hardware
3312 * IPs that make up the asic is walked and the resume callbacks are run for
3313 * all DCE. resume puts the hardware into a functional state after a suspend
3314 * and updates the software state as necessary. This function is also used
3315 * for restoring the GPU after a GPU reset.
3316 *
3317 * Returns 0 on success, negative error code on failure.
3318 */
static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
3320 {
3321 int i, r;
3322
3323 for (i = 0; i < adev->num_ip_blocks; i++) {
3324 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3325 continue;
3326 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
3327 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3328 if (r)
3329 return r;
3330 }
3331 }
3332
3333 return 0;
3334 }
3335
3336 /**
3337 * amdgpu_device_ip_resume - run resume for hardware IPs
3338 *
3339 * @adev: amdgpu_device pointer
3340 *
3341 * Main resume function for hardware IPs. The hardware IPs
 * are split into several resume functions because they are
 * also used in recovering from a GPU reset and some additional
 * steps need to be taken between them. In this case (S3/S4) they are
3345 * run sequentially.
3346 * Returns 0 on success, negative error code on failure.
3347 */
static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3349 {
3350 int r;
3351
3352 r = amdgpu_device_ip_resume_phase1(adev);
3353 if (r)
3354 return r;
3355
3356 r = amdgpu_device_fw_loading(adev);
3357 if (r)
3358 return r;
3359
3360 r = amdgpu_device_ip_resume_phase2(adev);
3361
3362 amdgpu_ttm_set_buffer_funcs_status(adev, true);
3363
3364 if (r)
3365 return r;
3366
3367 amdgpu_fence_driver_hw_init(adev);
3368
3369 r = amdgpu_device_ip_resume_phase3(adev);
3370
3371 return r;
3372 }
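
/*
 * Resume call sequence used above (summary of the helpers in this file):
 *
 *	amdgpu_device_ip_resume_phase1(adev);	// COMMON, GMC, IH (+PSP on SR-IOV)
 *	amdgpu_device_fw_loading(adev);		// PSP/SMU firmware
 *	amdgpu_device_ip_resume_phase2(adev);	// remaining blocks except DCE/PSP
 *	amdgpu_fence_driver_hw_init(adev);
 *	amdgpu_device_ip_resume_phase3(adev);	// DCE last, so displays come back last
 */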
3373
3374 /**
3375 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3376 *
3377 * @adev: amdgpu_device pointer
3378 *
3379 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3380 */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3382 {
3383 if (amdgpu_sriov_vf(adev)) {
3384 if (adev->is_atom_fw) {
3385 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3386 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3387 } else {
3388 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3389 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3390 }
3391
3392 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3393 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3394 }
3395 }
3396
3397 /**
3398 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3399 *
3400 * @pdev : pci device context
3401 * @asic_type: AMD asic type
3402 *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
 * Returns true if DC has support, false if not.
3405 */
bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
				       enum amd_asic_type asic_type)
3408 {
3409 switch (asic_type) {
3410 #ifdef CONFIG_DRM_AMDGPU_SI
3411 case CHIP_HAINAN:
3412 #endif
3413 case CHIP_TOPAZ:
3414 /* chips with no display hardware */
3415 return false;
3416 #if defined(CONFIG_DRM_AMD_DC)
3417 case CHIP_TAHITI:
3418 case CHIP_PITCAIRN:
3419 case CHIP_VERDE:
3420 case CHIP_OLAND:
3421 return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI);
3422 default:
3423 return amdgpu_dc != 0;
3424 #else
3425 default:
3426 if (amdgpu_dc > 0)
3427 dev_info_once(
3428 &pdev->dev,
3429 "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3430 return false;
3431 #endif
3432 }
3433 }
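
/*
 * The dc module parameter feeding amdgpu_dc above follows the usual
 * tri-state convention (sketch, not from this file): -1 = auto (default),
 * 0 = force the legacy display path, 1 = force DC where the ASIC allows it,
 * e.g.
 *
 *	modprobe amdgpu dc=0
 */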
3434
3435 /**
3436 * amdgpu_device_has_dc_support - check if dc is supported
3437 *
3438 * @adev: amdgpu_device pointer
3439 *
3440 * Returns true for supported, false for not supported
3441 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3443 {
3444 if (adev->enable_virtual_display ||
3445 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3446 return false;
3447
3448 return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
3449 }
3450
static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3452 {
3453 struct amdgpu_device *adev =
3454 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3455 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3456
3457 /* It's a bug to not have a hive within this function */
3458 if (WARN_ON(!hive))
3459 return;
3460
3461 /*
3462 * Use task barrier to synchronize all xgmi reset works across the
3463 * hive. task_barrier_enter and task_barrier_exit will block
3464 * until all the threads running the xgmi reset works reach
3465 * those points. task_barrier_full will do both blocks.
3466 */
3467 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3468
3469 task_barrier_enter(&hive->tb);
3470 adev->asic_reset_res = amdgpu_device_baco_enter(adev);
3471
3472 if (adev->asic_reset_res)
3473 goto fail;
3474
3475 task_barrier_exit(&hive->tb);
3476 adev->asic_reset_res = amdgpu_device_baco_exit(adev);
3477
3478 if (adev->asic_reset_res)
3479 goto fail;
3480
3481 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
3482 } else {
3483
3484 task_barrier_full(&hive->tb);
3485 adev->asic_reset_res = amdgpu_asic_reset(adev);
3486 }
3487
3488 fail:
3489 if (adev->asic_reset_res)
3490 dev_warn(adev->dev,
3491 "ASIC reset failed with error, %d for drm dev, %s",
3492 adev->asic_reset_res, adev_to_drm(adev)->unique);
3493 amdgpu_put_xgmi_hive(hive);
3494 }
3495
static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3497 {
3498 char buf[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
3499 char *input = buf;
3500 char *timeout_setting = NULL;
3501 int index = 0;
3502 long timeout;
3503 int ret = 0;
3504
3505 /* By default timeout for all queues is 2 sec */
3506 adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
3507 adev->video_timeout = msecs_to_jiffies(2000);
3508
3509 if (!strnlen(amdgpu_lockup_timeout, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
3510 return 0;
3511
3512 /*
3513 * strsep() destructively modifies its input by replacing delimiters
3514 * with '\0'. Use a stack copy so the global module parameter buffer
3515 * remains intact for multi-GPU systems where this function is called
3516 * once per device.
3517 */
3518 strscpy(buf, amdgpu_lockup_timeout, sizeof(buf));
3519
3520 while ((timeout_setting = strsep(&input, ",")) &&
3521 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3522 ret = kstrtol(timeout_setting, 0, &timeout);
3523 if (ret)
3524 return ret;
3525
3526 if (timeout == 0) {
3527 index++;
3528 continue;
3529 } else if (timeout < 0) {
3530 timeout = MAX_SCHEDULE_TIMEOUT;
3531 dev_warn(adev->dev, "lockup timeout disabled");
3532 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3533 } else {
3534 timeout = msecs_to_jiffies(timeout);
3535 }
3536
3537 switch (index++) {
3538 case 0:
3539 adev->gfx_timeout = timeout;
3540 break;
3541 case 1:
3542 adev->compute_timeout = timeout;
3543 break;
3544 case 2:
3545 adev->sdma_timeout = timeout;
3546 break;
3547 case 3:
3548 adev->video_timeout = timeout;
3549 break;
3550 default:
3551 break;
3552 }
3553 }
3554
	/* When only one value is specified, apply it to all queues. */
3556 if (index == 1)
3557 adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
3558 adev->video_timeout = timeout;
3559
3560 return ret;
3561 }
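
/*
 * Illustrative examples of the lockup_timeout parameter parsed above (not
 * part of the original file). Values are in milliseconds and are assigned
 * in the order gfx, compute, sdma, video; a single value applies to all:
 *
 *	modprobe amdgpu lockup_timeout=10000
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 *
 * A value of 0 keeps the default for that queue and a negative value
 * disables the timeout entirely (MAX_SCHEDULE_TIMEOUT).
 */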
3562
3563 /**
3564 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3565 *
3566 * @adev: amdgpu_device pointer
3567 *
 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode.
3569 */
static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3571 {
3572 struct iommu_domain *domain;
3573
3574 domain = iommu_get_domain_for_dev(adev->dev);
3575 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3576 adev->ram_is_direct_mapped = true;
3577 }
3578
3579 #if defined(CONFIG_HSA_AMD_P2P)
3580 /**
3581 * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
3582 *
3583 * @adev: amdgpu_device pointer
3584 *
 * Returns true if the IOMMU is remapping BAR addresses, false otherwise.
3586 */
static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
3588 {
3589 struct iommu_domain *domain;
3590
3591 domain = iommu_get_domain_for_dev(adev->dev);
3592 if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
3593 domain->type == IOMMU_DOMAIN_DMA_FQ))
3594 return true;
3595
3596 return false;
3597 }
3598 #endif
3599
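/**
 * amdgpu_device_set_mcbp - configure mid command buffer preemption (MCBP)
 *
 * @adev: amdgpu_device pointer
 *
 * Honors the amdgpu_mcbp module parameter (1 forces MCBP on, 0 forces it off,
 * any other value leaves the existing setting untouched) and always enables
 * MCBP for SR-IOV VFs.
 */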
3600 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3601 {
3602 if (amdgpu_mcbp == 1)
3603 adev->gfx.mcbp = true;
3604 else if (amdgpu_mcbp == 0)
3605 adev->gfx.mcbp = false;
3606
3607 if (amdgpu_sriov_vf(adev))
3608 adev->gfx.mcbp = true;
3609
3610 if (adev->gfx.mcbp)
3611 dev_info(adev->dev, "MCBP is enabled\n");
3612 }
3613
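/**
 * amdgpu_device_sys_interface_init - create the device sysfs interfaces
 *
 * @adev: amdgpu_device pointer
 *
 * Registers the atombios, pm, firmware, device attribute, board, FRU,
 * register state, XCP and UMA sysfs interfaces. Individual failures are
 * logged but do not abort device init; the status of the last checked
 * registration is returned.
 */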
3614 static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev)
3615 {
3616 int r;
3617
3618 r = amdgpu_atombios_sysfs_init(adev);
3619 if (r)
3620 drm_err(&adev->ddev,
3621 "registering atombios sysfs failed (%d).\n", r);
3622
3623 r = amdgpu_pm_sysfs_init(adev);
3624 if (r)
3625 dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
3626
3627 r = amdgpu_ucode_sysfs_init(adev);
3628 if (r) {
3629 adev->ucode_sysfs_en = false;
3630 dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
3631 } else
3632 adev->ucode_sysfs_en = true;
3633
3634 r = amdgpu_device_attr_sysfs_init(adev);
3635 if (r)
3636 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3637
3638 r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
3639 if (r)
3640 dev_err(adev->dev,
3641 "Could not create amdgpu board attributes\n");
3642
3643 amdgpu_fru_sysfs_init(adev);
3644 amdgpu_reg_state_sysfs_init(adev);
3645 amdgpu_xcp_sysfs_init(adev);
3646 amdgpu_uma_sysfs_init(adev);
3647
3648 return r;
3649 }
3650
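/**
 * amdgpu_device_sys_interface_fini - remove the device sysfs interfaces
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the sysfs interfaces created by
 * amdgpu_device_sys_interface_init().
 */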
3651 static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev)
3652 {
3653 if (adev->pm.sysfs_initialized)
3654 amdgpu_pm_sysfs_fini(adev);
3655 if (adev->ucode_sysfs_en)
3656 amdgpu_ucode_sysfs_fini(adev);
3657 amdgpu_device_attr_sysfs_fini(adev);
3658 amdgpu_fru_sysfs_fini(adev);
3659
3660 amdgpu_reg_state_sysfs_fini(adev);
3661 amdgpu_xcp_sysfs_fini(adev);
3662 amdgpu_uma_sysfs_fini(adev);
3663 }
3664
3665 /**
3666 * amdgpu_device_init - initialize the driver
3667 *
3668 * @adev: amdgpu_device pointer
3669 * @flags: driver flags
3670 *
3671 * Initializes the driver info and hw (all asics).
3672 * Returns 0 for success or an error on failure.
3673 * Called at driver startup.
3674 */
3675 int amdgpu_device_init(struct amdgpu_device *adev,
3676 uint32_t flags)
3677 {
3678 struct pci_dev *pdev = adev->pdev;
3679 int r, i;
3680 bool px = false;
3681 u32 max_MBps;
3682 int tmp;
3683
3684 adev->shutdown = false;
3685 adev->flags = flags;
3686
3687 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3688 adev->asic_type = amdgpu_force_asic_type;
3689 else
3690 adev->asic_type = flags & AMD_ASIC_MASK;
3691
3692 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3693 if (amdgpu_emu_mode == 1)
3694 adev->usec_timeout *= 10;
3695 adev->gmc.gart_size = 512 * 1024 * 1024;
3696 adev->accel_working = false;
3697 adev->num_rings = 0;
3698 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3699 adev->mman.buffer_funcs = NULL;
3700 adev->mman.buffer_funcs_ring = NULL;
3701 adev->vm_manager.vm_pte_funcs = NULL;
3702 adev->vm_manager.vm_pte_num_scheds = 0;
3703 adev->gmc.gmc_funcs = NULL;
3704 adev->harvest_ip_mask = 0x0;
3705 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3706 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3707
3708 amdgpu_reg_access_init(adev);
3709
3710 dev_info(
3711 adev->dev,
3712 "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3713 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3714 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3715
3716 /* mutex initializations are all done here so we
3717 * can call these functions again without locking issues
3718 */
3719 mutex_init(&adev->firmware.mutex);
3720 mutex_init(&adev->pm.mutex);
3721 mutex_init(&adev->gfx.gpu_clock_mutex);
3722 mutex_init(&adev->srbm_mutex);
3723 mutex_init(&adev->gfx.pipe_reserve_mutex);
3724 mutex_init(&adev->gfx.gfx_off_mutex);
3725 mutex_init(&adev->gfx.partition_mutex);
3726 mutex_init(&adev->grbm_idx_mutex);
3727 mutex_init(&adev->mn_lock);
3728 mutex_init(&adev->virt.vf_errors.lock);
3729 hash_init(adev->mn_hash);
3730 mutex_init(&adev->psp.mutex);
3731 mutex_init(&adev->notifier_lock);
3732 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3733 mutex_init(&adev->benchmark_mutex);
3734 mutex_init(&adev->gfx.reset_sem_mutex);
3735 /* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
3736 mutex_init(&adev->enforce_isolation_mutex);
3737 for (i = 0; i < MAX_XCP; ++i) {
3738 adev->isolation[i].spearhead = dma_fence_get_stub();
3739 amdgpu_sync_create(&adev->isolation[i].active);
3740 amdgpu_sync_create(&adev->isolation[i].prev);
3741 }
3742 mutex_init(&adev->gfx.userq_sch_mutex);
3743 mutex_init(&adev->gfx.workload_profile_mutex);
3744 mutex_init(&adev->vcn.workload_profile_mutex);
3745
3746 amdgpu_device_init_apu_flags(adev);
3747
3748 r = amdgpu_device_check_arguments(adev);
3749 if (r)
3750 return r;
3751
3752 spin_lock_init(&adev->mmio_idx_lock);
3753 spin_lock_init(&adev->mm_stats.lock);
3754 spin_lock_init(&adev->virt.rlcg_reg_lock);
3755 spin_lock_init(&adev->wb.lock);
3756
3757 INIT_LIST_HEAD(&adev->reset_list);
3758
3759 INIT_LIST_HEAD(&adev->ras_list);
3760
3761 INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3762
3763 xa_init_flags(&adev->userq_doorbell_xa, XA_FLAGS_LOCK_IRQ);
3764
3765 INIT_DELAYED_WORK(&adev->delayed_init_work,
3766 amdgpu_device_delayed_init_work_handler);
3767 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3768 amdgpu_device_delay_enable_gfx_off);
3769 /*
3770 * Initialize the enforce_isolation work structures for each XCP
3771 * partition. This work handler is responsible for enforcing shader
3772 * isolation on AMD GPUs. It counts the number of emitted fences for
3773 * each GFX and compute ring. If there are any fences, it schedules
3774 * the `enforce_isolation_work` to be run after a delay. If there are
3775 * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
3776 * runqueue.
3777 */
3778 for (i = 0; i < MAX_XCP; i++) {
3779 INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
3780 amdgpu_gfx_enforce_isolation_handler);
3781 adev->gfx.enforce_isolation[i].adev = adev;
3782 adev->gfx.enforce_isolation[i].xcp_id = i;
3783 }
3784
3785 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3786 INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work);
3787
3788 amdgpu_coredump_init(adev);
3789
3790 adev->gfx.gfx_off_req_count = 1;
3791 adev->gfx.gfx_off_residency = 0;
3792 adev->gfx.gfx_off_entrycount = 0;
3793 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3794
3795 atomic_set(&adev->throttling_logging_enabled, 1);
3796 /*
3797 * If throttling continues, logging will be performed every minute
3798 * to avoid log flooding. "-1" is subtracted since the thermal
3799 * throttling interrupt comes every second. Thus, the total logging
3800 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3801 * for the throttling interrupt) = 60 seconds.
3802 */
3803 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3804
3805 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3806
3807 /* Registers mapping */
3808 /* TODO: block userspace mapping of io register */
3809 if (adev->asic_type >= CHIP_BONAIRE) {
3810 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3811 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3812 } else {
3813 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3814 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3815 }
3816
3817 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3818 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3819
3820 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3821 if (!adev->rmmio)
3822 return -ENOMEM;
3823
3824 dev_info(adev->dev, "register mmio base: 0x%08X\n",
3825 (uint32_t)adev->rmmio_base);
3826 dev_info(adev->dev, "register mmio size: %u\n",
3827 (unsigned int)adev->rmmio_size);
3828
3829 /*
3830 * The reset domain needs to be present early, before any XGMI hive is
3831 * discovered and initialized, so that the reset semaphore and in_gpu_reset
3832 * flag can be used early during init and before any call to RREG32.
3833 */
3834 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3835 if (!adev->reset_domain)
3836 return -ENOMEM;
3837
3838 /* detect hw virtualization here */
3839 amdgpu_virt_init(adev);
3840
3841 amdgpu_device_get_pcie_info(adev);
3842
3843 r = amdgpu_device_get_job_timeout_settings(adev);
3844 if (r) {
3845 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3846 return r;
3847 }
3848
3849 amdgpu_device_set_mcbp(adev);
3850
3851 /*
3852 * By default, use the default init level where all blocks are expected to
3853 * be initialized. At present, the 'swinit' of blocks must complete before
3854 * the need for a different level can be detected.
3855 */
3856 amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
3857 /* early init functions */
3858 r = amdgpu_device_ip_early_init(adev);
3859 if (r)
3860 return r;
3861
3862 /*
3863 * No need to remove conflicting FBs for non-display class devices.
3864 * This prevents the sysfb from being freed accidentally.
3865 */
3866 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
3867 (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
3868 /* Get rid of things like offb */
3869 r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
3870 if (r)
3871 return r;
3872 }
3873
3874 /* Enable TMZ based on IP_VERSION */
3875 amdgpu_gmc_tmz_set(adev);
3876
3877 if (amdgpu_sriov_vf(adev) &&
3878 amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
3879 /* VF MMIO access (except mailbox range) from CPU
3880 * will be blocked during sriov runtime
3881 */
3882 adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
3883
3884 amdgpu_gmc_noretry_set(adev);
3885 /* Need to get xgmi info early to decide the reset behavior */
3886 if (adev->gmc.xgmi.supported) {
3887 if (adev->gfxhub.funcs &&
3888 adev->gfxhub.funcs->get_xgmi_info) {
3889 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3890 if (r)
3891 return r;
3892 }
3893 }
3894
3895 if (adev->gmc.xgmi.connected_to_cpu) {
3896 if (adev->mmhub.funcs &&
3897 adev->mmhub.funcs->get_xgmi_info) {
3898 r = adev->mmhub.funcs->get_xgmi_info(adev);
3899 if (r)
3900 return r;
3901 }
3902 }
3903
3904 /* enable PCIE atomic ops */
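/*
 * For VFs, trust the capability reported by the host through the pf2vf
 * structure: both 32- and 64-bit atomic component support must be
 * advertised for have_atomics_support to be set.
 */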
3905 if (amdgpu_sriov_vf(adev)) {
3906 if (adev->virt.fw_reserve.p_pf2vf)
3907 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3908 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3909 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3910 /* APUs with gfx9 onwards don't rely on PCIe atomics; their
3911 * internal path natively supports atomics, so set have_atomics_support to true.
3912 */
3913 } else if ((adev->flags & AMD_IS_APU &&
3914 amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) ||
3915 (adev->gmc.xgmi.connected_to_cpu &&
3916 amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 1, 0))) {
3917 adev->have_atomics_support = true;
3918 } else {
3919 adev->have_atomics_support =
3920 !pci_enable_atomic_ops_to_root(adev->pdev,
3921 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3922 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3923 }
3924
3925 if (!adev->have_atomics_support)
3926 dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3927
3928 /* doorbell bar mapping and doorbell index init */
3929 amdgpu_doorbell_init(adev);
3930
3931 if (amdgpu_emu_mode == 1) {
3932 /* post the asic on emulation mode */
3933 emu_soc_asic_init(adev);
3934 goto fence_driver_init;
3935 }
3936
3937 amdgpu_reset_init(adev);
3938
3939 /* detect if we are with an SRIOV vbios */
3940 if (adev->bios)
3941 amdgpu_device_detect_sriov_bios(adev);
3942
3943 /* check if we need to reset the asic
3944 * E.g., driver was not cleanly unloaded previously, etc.
3945 */
3946 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3947 if (adev->gmc.xgmi.num_physical_nodes) {
3948 dev_info(adev->dev, "Pending hive reset.\n");
3949 amdgpu_set_init_level(adev,
3950 AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
3951 } else {
3952 tmp = amdgpu_reset_method;
3953 /* It should do a default reset when loading or reloading the driver,
3954 * regardless of the module parameter reset_method.
3955 */
3956 amdgpu_reset_method = AMD_RESET_METHOD_NONE;
3957 r = amdgpu_asic_reset(adev);
3958 amdgpu_reset_method = tmp;
3959 }
3960
3961 if (r) {
3962 dev_err(adev->dev, "asic reset on init failed\n");
3963 goto failed;
3964 }
3965 }
3966
3967 /* Post card if necessary */
3968 if (amdgpu_device_need_post(adev)) {
3969 if (!adev->bios) {
3970 dev_err(adev->dev, "no vBIOS found\n");
3971 r = -EINVAL;
3972 goto failed;
3973 }
3974 dev_info(adev->dev, "GPU posting now...\n");
3975 r = amdgpu_device_asic_init(adev);
3976 if (r) {
3977 dev_err(adev->dev, "gpu post error!\n");
3978 goto failed;
3979 }
3980 }
3981
3982 if (adev->bios) {
3983 if (adev->is_atom_fw) {
3984 /* Initialize clocks */
3985 r = amdgpu_atomfirmware_get_clock_info(adev);
3986 if (r) {
3987 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3988 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3989 goto failed;
3990 }
3991 } else {
3992 /* Initialize clocks */
3993 r = amdgpu_atombios_get_clock_info(adev);
3994 if (r) {
3995 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3996 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3997 goto failed;
3998 }
3999 /* init i2c buses */
4000 amdgpu_i2c_init(adev);
4001 }
4002 }
4003
4004 fence_driver_init:
4005 /* Fence driver */
4006 r = amdgpu_fence_driver_sw_init(adev);
4007 if (r) {
4008 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4009 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4010 goto failed;
4011 }
4012
4013 /* init the mode config */
4014 drm_mode_config_init(adev_to_drm(adev));
4015
4016 r = amdgpu_device_ip_init(adev);
4017 if (r) {
4018 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4019 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4020 goto release_ras_con;
4021 }
4022
4023 amdgpu_fence_driver_hw_init(adev);
4024
4025 dev_info(adev->dev,
4026 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4027 adev->gfx.config.max_shader_engines,
4028 adev->gfx.config.max_sh_per_se,
4029 adev->gfx.config.max_cu_per_sh,
4030 adev->gfx.cu_info.number);
4031
4032 adev->accel_working = true;
4033
4034 amdgpu_vm_check_compute_bug(adev);
4035
4036 /* Initialize the buffer migration limit. */
4037 if (amdgpu_moverate >= 0)
4038 max_MBps = amdgpu_moverate;
4039 else
4040 max_MBps = 8; /* Allow 8 MB/s. */
4041 /* Get a log2 for easy divisions. */
4042 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4043
4044 /*
4045 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4046 * Otherwise the mgpu fan boost feature will be skipped because the
4047 * gpu instance count would be too low.
4048 */
4049 amdgpu_register_gpu_instance(adev);
4050
4051 /* enable clockgating, etc. after ib tests, etc. since some blocks require
4052 * explicit gating rather than handling it automatically.
4053 */
4054 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4055 r = amdgpu_device_ip_late_init(adev);
4056 if (r) {
4057 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4058 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4059 goto release_ras_con;
4060 }
4061 /* must succeed. */
4062 amdgpu_ras_resume(adev);
4063 queue_delayed_work(system_dfl_wq, &adev->delayed_init_work,
4064 msecs_to_jiffies(AMDGPU_RESUME_MS));
4065 }
4066
4067 if (amdgpu_sriov_vf(adev)) {
4068 amdgpu_virt_release_full_gpu(adev, true);
4069 flush_delayed_work(&adev->delayed_init_work);
4070 }
4071
4072 /* Don't init kfd if whole hive need to be reset during init */
4073 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4074 kgd2kfd_init_zone_device(adev);
4075 kfd_update_svm_support_properties(adev);
4076 }
4077
4078 if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
4079 amdgpu_xgmi_reset_on_init(adev);
4080
4081 /*
4082 * Register these sysfs interfaces after `late_init`, since some of the
4083 * operations performed in `late_init` might affect the creation of the
4084 * sysfs interfaces.
4085 */
4086 r = amdgpu_device_sys_interface_init(adev);
4087
4088 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4089 r = amdgpu_pmu_init(adev);
4090 if (r)
4091 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4092
4093 /* Have stored pci confspace at hand for restore in sudden PCI error */
4094 if (amdgpu_device_cache_pci_state(adev->pdev))
4095 pci_restore_state(pdev);
4096
4097 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4098 /* this will fail for cards that aren't VGA class devices, just
4099 * ignore it
4100 */
4101 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4102 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4103
4104 px = amdgpu_device_supports_px(adev);
4105
4106 if (px || (!dev_is_removable(&adev->pdev->dev) &&
4107 apple_gmux_detect(NULL, NULL)))
4108 vga_switcheroo_register_client(adev->pdev,
4109 &amdgpu_switcheroo_ops, px);
4110
4111 if (px)
4112 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4113
4114 amdgpu_device_check_iommu_direct_map(adev);
4115
4116 adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
4117 r = register_pm_notifier(&adev->pm_nb);
4118 if (r)
4119 goto failed;
4120
4121 return 0;
4122
4123 release_ras_con:
4124 if (amdgpu_sriov_vf(adev))
4125 amdgpu_virt_release_full_gpu(adev, true);
4126
4127 /* failed in exclusive mode due to timeout */
4128 if (amdgpu_sriov_vf(adev) &&
4129 !amdgpu_sriov_runtime(adev) &&
4130 amdgpu_virt_mmio_blocked(adev) &&
4131 !amdgpu_virt_wait_reset(adev)) {
4132 dev_err(adev->dev, "VF exclusive mode timeout\n");
4133 /* Don't send request since VF is inactive. */
4134 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4135 adev->virt.ops = NULL;
4136 r = -EAGAIN;
4137 }
4138 amdgpu_release_ras_context(adev);
4139
4140 failed:
4141 amdgpu_vf_error_trans_all(adev);
4142
4143 return r;
4144 }
4145
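/**
 * amdgpu_device_unmap_mmio - unmap all MMIO mappings of the device
 *
 * @adev: amdgpu_device pointer
 *
 * Zaps all CPU mappings pointing at the device and unmaps the doorbell,
 * register and VRAM BARs. Used when the device is gone (e.g. after surprise
 * removal) so that nothing can touch the missing hardware afterwards.
 */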
4146 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4147 {
4148
4149 /* Clear all CPU mappings pointing to this device */
4150 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4151
4152 /* Unmap all mapped bars - Doorbell, registers and VRAM */
4153 amdgpu_doorbell_fini(adev);
4154
4155 iounmap(adev->rmmio);
4156 adev->rmmio = NULL;
4157 if (adev->mman.aper_base_kaddr)
4158 iounmap(adev->mman.aper_base_kaddr);
4159 adev->mman.aper_base_kaddr = NULL;
4160
4161 /* Memory manager related */
4162 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4163 arch_phys_wc_del(adev->gmc.vram_mtrr);
4164 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4165 }
4166 }
4167
4168 /**
4169 * amdgpu_device_fini_hw - tear down the driver
4170 *
4171 * @adev: amdgpu_device pointer
4172 *
4173 * Tear down the driver info (all asics).
4174 * Called at driver shutdown.
4175 */
4176 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4177 {
4178 dev_info(adev->dev, "finishing device.\n");
4179 flush_delayed_work(&adev->delayed_init_work);
4180
4181 if (adev->mman.initialized)
4182 drain_workqueue(adev->mman.bdev.wq);
4183 adev->shutdown = true;
4184
4185 unregister_pm_notifier(&adev->pm_nb);
4186
4187 /* make sure IB test finished before entering exclusive mode
4188 * to avoid preemption on IB test
4189 */
4190 if (amdgpu_sriov_vf(adev)) {
4191 amdgpu_virt_request_full_gpu(adev, false);
4192 amdgpu_virt_fini_data_exchange(adev);
4193 }
4194
4195 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
4196 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
4197
4198 /* disable all interrupts */
4199 amdgpu_irq_disable_all(adev);
4200 if (adev->mode_info.mode_config_initialized) {
4201 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4202 drm_helper_force_disable_all(adev_to_drm(adev));
4203 else
4204 drm_atomic_helper_shutdown(adev_to_drm(adev));
4205 }
4206 amdgpu_fence_driver_hw_fini(adev);
4207
4208 amdgpu_device_sys_interface_fini(adev);
4209
4210 /* disable ras feature must before hw fini */
4211 amdgpu_ras_pre_fini(adev);
4212
4213 amdgpu_ttm_set_buffer_funcs_status(adev, false);
4214
4215 /*
4216 * The device went through surprise hotplug; we need to destroy the topology
4217 * before ip_fini_early to prevent kfd locking/refcount issues when
4218 * amdgpu_amdkfd_suspend() is called.
4219 */
4220 if (pci_dev_is_disconnected(adev->pdev))
4221 amdgpu_amdkfd_device_fini_sw(adev);
4222
4223 amdgpu_coredump_fini(adev);
4224 amdgpu_device_ip_fini_early(adev);
4225
4226 amdgpu_irq_fini_hw(adev);
4227
4228 if (adev->mman.initialized)
4229 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4230
4231 amdgpu_gart_dummy_page_fini(adev);
4232
4233 if (pci_dev_is_disconnected(adev->pdev))
4234 amdgpu_device_unmap_mmio(adev);
4235
4236 }
4237
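/**
 * amdgpu_device_fini_sw - tear down the driver software state
 *
 * @adev: amdgpu_device pointer
 *
 * Software counterpart of amdgpu_device_fini_hw(): finalizes the IP blocks,
 * fence driver, i2c buses, BIOS data and reset domain, and releases the
 * remaining mappings and cached PCI state.
 */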
4238 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4239 {
4240 int i, idx;
4241 bool px;
4242
4243 amdgpu_device_ip_fini(adev);
4244 amdgpu_fence_driver_sw_fini(adev);
4245 amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4246 adev->accel_working = false;
4247 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4248 for (i = 0; i < MAX_XCP; ++i) {
4249 dma_fence_put(adev->isolation[i].spearhead);
4250 amdgpu_sync_free(&adev->isolation[i].active);
4251 amdgpu_sync_free(&adev->isolation[i].prev);
4252 }
4253
4254 amdgpu_reset_fini(adev);
4255
4256 /* free i2c buses */
4257 amdgpu_i2c_fini(adev);
4258
4259 if (adev->bios) {
4260 if (amdgpu_emu_mode != 1)
4261 amdgpu_atombios_fini(adev);
4262 amdgpu_bios_release(adev);
4263 }
4264
4265 kfree(adev->fru_info);
4266 adev->fru_info = NULL;
4267
4268 kfree(adev->xcp_mgr);
4269 adev->xcp_mgr = NULL;
4270
4271 px = amdgpu_device_supports_px(adev);
4272
4273 if (px || (!dev_is_removable(&adev->pdev->dev) &&
4274 apple_gmux_detect(NULL, NULL)))
4275 vga_switcheroo_unregister_client(adev->pdev);
4276
4277 if (px)
4278 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4279
4280 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4281 vga_client_unregister(adev->pdev);
4282
4283 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4284
4285 iounmap(adev->rmmio);
4286 adev->rmmio = NULL;
4287 drm_dev_exit(idx);
4288 }
4289
4290 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4291 amdgpu_pmu_fini(adev);
4292 if (adev->discovery.bin)
4293 amdgpu_discovery_fini(adev);
4294
4295 amdgpu_reset_put_reset_domain(adev->reset_domain);
4296 adev->reset_domain = NULL;
4297
4298 kfree(adev->pci_state);
4299 kfree(adev->pcie_reset_ctx.swds_pcistate);
4300 kfree(adev->pcie_reset_ctx.swus_pcistate);
4301 }
4302
4303 /**
4304 * amdgpu_device_evict_resources - evict device resources
4305 * @adev: amdgpu device object
4306 *
4307 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4308 * of the vram memory type. Mainly used for evicting device resources
4309 * at suspend time.
4310 *
4311 */
4312 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4313 {
4314 int ret;
4315
4316 /* No need to evict vram on APUs unless going to S4 */
4317 if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
4318 return 0;
4319
4320 /* No need to evict when going to S5 through S4 callbacks */
4321 if (system_state == SYSTEM_POWER_OFF)
4322 return 0;
4323
4324 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4325 if (ret) {
4326 dev_warn(adev->dev, "evicting device resources failed\n");
4327 return ret;
4328 }
4329
4330 if (adev->in_s4) {
4331 ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
4332 if (ret)
4333 dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
4334 }
4335 return ret;
4336 }
4337
4338 /*
4339 * Suspend & resume.
4340 */
4341 /**
4342 * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
4343 * @nb: notifier block
4344 * @mode: suspend mode
4345 * @data: data
4346 *
4347 * This function is called when the system is about to suspend or hibernate.
4348 * It is used to set the appropriate flags so that eviction can be optimized
4349 * in the pm prepare callback.
4350 */
4351 static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
4352 void *data)
4353 {
4354 struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
4355
4356 switch (mode) {
4357 case PM_HIBERNATION_PREPARE:
4358 adev->in_s4 = true;
4359 break;
4360 case PM_POST_HIBERNATION:
4361 adev->in_s4 = false;
4362 break;
4363 }
4364
4365 return NOTIFY_DONE;
4366 }
4367
4368 /**
4369 * amdgpu_device_prepare - prepare for device suspend
4370 *
4371 * @dev: drm dev pointer
4372 *
4373 * Prepare to put the hw in the suspend state (all asics).
4374 * Returns 0 for success or an error on failure.
4375 * Called at driver suspend.
4376 */
4377 int amdgpu_device_prepare(struct drm_device *dev)
4378 {
4379 struct amdgpu_device *adev = drm_to_adev(dev);
4380 int i, r;
4381
4382 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4383 return 0;
4384
4385 /* Evict the majority of BOs before starting suspend sequence */
4386 r = amdgpu_device_evict_resources(adev);
4387 if (r)
4388 return r;
4389
4390 flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4391
4392 for (i = 0; i < adev->num_ip_blocks; i++) {
4393 if (!adev->ip_blocks[i].status.valid)
4394 continue;
4395 if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4396 continue;
4397 r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
4398 if (r)
4399 return r;
4400 }
4401
4402 return 0;
4403 }
4404
4405 /**
4406 * amdgpu_device_complete - complete power state transition
4407 *
4408 * @dev: drm dev pointer
4409 *
4410 * Undo the changes from amdgpu_device_prepare. This will be
4411 * called on all resume transitions, including those that failed.
4412 */
4413 void amdgpu_device_complete(struct drm_device *dev)
4414 {
4415 struct amdgpu_device *adev = drm_to_adev(dev);
4416 int i;
4417
4418 for (i = 0; i < adev->num_ip_blocks; i++) {
4419 if (!adev->ip_blocks[i].status.valid)
4420 continue;
4421 if (!adev->ip_blocks[i].version->funcs->complete)
4422 continue;
4423 adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
4424 }
4425 }
4426
4427 /**
4428 * amdgpu_device_suspend - initiate device suspend
4429 *
4430 * @dev: drm dev pointer
4431 * @notify_clients: notify in-kernel DRM clients
4432 *
4433 * Puts the hw in the suspend state (all asics).
4434 * Returns 0 for success or an error on failure.
4435 * Called at driver suspend.
4436 */
4437 int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
4438 {
4439 struct amdgpu_device *adev = drm_to_adev(dev);
4440 int r, rec;
4441
4442 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4443 return 0;
4444
4445 adev->in_suspend = true;
4446
4447 if (amdgpu_sriov_vf(adev)) {
4448 if (!adev->in_runpm)
4449 amdgpu_amdkfd_suspend_process(adev);
4450 amdgpu_virt_fini_data_exchange(adev);
4451 r = amdgpu_virt_request_full_gpu(adev, false);
4452 if (r)
4453 return r;
4454 }
4455
4456 r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3);
4457 if (r)
4458 goto unwind_sriov;
4459
4460 if (notify_clients)
4461 drm_client_dev_suspend(adev_to_drm(adev));
4462
4463 cancel_delayed_work_sync(&adev->delayed_init_work);
4464
4465 amdgpu_ras_suspend(adev);
4466
4467 r = amdgpu_device_ip_suspend_phase1(adev);
4468 if (r)
4469 goto unwind_smartshift;
4470
4471 amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
4472 r = amdgpu_userq_suspend(adev);
4473 if (r)
4474 goto unwind_ip_phase1;
4475
4476 r = amdgpu_device_evict_resources(adev);
4477 if (r)
4478 goto unwind_userq;
4479
4480 amdgpu_ttm_set_buffer_funcs_status(adev, false);
4481
4482 amdgpu_fence_driver_hw_fini(adev);
4483
4484 r = amdgpu_device_ip_suspend_phase2(adev);
4485 if (r)
4486 goto unwind_evict;
4487
4488 if (amdgpu_sriov_vf(adev))
4489 amdgpu_virt_release_full_gpu(adev, false);
4490
4491 return 0;
4492
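/*
 * Error unwind: the labels below undo the suspend steps that already
 * completed, falling through from the failing step in reverse order so that
 * a failed suspend leaves the device running. The original error in 'r' is
 * preserved; 'rec' only tracks recovery failures, which are logged and abort
 * the unwind.
 */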
4493 unwind_evict:
4494 amdgpu_ttm_set_buffer_funcs_status(adev, true);
4495 amdgpu_fence_driver_hw_init(adev);
4496
4497 unwind_userq:
4498 rec = amdgpu_userq_resume(adev);
4499 if (rec) {
4500 dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec);
4501 return r;
4502 }
4503 rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
4504 if (rec) {
4505 dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec);
4506 return r;
4507 }
4508
4509 unwind_ip_phase1:
4510 /* suspend phase 1 = resume phase 3 */
4511 rec = amdgpu_device_ip_resume_phase3(adev);
4512 if (rec) {
4513 dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec);
4514 return r;
4515 }
4516
4517 unwind_smartshift:
4518 rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0);
4519 if (rec) {
4520 dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec);
4521 return r;
4522 }
4523
4524 if (notify_clients)
4525 drm_client_dev_resume(adev_to_drm(adev));
4526
4527 amdgpu_ras_resume(adev);
4528
4529 unwind_sriov:
4530 if (amdgpu_sriov_vf(adev)) {
4531 rec = amdgpu_virt_request_full_gpu(adev, true);
4532 if (rec) {
4533 dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec);
4534 return r;
4535 }
4536 }
4537
4538 adev->in_suspend = adev->in_s0ix = adev->in_s3 = false;
4539
4540 return r;
4541 }
4542
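/**
 * amdgpu_virt_resume - refresh VF state after resuming a migrated VM
 *
 * @adev: amdgpu_device pointer
 *
 * Restores MSIX so QEMU reprograms the VF MSIX table, re-reads the XGMI
 * physical node id (which may have changed if the VM was migrated to another
 * host) and recomputes the VRAM base offset accordingly.
 */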
4543 static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
4544 {
4545 int r;
4546 unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
4547
4548 /* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
4549 * may not work. The access could be blocked by nBIF protection as VF isn't in
4550 * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX
4551 * so that QEMU reprograms MSIX table.
4552 */
4553 amdgpu_restore_msix(adev);
4554
4555 r = adev->gfxhub.funcs->get_xgmi_info(adev);
4556 if (r)
4557 return r;
4558
4559 dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
4560 prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
4561
4562 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
4563 adev->vm_manager.vram_base_offset +=
4564 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
4565
4566 return 0;
4567 }
4568
4569 /**
4570 * amdgpu_device_resume - initiate device resume
4571 *
4572 * @dev: drm dev pointer
4573 * @notify_clients: notify in-kernel DRM clients
4574 *
4575 * Bring the hw back to operating state (all asics).
4576 * Returns 0 for success or an error on failure.
4577 * Called at driver resume.
4578 */
4579 int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
4580 {
4581 struct amdgpu_device *adev = drm_to_adev(dev);
4582 int r = 0;
4583
4584 if (amdgpu_sriov_vf(adev)) {
4585 r = amdgpu_virt_request_full_gpu(adev, true);
4586 if (r)
4587 return r;
4588 }
4589
4590 if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
4591 r = amdgpu_virt_resume(adev);
4592 if (r)
4593 goto exit;
4594 }
4595
4596 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4597 return 0;
4598
4599 if (adev->in_s0ix)
4600 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4601
4602 /* post card */
4603 if (amdgpu_device_need_post(adev)) {
4604 r = amdgpu_device_asic_init(adev);
4605 if (r)
4606 dev_err(adev->dev, "amdgpu asic init failed\n");
4607 }
4608
4609 r = amdgpu_device_ip_resume(adev);
4610
4611 if (r) {
4612 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4613 goto exit;
4614 }
4615
4616 r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
4617 if (r)
4618 goto exit;
4619
4620 r = amdgpu_userq_resume(adev);
4621 if (r)
4622 goto exit;
4623
4624 r = amdgpu_device_ip_late_init(adev);
4625 if (r)
4626 goto exit;
4627
4628 queue_delayed_work(system_dfl_wq, &adev->delayed_init_work,
4629 msecs_to_jiffies(AMDGPU_RESUME_MS));
4630 exit:
4631 if (amdgpu_sriov_vf(adev)) {
4632 amdgpu_virt_init_data_exchange(adev);
4633 amdgpu_virt_release_full_gpu(adev, true);
4634
4635 if (!r && !adev->in_runpm)
4636 r = amdgpu_amdkfd_resume_process(adev);
4637 }
4638
4639 if (r)
4640 return r;
4641
4642 /* Make sure IB tests flushed */
4643 flush_delayed_work(&adev->delayed_init_work);
4644
4645 if (notify_clients)
4646 drm_client_dev_resume(adev_to_drm(adev));
4647
4648 amdgpu_ras_resume(adev);
4649
4650 if (adev->mode_info.num_crtc) {
4651 /*
4652 * Most of the connector probing functions try to acquire runtime pm
4653 * refs to ensure that the GPU is powered on when connector polling is
4654 * performed. Since we're calling this from a runtime PM callback,
4655 * trying to acquire rpm refs will cause us to deadlock.
4656 *
4657 * Since we're guaranteed to be holding the rpm lock, it's safe to
4658 * temporarily disable the rpm helpers so this doesn't deadlock us.
4659 */
4660 #ifdef CONFIG_PM
4661 dev->dev->power.disable_depth++;
4662 #endif
4663 if (!adev->dc_enabled)
4664 drm_helper_hpd_irq_event(dev);
4665 else
4666 drm_kms_helper_hotplug_event(dev);
4667 #ifdef CONFIG_PM
4668 dev->dev->power.disable_depth--;
4669 #endif
4670 }
4671
4672 amdgpu_vram_mgr_clear_reset_blocks(adev);
4673 adev->in_suspend = false;
4674
4675 if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
4676 dev_warn(adev->dev, "smart shift update failed\n");
4677
4678 return 0;
4679 }
4680
4681 /**
4682 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4683 *
4684 * @adev: amdgpu_device pointer
4685 *
4686 * The list of all the hardware IPs that make up the asic is walked and
4687 * the check_soft_reset callbacks are run. check_soft_reset determines
4688 * if the asic is still hung or not.
4689 * Returns true if any of the IPs are still in a hung state, false if not.
4690 */
4691 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4692 {
4693 int i;
4694 bool asic_hang = false;
4695
4696 if (amdgpu_sriov_vf(adev))
4697 return true;
4698
4699 if (amdgpu_asic_need_full_reset(adev))
4700 return true;
4701
4702 for (i = 0; i < adev->num_ip_blocks; i++) {
4703 if (!adev->ip_blocks[i].status.valid)
4704 continue;
4705 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4706 adev->ip_blocks[i].status.hang =
4707 adev->ip_blocks[i].version->funcs->check_soft_reset(
4708 &adev->ip_blocks[i]);
4709 if (adev->ip_blocks[i].status.hang) {
4710 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4711 asic_hang = true;
4712 }
4713 }
4714 return asic_hang;
4715 }
4716
4717 /**
4718 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4719 *
4720 * @adev: amdgpu_device pointer
4721 *
4722 * The list of all the hardware IPs that make up the asic is walked and the
4723 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4724 * handles any IP specific hardware or software state changes that are
4725 * necessary for a soft reset to succeed.
4726 * Returns 0 on success, negative error code on failure.
4727 */
4728 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4729 {
4730 int i, r = 0;
4731
4732 for (i = 0; i < adev->num_ip_blocks; i++) {
4733 if (!adev->ip_blocks[i].status.valid)
4734 continue;
4735 if (adev->ip_blocks[i].status.hang &&
4736 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4737 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
4738 if (r)
4739 return r;
4740 }
4741 }
4742
4743 return 0;
4744 }
4745
4746 /**
4747 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4748 *
4749 * @adev: amdgpu_device pointer
4750 *
4751 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4752 * reset is necessary to recover.
4753 * Returns true if a full asic reset is required, false if not.
4754 */
4755 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4756 {
4757 int i;
4758
4759 if (amdgpu_asic_need_full_reset(adev))
4760 return true;
4761
4762 for (i = 0; i < adev->num_ip_blocks; i++) {
4763 if (!adev->ip_blocks[i].status.valid)
4764 continue;
4765 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4766 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4767 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4768 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4769 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4770 if (adev->ip_blocks[i].status.hang) {
4771 dev_info(adev->dev, "Some block need full reset!\n");
4772 return true;
4773 }
4774 }
4775 }
4776 return false;
4777 }
4778
4779 /**
4780 * amdgpu_device_ip_soft_reset - do a soft reset
4781 *
4782 * @adev: amdgpu_device pointer
4783 *
4784 * The list of all the hardware IPs that make up the asic is walked and the
4785 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4786 * IP specific hardware or software state changes that are necessary to soft
4787 * reset the IP.
4788 * Returns 0 on success, negative error code on failure.
4789 */
4790 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4791 {
4792 int i, r = 0;
4793
4794 for (i = 0; i < adev->num_ip_blocks; i++) {
4795 if (!adev->ip_blocks[i].status.valid)
4796 continue;
4797 if (adev->ip_blocks[i].status.hang &&
4798 adev->ip_blocks[i].version->funcs->soft_reset) {
4799 r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
4800 if (r)
4801 return r;
4802 }
4803 }
4804
4805 return 0;
4806 }
4807
4808 /**
4809 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4810 *
4811 * @adev: amdgpu_device pointer
4812 *
4813 * The list of all the hardware IPs that make up the asic is walked and the
4814 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4815 * handles any IP specific hardware or software state changes that are
4816 * necessary after the IP has been soft reset.
4817 * Returns 0 on success, negative error code on failure.
4818 */
4819 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4820 {
4821 int i, r = 0;
4822
4823 for (i = 0; i < adev->num_ip_blocks; i++) {
4824 if (!adev->ip_blocks[i].status.valid)
4825 continue;
4826 if (adev->ip_blocks[i].status.hang &&
4827 adev->ip_blocks[i].version->funcs->post_soft_reset)
4828 r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
4829 if (r)
4830 return r;
4831 }
4832
4833 return 0;
4834 }
4835
4836 /**
4837 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4838 *
4839 * @adev: amdgpu_device pointer
4840 * @reset_context: amdgpu reset context pointer
4841 *
4842 * Do a VF FLR and reinitialize the ASIC.
4843 * Returns 0 on success, otherwise a negative error code.
4844 */
4845 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4846 struct amdgpu_reset_context *reset_context)
4847 {
4848 int r;
4849 struct amdgpu_hive_info *hive = NULL;
4850
4851 if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
4852 if (!amdgpu_ras_get_fed_status(adev))
4853 amdgpu_virt_ready_to_reset(adev);
4854 amdgpu_virt_wait_reset(adev);
4855 clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
4856 r = amdgpu_virt_request_full_gpu(adev, true);
4857 } else {
4858 r = amdgpu_virt_reset_gpu(adev);
4859 }
4860 if (r)
4861 return r;
4862
4863 amdgpu_ras_clear_err_state(adev);
4864 amdgpu_irq_gpu_reset_resume_helper(adev);
4865
4866 /* some sw cleanup the VF needs to do before recovery */
4867 amdgpu_virt_post_reset(adev);
4868
4869 /* Resume IP prior to SMC */
4870 r = amdgpu_device_ip_reinit_early_sriov(adev);
4871 if (r)
4872 return r;
4873
4874 amdgpu_virt_init_data_exchange(adev);
4875
4876 r = amdgpu_device_fw_loading(adev);
4877 if (r)
4878 return r;
4879
4880 /* now we are okay to resume SMC/CP/SDMA */
4881 r = amdgpu_device_ip_reinit_late_sriov(adev);
4882 if (r)
4883 return r;
4884
4885 hive = amdgpu_get_xgmi_hive(adev);
4886 /* Update PSP FW topology after reset */
4887 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4888 r = amdgpu_xgmi_update_topology(hive, adev);
4889 if (hive)
4890 amdgpu_put_xgmi_hive(hive);
4891 if (r)
4892 return r;
4893
4894 r = amdgpu_ib_ring_tests(adev);
4895 if (r)
4896 return r;
4897
4898 if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
4899 amdgpu_inc_vram_lost(adev);
4900
4901 /* need to be called during full access so we can't do it later like
4902 * bare-metal does.
4903 */
4904 amdgpu_amdkfd_post_reset(adev);
4905 amdgpu_virt_release_full_gpu(adev, true);
4906
4907 /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */
4908 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
4909 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
4910 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
4911 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
4912 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
4913 amdgpu_ras_resume(adev);
4914
4915 amdgpu_virt_ras_telemetry_post_reset(adev);
4916
4917 return 0;
4918 }
4919
4920 /**
4921 * amdgpu_device_has_job_running - check if there is any unfinished job
4922 *
4923 * @adev: amdgpu_device pointer
4924 *
4925 * Check if any job is running on the device when the guest driver receives
4926 * an FLR notification from the host driver. If jobs are still running, the
4927 * guest driver will not respond to the FLR reset; instead, it lets the job hit
4928 * the timeout and then issues the reset request itself.
4929 */
4930 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4931 {
4932 int i;
4933
4934 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4935 struct amdgpu_ring *ring = adev->rings[i];
4936
4937 if (!amdgpu_ring_sched_ready(ring))
4938 continue;
4939
4940 if (amdgpu_fence_count_emitted(ring))
4941 return true;
4942 }
4943 return false;
4944 }
4945
4946 /**
4947 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4948 *
4949 * @adev: amdgpu_device pointer
4950 *
4951 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4952 * a hung GPU.
4953 */
4954 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4955 {
4956
4957 if (amdgpu_gpu_recovery == 0)
4958 goto disabled;
4959
4960 /* Skip soft reset check in fatal error mode */
4961 if (!amdgpu_ras_is_poison_mode_supported(adev))
4962 return true;
4963
4964 if (amdgpu_sriov_vf(adev))
4965 return true;
4966
4967 if (amdgpu_gpu_recovery == -1) {
4968 switch (adev->asic_type) {
4969 #ifdef CONFIG_DRM_AMDGPU_SI
4970 case CHIP_VERDE:
4971 case CHIP_TAHITI:
4972 case CHIP_PITCAIRN:
4973 case CHIP_OLAND:
4974 case CHIP_HAINAN:
4975 #endif
4976 #ifdef CONFIG_DRM_AMDGPU_CIK
4977 case CHIP_KAVERI:
4978 case CHIP_KABINI:
4979 case CHIP_MULLINS:
4980 #endif
4981 case CHIP_CARRIZO:
4982 case CHIP_STONEY:
4983 case CHIP_CYAN_SKILLFISH:
4984 goto disabled;
4985 default:
4986 break;
4987 }
4988 }
4989
4990 return true;
4991
4992 disabled:
4993 dev_info(adev->dev, "GPU recovery disabled.\n");
4994 return false;
4995 }
4996
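/**
 * amdgpu_device_mode1_reset - perform a full ASIC (mode 1) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Caches the PCI config space, disables bus mastering, triggers a mode 1
 * reset through the SMU when supported (PSP otherwise), restores the config
 * space and waits for the ASIC to come back.
 * Returns 0 on success or a negative error code on failure.
 */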
4997 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4998 {
4999 u32 i;
5000 int ret = 0;
5001
5002 if (adev->bios)
5003 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5004
5005 dev_info(adev->dev, "GPU mode1 reset\n");
5006
5007 /* Cache the state before bus master disable. The saved config space
5008 * values are used in other cases like restore after mode-2 reset.
5009 */
5010 amdgpu_device_cache_pci_state(adev->pdev);
5011
5012 /* disable BM */
5013 pci_clear_master(adev->pdev);
5014
5015 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5016 dev_info(adev->dev, "GPU smu mode1 reset\n");
5017 ret = amdgpu_dpm_mode1_reset(adev);
5018 } else {
5019 dev_info(adev->dev, "GPU psp mode1 reset\n");
5020 ret = psp_gpu_reset(adev);
5021 }
5022
5023 if (ret)
5024 goto mode1_reset_failed;
5025
5026 /* enable mmio access after mode 1 reset completed */
5027 adev->no_hw_access = false;
5028
5029 /* ensure no_hw_access is updated before we access hw */
5030 smp_mb();
5031
5032 amdgpu_device_load_pci_state(adev->pdev);
5033 ret = amdgpu_psp_wait_for_bootloader(adev);
5034 if (ret)
5035 goto mode1_reset_failed;
5036
5037 /* wait for asic to come out of reset */
5038 for (i = 0; i < adev->usec_timeout; i++) {
5039 u32 memsize = adev->nbio.funcs->get_memsize(adev);
5040
5041 if (memsize != 0xffffffff)
5042 break;
5043 udelay(1);
5044 }
5045
5046 if (i >= adev->usec_timeout) {
5047 ret = -ETIMEDOUT;
5048 goto mode1_reset_failed;
5049 }
5050
5051 if (adev->bios)
5052 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5053
5054 return 0;
5055
5056 mode1_reset_failed:
5057 dev_err(adev->dev, "GPU mode1 reset failed\n");
5058 return ret;
5059 }
5060
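/**
 * amdgpu_device_link_reset - reset the ASIC through the PCIe link
 *
 * @adev: amdgpu_device pointer
 *
 * Requests a link reset from the SMU (skipped while a DPC event is being
 * handled) and then waits for the PSP bootloader.
 * Returns 0 on success or a negative error code on failure.
 */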
5061 int amdgpu_device_link_reset(struct amdgpu_device *adev)
5062 {
5063 int ret = 0;
5064
5065 dev_info(adev->dev, "GPU link reset\n");
5066
5067 if (!amdgpu_reset_in_dpc(adev))
5068 ret = amdgpu_dpm_link_reset(adev);
5069
5070 if (ret)
5071 goto link_reset_failed;
5072
5073 ret = amdgpu_psp_wait_for_bootloader(adev);
5074 if (ret)
5075 goto link_reset_failed;
5076
5077 return 0;
5078
5079 link_reset_failed:
5080 dev_err(adev->dev, "GPU link reset failed\n");
5081 return ret;
5082 }
5083
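/**
 * amdgpu_device_pre_asic_reset - prepare a device for ASIC reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: amdgpu reset context pointer
 *
 * Force-completes outstanding hardware fences, increases the guilty job's
 * karma, attempts an IP soft reset on bare metal when a full reset is not yet
 * required, dumps the IP state for the coredump and, for a full reset,
 * suspends the IP blocks.
 * Returns 0 on success or a negative error code on failure.
 */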
5084 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5085 struct amdgpu_reset_context *reset_context)
5086 {
5087 int i, r = 0;
5088 struct amdgpu_job *job = NULL;
5089 struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
5090 bool need_full_reset =
5091 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5092
5093 if (reset_context->reset_req_dev == adev)
5094 job = reset_context->job;
5095
5096 if (amdgpu_sriov_vf(adev))
5097 amdgpu_virt_pre_reset(adev);
5098
5099 amdgpu_fence_driver_isr_toggle(adev, true);
5100
5101 /* block all schedulers and reset given job's ring */
5102 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5103 struct amdgpu_ring *ring = adev->rings[i];
5104
5105 if (!amdgpu_ring_sched_ready(ring))
5106 continue;
5107
5108 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5109 amdgpu_fence_driver_force_completion(ring);
5110 }
5111
5112 amdgpu_fence_driver_isr_toggle(adev, false);
5113
5114 if (job && job->vm)
5115 drm_sched_increase_karma(&job->base);
5116
5117 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5118 /* If reset handler not implemented, continue; otherwise return */
5119 if (r == -EOPNOTSUPP)
5120 r = 0;
5121 else
5122 return r;
5123
5124 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5125 if (!amdgpu_sriov_vf(adev)) {
5126
5127 if (!need_full_reset)
5128 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5129
5130 if (!need_full_reset && amdgpu_gpu_recovery &&
5131 amdgpu_device_ip_check_soft_reset(adev)) {
5132 amdgpu_device_ip_pre_soft_reset(adev);
5133 r = amdgpu_device_ip_soft_reset(adev);
5134 amdgpu_device_ip_post_soft_reset(adev);
5135 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5136 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5137 need_full_reset = true;
5138 }
5139 }
5140
5141 if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
5142 dev_info(tmp_adev->dev, "Dumping IP State\n");
5143 /* Trigger ip dump before we reset the asic */
5144 for (i = 0; i < tmp_adev->num_ip_blocks; i++)
5145 if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
5146 tmp_adev->ip_blocks[i].version->funcs
5147 ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
5148 dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
5149 }
5150
5151 if (need_full_reset)
5152 r = amdgpu_device_ip_suspend(adev);
5153 if (need_full_reset)
5154 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5155 else
5156 clear_bit(AMDGPU_NEED_FULL_RESET,
5157 &reset_context->flags);
5158 }
5159
5160 return r;
5161 }
5162
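/**
 * amdgpu_device_reinit_after_reset - bring devices back up after an ASIC reset
 *
 * @reset_context: amdgpu reset context pointer
 *
 * Walks the reset device list and, after a full reset, re-posts each ASIC,
 * resumes its IP blocks in phases, reloads firmware, restores the partition
 * mode and re-runs late init and the IB ring tests.
 * Returns 0 on success or a negative error code on failure.
 */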
5163 int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
5164 {
5165 struct list_head *device_list_handle;
5166 bool full_reset, vram_lost = false;
5167 struct amdgpu_device *tmp_adev;
5168 int r, init_level;
5169
5170 device_list_handle = reset_context->reset_device_list;
5171
5172 if (!device_list_handle)
5173 return -EINVAL;
5174
5175 full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5176
5177 /*
5178 * If this is a reset on init, use the default init level; otherwise keep
5179 * the level as the recovery level.
5180 */
5181 if (reset_context->method == AMD_RESET_METHOD_ON_INIT)
5182 init_level = AMDGPU_INIT_LEVEL_DEFAULT;
5183 else
5184 init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY;
5185
5186 r = 0;
5187 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5188 amdgpu_set_init_level(tmp_adev, init_level);
5189 if (full_reset) {
5190 /* post card */
5191 amdgpu_reset_set_dpc_status(tmp_adev, false);
5192 amdgpu_ras_clear_err_state(tmp_adev);
5193 r = amdgpu_device_asic_init(tmp_adev);
5194 if (r) {
5195 dev_warn(tmp_adev->dev, "asic atom init failed!");
5196 } else {
5197 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5198
5199 r = amdgpu_device_ip_resume_phase1(tmp_adev);
5200 if (r)
5201 goto out;
5202
5203 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5204
5205 if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
5206 amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
5207
5208 if (vram_lost) {
5209 dev_info(
5210 tmp_adev->dev,
5211 "VRAM is lost due to GPU reset!\n");
5212 amdgpu_inc_vram_lost(tmp_adev);
5213 }
5214
5215 r = amdgpu_device_fw_loading(tmp_adev);
5216 if (r)
5217 return r;
5218
5219 r = amdgpu_xcp_restore_partition_mode(
5220 tmp_adev->xcp_mgr);
5221 if (r)
5222 goto out;
5223
5224 r = amdgpu_device_ip_resume_phase2(tmp_adev);
5225 if (r)
5226 goto out;
5227
5228 amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5229
5230 r = amdgpu_device_ip_resume_phase3(tmp_adev);
5231 if (r)
5232 goto out;
5233
5234 if (vram_lost)
5235 amdgpu_device_fill_reset_magic(tmp_adev);
5236
5237 /*
5238 * Add this ASIC back as tracked, as the reset has already
5239 * completed successfully.
5240 */
5241 amdgpu_register_gpu_instance(tmp_adev);
5242
5243 if (!reset_context->hive &&
5244 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5245 amdgpu_xgmi_add_device(tmp_adev);
5246
5247 r = amdgpu_device_ip_late_init(tmp_adev);
5248 if (r)
5249 goto out;
5250
5251 r = amdgpu_userq_post_reset(tmp_adev, vram_lost);
5252 if (r)
5253 goto out;
5254
5255 drm_client_dev_resume(adev_to_drm(tmp_adev));
5256
5257 /*
5258 * The GPU enters a bad state once the number of faulty
5259 * pages detected by ECC has reached the threshold, and
5260 * ras recovery is scheduled next. So add one check here
5261 * to break recovery if the bad page threshold has indeed
5262 * been exceeded, and remind the user to retire this GPU
5263 * or set a bigger bad_page_threshold value to fix this
5264 * before probing the driver again.
5266 */
5267 if (!amdgpu_ras_is_rma(tmp_adev)) {
5268 /* must succeed. */
5269 amdgpu_ras_resume(tmp_adev);
5270 } else {
5271 r = -EINVAL;
5272 goto out;
5273 }
5274
5275 /* Update PSP FW topology after reset */
5276 if (reset_context->hive &&
5277 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5278 r = amdgpu_xgmi_update_topology(
5279 reset_context->hive, tmp_adev);
5280 }
5281 }
5282
5283 out:
5284 if (!r) {
5285 /* IP init is complete now, set level as default */
5286 amdgpu_set_init_level(tmp_adev,
5287 AMDGPU_INIT_LEVEL_DEFAULT);
5288 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5289 r = amdgpu_ib_ring_tests(tmp_adev);
5290 if (r) {
5291 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5292 r = -EAGAIN;
5293 goto end;
5294 }
5295 }
5296
5297 if (r)
5298 tmp_adev->asic_reset_res = r;
5299 }
5300
5301 end:
5302 return r;
5303 }
5304
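/**
 * amdgpu_do_asic_reset - perform the ASIC reset for a list of devices
 *
 * @device_list_handle: list of devices to reset
 * @reset_context: amdgpu reset context pointer
 *
 * Tries a dedicated reset handler first; otherwise falls back to the default
 * flow: reset all XGMI nodes in parallel, wait for completion, then
 * reinitialize the devices via amdgpu_device_reinit_after_reset().
 * Returns 0 on success or a negative error code on failure.
 */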
5305 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5306 struct amdgpu_reset_context *reset_context)
5307 {
5308 struct amdgpu_device *tmp_adev = NULL;
5309 bool need_full_reset, skip_hw_reset;
5310 int r = 0;
5311
5312 /* Try reset handler method first */
5313 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5314 reset_list);
5315
5316 reset_context->reset_device_list = device_list_handle;
5317 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5318 /* If reset handler not implemented, continue; otherwise return */
5319 if (r == -EOPNOTSUPP)
5320 r = 0;
5321 else
5322 return r;
5323
5324 /* Reset handler not implemented, use the default method */
5325 need_full_reset =
5326 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5327 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5328
5329 /*
5330 * ASIC reset has to be done on all XGMI hive nodes ASAP
5331 * to allow proper link negotiation in FW (within 1 sec)
5332 */
5333 if (!skip_hw_reset && need_full_reset) {
5334 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5335 /* For XGMI run all resets in parallel to speed up the process */
5336 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5337 if (!queue_work(system_dfl_wq,
5338 &tmp_adev->xgmi_reset_work))
5339 r = -EALREADY;
5340 } else
5341 r = amdgpu_asic_reset(tmp_adev);
5342
5343 if (r) {
5344 dev_err(tmp_adev->dev,
5345 "ASIC reset failed with error, %d for drm dev, %s",
5346 r, adev_to_drm(tmp_adev)->unique);
5347 goto out;
5348 }
5349 }
5350
5351 /* For XGMI wait for all resets to complete before proceed */
5352 if (!r) {
5353 list_for_each_entry(tmp_adev, device_list_handle,
5354 reset_list) {
5355 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5356 flush_work(&tmp_adev->xgmi_reset_work);
5357 r = tmp_adev->asic_reset_res;
5358 if (r)
5359 break;
5360 }
5361 }
5362 }
5363 }
5364
5365 if (!r && amdgpu_ras_intr_triggered()) {
5366 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5367 amdgpu_ras_reset_error_count(tmp_adev,
5368 AMDGPU_RAS_BLOCK__MMHUB);
5369 }
5370
5371 amdgpu_ras_intr_cleared();
5372 }
5373
5374 r = amdgpu_device_reinit_after_reset(reset_context);
5375 if (r == -EAGAIN)
5376 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5377 else
5378 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5379
5380 out:
5381 return r;
5382 }
5383
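/*
 * Set the MP1 state expected by the pending reset method: mode 1 and link
 * resets shut MP1 down, mode 2 resets it, and other methods leave it alone.
 */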
5384 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5385 {
5386
5387 switch (amdgpu_asic_reset_method(adev)) {
5388 case AMD_RESET_METHOD_MODE1:
5389 case AMD_RESET_METHOD_LINK:
5390 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5391 break;
5392 case AMD_RESET_METHOD_MODE2:
5393 adev->mp1_state = PP_MP1_STATE_RESET;
5394 break;
5395 default:
5396 adev->mp1_state = PP_MP1_STATE_NONE;
5397 break;
5398 }
5399 }
5400
5401 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5402 {
5403 amdgpu_vf_error_trans_all(adev);
5404 adev->mp1_state = PP_MP1_STATE_NONE;
5405 }
5406
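/**
 * amdgpu_device_resume_display_audio - re-enable runtime PM for display audio
 *
 * @adev: amdgpu_device pointer
 *
 * Counterpart of amdgpu_device_suspend_display_audio(): re-enables and
 * resumes runtime PM for the display audio device (PCI function 1) after a
 * GPU reset.
 */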
5407 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5408 {
5409 struct pci_dev *p = NULL;
5410
5411 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5412 adev->pdev->bus->number, 1);
5413 if (p) {
5414 pm_runtime_enable(&(p->dev));
5415 pm_runtime_resume(&(p->dev));
5416 }
5417
5418 pci_dev_put(p);
5419 }
5420
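/*
 * Runtime-suspend the display audio function (devfn 1 on the GPU's bus)
 * before a BACO or mode1 reset so the reset does not happen behind the
 * audio driver's back. Returns 0 on success or a negative error code.
 */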
5421 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5422 {
5423 enum amd_reset_method reset_method;
5424 struct pci_dev *p = NULL;
5425 u64 expires;
5426
5427 /*
5428 	 * For now, only BACO and mode1 reset are confirmed to suffer
5429 	 * from the audio issue if the audio device is not properly suspended.
5430 */
5431 reset_method = amdgpu_asic_reset_method(adev);
5432 if ((reset_method != AMD_RESET_METHOD_BACO) &&
5433 (reset_method != AMD_RESET_METHOD_MODE1))
5434 return -EINVAL;
5435
5436 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5437 adev->pdev->bus->number, 1);
5438 if (!p)
5439 return -ENODEV;
5440
5441 expires = pm_runtime_autosuspend_expiration(&(p->dev));
5442 if (!expires)
5443 /*
5444 		 * If we cannot get the audio device autosuspend delay,
5445 		 * fall back to a fixed 4s interval. The audio controller's
5446 		 * default autosuspend delay is 3s, so 4s is guaranteed
5447 		 * to cover it.
5448 */
5449 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5450
5451 while (!pm_runtime_status_suspended(&(p->dev))) {
5452 if (!pm_runtime_suspend(&(p->dev)))
5453 break;
5454
5455 if (expires < ktime_get_mono_fast_ns()) {
5456 dev_warn(adev->dev, "failed to suspend display audio\n");
5457 pci_dev_put(p);
5458 /* TODO: abort the succeeding gpu reset? */
5459 return -ETIMEDOUT;
5460 }
5461 }
5462
5463 pm_runtime_disable(&(p->dev));
5464
5465 pci_dev_put(p);
5466 return 0;
5467 }
5468
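/*
 * Cancel reset work that was queued before this recovery started (debugfs,
 * user queue, KFD, VF FLR and RAS recovery work) so it cannot race with the
 * reset in progress.
 */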
5469 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5470 {
5471 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5472
5473 #if defined(CONFIG_DEBUG_FS)
5474 if (!amdgpu_sriov_vf(adev))
5475 cancel_work(&adev->reset_work);
5476 #endif
5477 cancel_work(&adev->userq_reset_work);
5478
5479 if (adev->kfd.dev)
5480 cancel_work(&adev->kfd.reset_work);
5481
5482 if (amdgpu_sriov_vf(adev))
5483 cancel_work(&adev->virt.flr_work);
5484
5485 if (con && adev->ras_enabled)
5486 cancel_work(&con->recovery_work);
5487
5488 }
5489
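/*
 * Return 0 if every device on the list still responds on the bus,
 * non-zero otherwise.
 */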
5490 static int amdgpu_device_health_check(struct list_head *device_list_handle)
5491 {
5492 struct amdgpu_device *tmp_adev;
5493 int ret = 0;
5494
5495 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5496 ret |= amdgpu_device_bus_status_check(tmp_adev);
5497 }
5498
5499 return ret;
5500 }
5501
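/*
 * Build the list of devices to reset. For an XGMI hive, add all hive members
 * and rotate the list so that @adev comes first; otherwise the list contains
 * only @adev.
 */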
5502 static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
5503 struct list_head *device_list,
5504 struct amdgpu_hive_info *hive)
5505 {
5506 struct amdgpu_device *tmp_adev = NULL;
5507
5508 /*
5509 * Build list of devices to reset.
5510 * In case we are in XGMI hive mode, resort the device list
5511 * to put adev in the 1st position.
5512 */
5513 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
5514 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5515 list_add_tail(&tmp_adev->reset_list, device_list);
5516 if (adev->shutdown)
5517 tmp_adev->shutdown = true;
5518 }
5519 if (!list_is_first(&adev->reset_list, device_list))
5520 list_rotate_to_front(&adev->reset_list, device_list);
5521 } else {
5522 list_add_tail(&adev->reset_list, device_list);
5523 }
5524 }
5525
5526 static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
5527 struct list_head *device_list)
5528 {
5529 struct amdgpu_device *tmp_adev = NULL;
5530
5531 if (list_empty(device_list))
5532 return;
5533 tmp_adev =
5534 list_first_entry(device_list, struct amdgpu_device, reset_list);
5535 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5536 }
5537
5538 static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
5539 struct list_head *device_list)
5540 {
5541 struct amdgpu_device *tmp_adev = NULL;
5542
5543 if (list_empty(device_list))
5544 return;
5545 tmp_adev =
5546 list_first_entry(device_list, struct amdgpu_device, reset_list);
5547 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5548 }
5549
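/*
 * Quiesce every device on the list before the reset: set the MP1 state,
 * suspend display audio, RAS and KFD, unregister the GPU instance, park the
 * DRM schedulers and, for an emergency restart, stop all queued jobs.
 */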
5550 static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
5551 struct amdgpu_job *job,
5552 struct amdgpu_reset_context *reset_context,
5553 struct list_head *device_list,
5554 struct amdgpu_hive_info *hive,
5555 bool need_emergency_restart)
5556 {
5557 struct amdgpu_device *tmp_adev = NULL;
5558 int i;
5559
5560 /* block all schedulers and reset given job's ring */
5561 list_for_each_entry(tmp_adev, device_list, reset_list) {
5562 amdgpu_device_set_mp1_state(tmp_adev);
5563
5564 /*
5565 		 * Try to put the audio codec into suspend state
5566 		 * before the gpu reset starts.
5567 		 *
5568 		 * The power domain of the graphics device is shared
5569 		 * with the AZ (audio) power domain. Without this,
5570 		 * we may change the audio hardware behind the audio
5571 		 * driver's back, which would trigger audio codec
5572 		 * errors.
5573 */
5574 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5575 tmp_adev->pcie_reset_ctx.audio_suspended = true;
5576
5577 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5578
5579 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5580
5581 amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
5582
5583 /*
5584 		 * Mark these ASICs to be reset as untracked first,
5585 		 * and add them back after the reset completes.
5586 */
5587 amdgpu_unregister_gpu_instance(tmp_adev);
5588
5589 drm_client_dev_suspend(adev_to_drm(tmp_adev));
5590
5591 /* disable ras on ALL IPs */
5592 if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
5593 amdgpu_device_ip_need_full_reset(tmp_adev))
5594 amdgpu_ras_suspend(tmp_adev);
5595
5596 amdgpu_userq_pre_reset(tmp_adev);
5597
5598 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5599 struct amdgpu_ring *ring = tmp_adev->rings[i];
5600
5601 if (!amdgpu_ring_sched_ready(ring))
5602 continue;
5603
5604 drm_sched_wqueue_stop(&ring->sched);
5605
5606 if (need_emergency_restart)
5607 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5608 }
5609 atomic_inc(&tmp_adev->gpu_reset_counter);
5610 }
5611 }
5612
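/*
 * Run the pre-ASIC-reset step on every device, then perform the actual reset:
 * an SRIOV reset (with a bounded number of retries) for virtual functions, or
 * amdgpu_do_asic_reset() on bare metal. Pending non-scheduler resets are
 * dropped afterwards.
 */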
5613 static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
5614 struct list_head *device_list,
5615 struct amdgpu_reset_context *reset_context)
5616 {
5617 struct amdgpu_device *tmp_adev = NULL;
5618 int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
5619 int r = 0;
5620
5621 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5622 list_for_each_entry(tmp_adev, device_list, reset_list) {
5623 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5624 		/* TODO: Should we stop? */
5625 if (r) {
5626 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5627 r, adev_to_drm(tmp_adev)->unique);
5628 tmp_adev->asic_reset_res = r;
5629 }
5630 }
5631
5632 /* Actual ASIC resets if needed.*/
5633 /* Host driver will handle XGMI hive reset for SRIOV */
5634 if (amdgpu_sriov_vf(adev)) {
5635
5636 /* Bail out of reset early */
5637 if (amdgpu_ras_is_rma(adev))
5638 return -ENODEV;
5639
5640 if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
5641 dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
5642 amdgpu_ras_set_fed(adev, true);
5643 set_bit(AMDGPU_HOST_FLR, &reset_context->flags);
5644 }
5645
5646 r = amdgpu_device_reset_sriov(adev, reset_context);
5647 if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
5648 amdgpu_virt_release_full_gpu(adev, true);
5649 goto retry;
5650 }
5651 if (r)
5652 adev->asic_reset_res = r;
5653 } else {
5654 r = amdgpu_do_asic_reset(device_list, reset_context);
5655 if (r && r == -EAGAIN)
5656 goto retry;
5657 }
5658
5659 list_for_each_entry(tmp_adev, device_list, reset_list) {
5660 /*
5661 * Drop any pending non scheduler resets queued before reset is done.
5662 * Any reset scheduled after this point would be valid. Scheduler resets
5663 * were already dropped during drm_sched_stop and no new ones can come
5664 * in before drm_sched_start.
5665 */
5666 amdgpu_device_stop_pending_resets(tmp_adev);
5667 }
5668
5669 return r;
5670 }
5671
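/*
 * Restart the DRM schedulers of every device after the reset and report the
 * per-device result. Returns the first non-zero asic_reset_res, or 0 when all
 * devices recovered.
 */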
5672 static int amdgpu_device_sched_resume(struct list_head *device_list,
5673 struct amdgpu_reset_context *reset_context,
5674 bool job_signaled)
5675 {
5676 struct amdgpu_device *tmp_adev = NULL;
5677 int i, r = 0;
5678
5679 	/* Post ASIC reset for all devs. */
5680 list_for_each_entry(tmp_adev, device_list, reset_list) {
5681
5682 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5683 struct amdgpu_ring *ring = tmp_adev->rings[i];
5684
5685 if (!amdgpu_ring_sched_ready(ring))
5686 continue;
5687
5688 drm_sched_wqueue_start(&ring->sched);
5689 }
5690
5691 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5692 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5693
5694 if (tmp_adev->asic_reset_res) {
5695 			/* Bad news: how do we tell userspace?
5696 			 * For a RAS error, we should report GPU bad status
5697 			 * instead of a reset failure.
5698 */
5699 if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
5700 !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
5701 dev_info(
5702 tmp_adev->dev,
5703 "GPU reset(%d) failed with error %d\n",
5704 atomic_read(
5705 &tmp_adev->gpu_reset_counter),
5706 tmp_adev->asic_reset_res);
5707 amdgpu_vf_error_put(tmp_adev,
5708 AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
5709 tmp_adev->asic_reset_res);
5710 if (!r)
5711 r = tmp_adev->asic_reset_res;
5712 tmp_adev->asic_reset_res = 0;
5713 } else {
5714 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
5715 atomic_read(&tmp_adev->gpu_reset_counter));
5716 if (amdgpu_acpi_smart_shift_update(tmp_adev,
5717 AMDGPU_SS_DEV_D0))
5718 dev_warn(tmp_adev->dev,
5719 "smart shift update failed\n");
5720 }
5721 }
5722
5723 return r;
5724 }
5725
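/*
 * Final recovery step: resume KFD, bring display audio out of runtime
 * suspend, clear the MP1 state and re-enable RAS error queries for every
 * device on the list.
 */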
5726 static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
5727 struct list_head *device_list,
5728 bool need_emergency_restart)
5729 {
5730 struct amdgpu_device *tmp_adev = NULL;
5731
5732 list_for_each_entry(tmp_adev, device_list, reset_list) {
5733 /* unlock kfd: SRIOV would do it separately */
5734 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5735 amdgpu_amdkfd_post_reset(tmp_adev);
5736
5737 		/* kfd_post_reset will do nothing if the kfd device is not initialized;
5738 		 * bring up kfd here if it was not initialized before
5739 */
5740 if (!adev->kfd.init_complete)
5741 amdgpu_amdkfd_device_init(adev);
5742
5743 if (tmp_adev->pcie_reset_ctx.audio_suspended)
5744 amdgpu_device_resume_display_audio(tmp_adev);
5745
5746 amdgpu_device_unset_mp1_state(tmp_adev);
5747
5748 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5749
5750 }
5751 }
5752
5753
5754 /**
5755 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5756 *
5757 * @adev: amdgpu_device pointer
5758 * @job: the job which triggered the hang
5759 * @reset_context: amdgpu reset context pointer
5760 *
5761 * Attempt to reset the GPU if it has hung (all ASICs).
5762 * Attempt a soft reset or full reset and reinitialize the ASIC.
5763 * Returns 0 for success or an error on failure.
5764 */
5765
5766 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5767 struct amdgpu_job *job,
5768 struct amdgpu_reset_context *reset_context)
5769 {
5770 struct list_head device_list;
5771 bool job_signaled = false;
5772 struct amdgpu_hive_info *hive = NULL;
5773 int r = 0;
5774 bool need_emergency_restart = false;
5775 /* save the pasid here as the job may be freed before the end of the reset */
5776 int pasid = job ? job->pasid : -EINVAL;
5777
5778 /*
5779 * If it reaches here because of hang/timeout and a RAS error is
5780 * detected at the same time, let RAS recovery take care of it.
5781 */
5782 if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
5783 !amdgpu_sriov_vf(adev) &&
5784 reset_context->src != AMDGPU_RESET_SRC_RAS) {
5785 dev_dbg(adev->dev,
5786 "Gpu recovery from source: %d yielding to RAS error recovery handling",
5787 reset_context->src);
5788 return 0;
5789 }
5790
5791 /*
5792 * Special case: RAS triggered and full reset isn't supported
5793 */
5794 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5795
5796 /*
5797 * Flush RAM to disk so that after reboot
5798 	 * the user can read the log and see why the system rebooted.
5799 */
5800 if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5801 amdgpu_ras_get_context(adev)->reboot) {
5802 dev_warn(adev->dev, "Emergency reboot.");
5803
5804 ksys_sync_helper();
5805 emergency_restart();
5806 }
5807
5808 	dev_info(adev->dev, "GPU %s begin! Source: %d\n",
5809 need_emergency_restart ? "jobs stop" : "reset",
5810 reset_context->src);
5811
5812 if (!amdgpu_sriov_vf(adev))
5813 hive = amdgpu_get_xgmi_hive(adev);
5814 if (hive)
5815 mutex_lock(&hive->hive_lock);
5816
5817 reset_context->job = job;
5818 reset_context->hive = hive;
5819 INIT_LIST_HEAD(&device_list);
5820
5821 amdgpu_device_recovery_prepare(adev, &device_list, hive);
5822
5823 if (!amdgpu_sriov_vf(adev)) {
5824 r = amdgpu_device_health_check(&device_list);
5825 if (r)
5826 goto end_reset;
5827 }
5828
5829 /* Cannot be called after locking reset domain */
5830 amdgpu_ras_pre_reset(adev, &device_list);
5831
5832 /* We need to lock reset domain only once both for XGMI and single device */
5833 amdgpu_device_recovery_get_reset_lock(adev, &device_list);
5834
5835 amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
5836 hive, need_emergency_restart);
5837 if (need_emergency_restart)
5838 goto skip_sched_resume;
5839 /*
5840 * Must check guilty signal here since after this point all old
5841 * HW fences are force signaled.
5842 *
5843 * job->base holds a reference to parent fence
5844 */
5845 if (job && (dma_fence_get_status(&job->hw_fence->base) > 0)) {
5846 job_signaled = true;
5847 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5848 goto skip_hw_reset;
5849 }
5850
5851 r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
5852 if (r)
5853 goto reset_unlock;
5854 skip_hw_reset:
5855 r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
5856 if (r)
5857 goto reset_unlock;
5858 skip_sched_resume:
5859 amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
5860 reset_unlock:
5861 amdgpu_device_recovery_put_reset_lock(adev, &device_list);
5862 amdgpu_ras_post_reset(adev, &device_list);
5863 end_reset:
5864 if (hive) {
5865 mutex_unlock(&hive->hive_lock);
5866 amdgpu_put_xgmi_hive(hive);
5867 }
5868
5869 if (r)
5870 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5871
5872 atomic_set(&adev->reset_domain->reset_res, r);
5873
5874 if (!r) {
5875 struct amdgpu_task_info *ti = NULL;
5876
5877 /*
5878 * The job may already be freed at this point via the sched tdr workqueue so
5879 * use the cached pasid.
5880 */
5881 if (pasid >= 0)
5882 ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
5883
5884 drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
5885 ti ? &ti->task : NULL);
5886
5887 amdgpu_vm_put_task_info(ti);
5888 }
5889
5890 return r;
5891 }
5892
5893 /**
5894 * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
5895 *
5896 * @adev: amdgpu_device pointer
5897 * @speed: pointer to the speed of the link
5898 * @width: pointer to the width of the link
5899 *
5900 * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5901 * first physical partner to an AMD dGPU.
5902 * This will exclude any virtual switches and links.
5903 */
5904 static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5905 enum pci_bus_speed *speed,
5906 enum pcie_link_width *width)
5907 {
5908 struct pci_dev *parent = adev->pdev;
5909
5910 if (!speed || !width)
5911 return;
5912
5913 *speed = PCI_SPEED_UNKNOWN;
5914 *width = PCIE_LNK_WIDTH_UNKNOWN;
5915
5916 if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
5917 while ((parent = pci_upstream_bridge(parent))) {
5918 /* skip upstream/downstream switches internal to dGPU*/
5919 if (parent->vendor == PCI_VENDOR_ID_ATI)
5920 continue;
5921 *speed = pcie_get_speed_cap(parent);
5922 *width = pcie_get_width_cap(parent);
5923 break;
5924 }
5925 } else {
5926 /* use the current speeds rather than max if switching is not supported */
5927 pcie_bandwidth_available(adev->pdev, NULL, speed, width);
5928 }
5929 }
5930
5931 /**
5932 * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
5933 *
5934 * @adev: amdgpu_device pointer
5935 * @speed: pointer to the speed of the link
5936 * @width: pointer to the width of the link
5937 *
5938 * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5939 * AMD dGPU which may be a virtual upstream bridge.
5940 */
5941 static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
5942 enum pci_bus_speed *speed,
5943 enum pcie_link_width *width)
5944 {
5945 struct pci_dev *parent = adev->pdev;
5946
5947 if (!speed || !width)
5948 return;
5949
5950 parent = pci_upstream_bridge(parent);
5951 if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
5952 /* use the upstream/downstream switches internal to dGPU */
5953 *speed = pcie_get_speed_cap(parent);
5954 *width = pcie_get_width_cap(parent);
5955 while ((parent = pci_upstream_bridge(parent))) {
5956 if (parent->vendor == PCI_VENDOR_ID_ATI) {
5957 /* use the upstream/downstream switches internal to dGPU */
5958 *speed = pcie_get_speed_cap(parent);
5959 *width = pcie_get_width_cap(parent);
5960 }
5961 }
5962 } else {
5963 /* use the device itself */
5964 *speed = pcie_get_speed_cap(adev->pdev);
5965 *width = pcie_get_width_cap(adev->pdev);
5966 }
5967 }
5968
5969 /**
5970 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5971 *
5972 * @adev: amdgpu_device pointer
5973 *
5974 * Fetches and stores in the driver the PCIE capabilities (gen speed
5975 * and lanes) of the slot the device is in. Handles APUs and
5976 * virtualized environments where PCIE config space may not be available.
5977 */
5978 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5979 {
5980 enum pci_bus_speed speed_cap, platform_speed_cap;
5981 enum pcie_link_width platform_link_width, link_width;
5982
5983 if (amdgpu_pcie_gen_cap)
5984 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5985
5986 if (amdgpu_pcie_lane_cap)
5987 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5988
5989 /* covers APUs as well */
5990 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5991 if (adev->pm.pcie_gen_mask == 0)
5992 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5993 if (adev->pm.pcie_mlw_mask == 0)
5994 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5995 return;
5996 }
5997
5998 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5999 return;
6000
6001 amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
6002 &platform_link_width);
6003 amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
6004
6005 if (adev->pm.pcie_gen_mask == 0) {
6006 /* asic caps */
6007 if (speed_cap == PCI_SPEED_UNKNOWN) {
6008 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6009 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6010 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6011 } else {
6012 if (speed_cap == PCIE_SPEED_32_0GT)
6013 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6014 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6015 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6016 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6017 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
6018 else if (speed_cap == PCIE_SPEED_16_0GT)
6019 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6020 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6021 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6022 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
6023 else if (speed_cap == PCIE_SPEED_8_0GT)
6024 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6025 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6026 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6027 else if (speed_cap == PCIE_SPEED_5_0GT)
6028 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6029 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
6030 else
6031 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
6032 }
6033 /* platform caps */
6034 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
6035 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6036 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6037 } else {
6038 if (platform_speed_cap == PCIE_SPEED_32_0GT)
6039 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6040 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6041 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6042 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6043 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
6044 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
6045 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6046 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6047 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6048 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
6049 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
6050 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6051 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6052 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
6053 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
6054 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6055 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6056 else
6057 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
6058
6059 }
6060 }
6061 if (adev->pm.pcie_mlw_mask == 0) {
6062 /* asic caps */
6063 if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6064 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
6065 } else {
6066 switch (link_width) {
6067 case PCIE_LNK_X32:
6068 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
6069 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6070 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6071 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6072 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6073 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6074 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6075 break;
6076 case PCIE_LNK_X16:
6077 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6078 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6079 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6080 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6081 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6082 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6083 break;
6084 case PCIE_LNK_X12:
6085 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6086 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6087 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6088 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6089 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6090 break;
6091 case PCIE_LNK_X8:
6092 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6093 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6094 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6095 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6096 break;
6097 case PCIE_LNK_X4:
6098 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6099 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6100 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6101 break;
6102 case PCIE_LNK_X2:
6103 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6104 CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6105 break;
6106 case PCIE_LNK_X1:
6107 adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
6108 break;
6109 default:
6110 break;
6111 }
6112 }
6113 /* platform caps */
6114 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6115 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
6116 } else {
6117 switch (platform_link_width) {
6118 case PCIE_LNK_X32:
6119 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
6120 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6121 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6122 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6123 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6124 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6125 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6126 break;
6127 case PCIE_LNK_X16:
6128 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6129 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6130 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6131 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6132 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6133 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6134 break;
6135 case PCIE_LNK_X12:
6136 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6137 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6138 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6139 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6140 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6141 break;
6142 case PCIE_LNK_X8:
6143 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6144 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6145 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6146 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6147 break;
6148 case PCIE_LNK_X4:
6149 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6150 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6151 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6152 break;
6153 case PCIE_LNK_X2:
6154 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6155 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6156 break;
6157 case PCIE_LNK_X1:
6158 adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
6159 break;
6160 default:
6161 break;
6162 }
6163 }
6164 }
6165 }
6166
6167 /**
6168 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
6169 *
6170 * @adev: amdgpu_device pointer
6171 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
6172 *
6173 * Return true if @peer_adev can access (DMA) @adev through the PCIe
6174 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
6175 * @peer_adev.
6176 */
6177 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
6178 struct amdgpu_device *peer_adev)
6179 {
6180 #ifdef CONFIG_HSA_AMD_P2P
6181 bool p2p_access =
6182 !adev->gmc.xgmi.connected_to_cpu &&
6183 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
6184 if (!p2p_access)
6185 dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
6186 pci_name(peer_adev->pdev));
6187
6188 bool is_large_bar = adev->gmc.visible_vram_size &&
6189 adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
6190 bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
6191
6192 if (!p2p_addressable) {
6193 uint64_t address_mask = peer_adev->dev->dma_mask ?
6194 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
6195 resource_size_t aper_limit =
6196 adev->gmc.aper_base + adev->gmc.aper_size - 1;
6197
6198 p2p_addressable = !(adev->gmc.aper_base & address_mask ||
6199 aper_limit & address_mask);
6200 }
6201 return pcie_p2p && is_large_bar && p2p_access && p2p_addressable;
6202 #else
6203 return false;
6204 #endif
6205 }
6206
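/*
 * Enter BACO. When RAS is enabled the doorbell interrupt is disabled first;
 * amdgpu_device_baco_exit() re-enables it after leaving BACO.
 */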
6207 int amdgpu_device_baco_enter(struct amdgpu_device *adev)
6208 {
6209 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6210
6211 if (!amdgpu_device_supports_baco(adev))
6212 return -ENOTSUPP;
6213
6214 if (ras && adev->ras_enabled &&
6215 adev->nbio.funcs->enable_doorbell_interrupt)
6216 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6217
6218 return amdgpu_dpm_baco_enter(adev);
6219 }
6220
6221 int amdgpu_device_baco_exit(struct amdgpu_device *adev)
6222 {
6223 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6224 int ret = 0;
6225
6226 if (!amdgpu_device_supports_baco(adev))
6227 return -ENOTSUPP;
6228
6229 ret = amdgpu_dpm_baco_exit(adev);
6230 if (ret)
6231 return ret;
6232
6233 if (ras && adev->ras_enabled &&
6234 adev->nbio.funcs->enable_doorbell_interrupt)
6235 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6236
6237 if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
6238 adev->nbio.funcs->clear_doorbell_interrupt)
6239 adev->nbio.funcs->clear_doorbell_interrupt(adev);
6240
6241 return 0;
6242 }
6243
6244 /**
6245 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6246 * @pdev: PCI device struct
6247 * @state: PCI channel state
6248 *
6249 * Description: Called when a PCI error is detected.
6250 *
6251 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6252 */
6253 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6254 {
6255 struct drm_device *dev = pci_get_drvdata(pdev);
6256 struct amdgpu_device *adev = drm_to_adev(dev);
6257 struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
6258 amdgpu_get_xgmi_hive(adev);
6259 struct amdgpu_reset_context reset_context;
6260 struct list_head device_list;
6261
6262 dev_info(adev->dev, "PCI error: detected callback!!\n");
6263
6264 adev->pci_channel_state = state;
6265
6266 switch (state) {
6267 case pci_channel_io_normal:
6268 dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
6269 return PCI_ERS_RESULT_CAN_RECOVER;
6270 case pci_channel_io_frozen:
6271 /* Fatal error, prepare for slot reset */
6272 dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
6273 if (hive) {
6274 		/* Hive devices should be able to support FW-based
6275 		 * link reset on other devices; if not, return.
6276 */
6277 if (!amdgpu_dpm_is_link_reset_supported(adev)) {
6278 dev_warn(adev->dev,
6279 "No support for XGMI hive yet...\n");
6280 return PCI_ERS_RESULT_DISCONNECT;
6281 }
6282 		/* Set dpc status only if the device is part of a hive.
6283 		 * Non-hive devices should be able to recover after a
6284 		 * link reset.
6285 */
6286 amdgpu_reset_set_dpc_status(adev, true);
6287
6288 mutex_lock(&hive->hive_lock);
6289 } else {
6290 if (amdgpu_device_bus_status_check(adev))
6291 amdgpu_reset_set_dpc_status(adev, true);
6292 }
6293 memset(&reset_context, 0, sizeof(reset_context));
6294 INIT_LIST_HEAD(&device_list);
6295
6296 amdgpu_device_recovery_prepare(adev, &device_list, hive);
6297 amdgpu_device_recovery_get_reset_lock(adev, &device_list);
6298 amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
6299 hive, false);
6300 if (hive)
6301 mutex_unlock(&hive->hive_lock);
6302 return PCI_ERS_RESULT_NEED_RESET;
6303 case pci_channel_io_perm_failure:
6304 /* Permanent error, prepare for device removal */
6305 dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
6306 return PCI_ERS_RESULT_DISCONNECT;
6307 }
6308
6309 return PCI_ERS_RESULT_NEED_RESET;
6310 }
6311
6312 /**
6313 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6314 * @pdev: pointer to PCI device
6315 */
6316 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6317 {
6318 struct drm_device *dev = pci_get_drvdata(pdev);
6319 struct amdgpu_device *adev = drm_to_adev(dev);
6320
6321 dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
6322
6323 /* TODO - dump whatever for debugging purposes */
6324
6325 	/* This is called only if amdgpu_pci_error_detected returns
6326 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6327 * works, no need to reset slot.
6328 */
6329
6330 return PCI_ERS_RESULT_RECOVERED;
6331 }
6332
6333 /**
6334 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6335 * @pdev: PCI device struct
6336 *
6337 * Description: This routine is called by the pci error recovery
6338 * code after the PCI slot has been reset, just before we
6339 * should resume normal operations.
6340 */
6341 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6342 {
6343 struct drm_device *dev = pci_get_drvdata(pdev);
6344 struct amdgpu_device *adev = drm_to_adev(dev);
6345 struct amdgpu_reset_context reset_context;
6346 struct amdgpu_device *tmp_adev;
6347 struct amdgpu_hive_info *hive;
6348 struct list_head device_list;
6349 struct pci_dev *link_dev;
6350 int r = 0, i, timeout;
6351 u32 memsize;
6352 u16 status;
6353
6354 dev_info(adev->dev, "PCI error: slot reset callback!!\n");
6355
6356 memset(&reset_context, 0, sizeof(reset_context));
6357 INIT_LIST_HEAD(&device_list);
6358 hive = amdgpu_get_xgmi_hive(adev);
6359 if (hive) {
6360 mutex_lock(&hive->hive_lock);
6361 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
6362 list_add_tail(&tmp_adev->reset_list, &device_list);
6363 } else {
6364 list_add_tail(&adev->reset_list, &device_list);
6365 }
6366
6367 if (adev->pcie_reset_ctx.swus)
6368 link_dev = adev->pcie_reset_ctx.swus;
6369 else
6370 link_dev = adev->pdev;
6371 /* wait for asic to come out of reset, timeout = 10s */
6372 timeout = 10000;
6373 do {
6374 usleep_range(10000, 10500);
6375 r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
6376 timeout -= 10;
6377 } while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
6378 (status != PCI_VENDOR_ID_AMD));
6379
6380 if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
6381 r = -ETIME;
6382 goto out;
6383 }
6384
6385 amdgpu_device_load_switch_state(adev);
6386 /* Restore PCI confspace */
6387 amdgpu_device_load_pci_state(pdev);
6388
6389 /* confirm ASIC came out of reset */
6390 for (i = 0; i < adev->usec_timeout; i++) {
6391 memsize = amdgpu_asic_get_config_memsize(adev);
6392
6393 if (memsize != 0xffffffff)
6394 break;
6395 udelay(1);
6396 }
6397 if (memsize == 0xffffffff) {
6398 r = -ETIME;
6399 goto out;
6400 }
6401
6402 reset_context.method = AMD_RESET_METHOD_NONE;
6403 reset_context.reset_req_dev = adev;
6404 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6405 set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
6406
6407 if (hive) {
6408 reset_context.hive = hive;
6409 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
6410 tmp_adev->pcie_reset_ctx.in_link_reset = true;
6411 } else {
6412 adev->pcie_reset_ctx.in_link_reset = true;
6413 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6414 }
6415
6416 r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
6417 out:
6418 if (!r) {
6419 if (amdgpu_device_cache_pci_state(adev->pdev))
6420 pci_restore_state(adev->pdev);
6421 dev_info(adev->dev, "PCIe error recovery succeeded\n");
6422 } else {
6423 dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
6424 if (hive) {
6425 list_for_each_entry(tmp_adev, &device_list, reset_list)
6426 amdgpu_device_unset_mp1_state(tmp_adev);
6427 }
6428 amdgpu_device_recovery_put_reset_lock(adev, &device_list);
6429 }
6430
6431 if (hive) {
6432 mutex_unlock(&hive->hive_lock);
6433 amdgpu_put_xgmi_hive(hive);
6434 }
6435
6436 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6437 }
6438
6439 /**
6440 * amdgpu_pci_resume() - resume normal ops after PCI reset
6441 * @pdev: pointer to PCI device
6442 *
6443 * Called when the error recovery driver tells us that it's
6444 * OK to resume normal operation.
6445 */
6446 void amdgpu_pci_resume(struct pci_dev *pdev)
6447 {
6448 struct drm_device *dev = pci_get_drvdata(pdev);
6449 struct amdgpu_device *adev = drm_to_adev(dev);
6450 struct list_head device_list;
6451 struct amdgpu_hive_info *hive = NULL;
6452 struct amdgpu_device *tmp_adev = NULL;
6453
6454 dev_info(adev->dev, "PCI error: resume callback!!\n");
6455
6456 /* Only continue execution for the case of pci_channel_io_frozen */
6457 if (adev->pci_channel_state != pci_channel_io_frozen)
6458 return;
6459
6460 INIT_LIST_HEAD(&device_list);
6461
6462 hive = amdgpu_get_xgmi_hive(adev);
6463 if (hive) {
6464 mutex_lock(&hive->hive_lock);
6465 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
6466 tmp_adev->pcie_reset_ctx.in_link_reset = false;
6467 list_add_tail(&tmp_adev->reset_list, &device_list);
6468 }
6469 } else {
6470 adev->pcie_reset_ctx.in_link_reset = false;
6471 list_add_tail(&adev->reset_list, &device_list);
6472 }
6473 	amdgpu_device_sched_resume(&device_list, NULL, false);
6474 amdgpu_device_gpu_resume(adev, &device_list, false);
6475 amdgpu_device_recovery_put_reset_lock(adev, &device_list);
6476
6477 if (hive) {
6478 mutex_unlock(&hive->hive_lock);
6479 amdgpu_put_xgmi_hive(hive);
6480 }
6481 }
6482
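/*
 * Cache the config space of the switch upstream/downstream ports internal to
 * the dGPU (SWUS/SWDS) so it can be restored after a link reset. Does nothing
 * if the parent bridges are not AMD/ATI switch ports or if already saved.
 */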
6483 static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
6484 {
6485 struct pci_dev *swus, *swds;
6486 int r;
6487
6488 swds = pci_upstream_bridge(adev->pdev);
6489 if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
6490 pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
6491 return;
6492 swus = pci_upstream_bridge(swds);
6493 if (!swus ||
6494 (swus->vendor != PCI_VENDOR_ID_ATI &&
6495 swus->vendor != PCI_VENDOR_ID_AMD) ||
6496 pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
6497 return;
6498
6499 /* If already saved, return */
6500 if (adev->pcie_reset_ctx.swus)
6501 return;
6502 /* Upstream bridge is ATI, assume it's SWUS/DS architecture */
6503 r = pci_save_state(swds);
6504 if (r)
6505 return;
6506 adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
6507
6508 r = pci_save_state(swus);
6509 if (r)
6510 return;
6511 adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
6512
6513 adev->pcie_reset_ctx.swus = swus;
6514 }
6515
6516 static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
6517 {
6518 struct pci_dev *pdev;
6519 int r;
6520
6521 if (!adev->pcie_reset_ctx.swds_pcistate ||
6522 !adev->pcie_reset_ctx.swus_pcistate)
6523 return;
6524
6525 pdev = adev->pcie_reset_ctx.swus;
6526 r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
6527 if (!r) {
6528 pci_restore_state(pdev);
6529 } else {
6530 dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
6531 return;
6532 }
6533
6534 pdev = pci_upstream_bridge(adev->pdev);
6535 r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
6536 if (!r)
6537 pci_restore_state(pdev);
6538 else
6539 dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
6540 }
6541
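/*
 * Save and cache the GPU's PCI config space (and the internal switch state)
 * so it can be restored after a reset. Not used for SRIOV VFs. Returns true
 * on success.
 */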
6542 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6543 {
6544 struct drm_device *dev = pci_get_drvdata(pdev);
6545 struct amdgpu_device *adev = drm_to_adev(dev);
6546 int r;
6547
6548 if (amdgpu_sriov_vf(adev))
6549 return false;
6550
6551 r = pci_save_state(pdev);
6552 if (!r) {
6553 kfree(adev->pci_state);
6554
6555 adev->pci_state = pci_store_saved_state(pdev);
6556
6557 if (!adev->pci_state) {
6558 dev_err(adev->dev, "Failed to store PCI saved state");
6559 return false;
6560 }
6561 } else {
6562 dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
6563 return false;
6564 }
6565
6566 amdgpu_device_cache_switch_state(adev);
6567
6568 return true;
6569 }
6570
6571 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6572 {
6573 struct drm_device *dev = pci_get_drvdata(pdev);
6574 struct amdgpu_device *adev = drm_to_adev(dev);
6575 int r;
6576
6577 if (!adev->pci_state)
6578 return false;
6579
6580 r = pci_load_saved_state(pdev, adev->pci_state);
6581
6582 if (!r) {
6583 pci_restore_state(pdev);
6584 } else {
6585 dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
6586 return false;
6587 }
6588
6589 return true;
6590 }
6591
6592 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6593 struct amdgpu_ring *ring)
6594 {
6595 #ifdef CONFIG_X86_64
6596 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6597 return;
6598 #endif
6599 if (adev->gmc.xgmi.connected_to_cpu)
6600 return;
6601
6602 if (ring && ring->funcs->emit_hdp_flush) {
6603 amdgpu_ring_emit_hdp_flush(ring);
6604 return;
6605 }
6606
6607 if (!ring && amdgpu_sriov_runtime(adev)) {
6608 if (!amdgpu_kiq_hdp_flush(adev))
6609 return;
6610 }
6611
6612 amdgpu_hdp_flush(adev, ring);
6613 }
6614
6615 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6616 struct amdgpu_ring *ring)
6617 {
6618 #ifdef CONFIG_X86_64
6619 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6620 return;
6621 #endif
6622 if (adev->gmc.xgmi.connected_to_cpu)
6623 return;
6624
6625 amdgpu_hdp_invalidate(adev, ring);
6626 }
6627
6628 int amdgpu_in_reset(struct amdgpu_device *adev)
6629 {
6630 return atomic_read(&adev->reset_domain->in_gpu_reset);
6631 }
6632
6633 /**
6634 * amdgpu_device_halt() - bring hardware to some kind of halt state
6635 *
6636 * @adev: amdgpu_device pointer
6637 *
6638 * Bring hardware to some kind of halt state so that no one can touch it
6639 * any more. It helps to maintain the error context when an error occurs.
6640 * Compared to a simple hang, the system will stay stable at least for SSH
6641 * access. Then it should be trivial to inspect the hardware state and
6642 * see what's going on. Implemented as follows:
6643 *
6644 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
6645 * clears all CPU mappings to device, disallows remappings through page faults
6646 * 2. amdgpu_irq_disable_all() disables all interrupts
6647 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6648 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6649 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6650 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6651 * flush any in flight DMA operations
6652 */
6653 void amdgpu_device_halt(struct amdgpu_device *adev)
6654 {
6655 struct pci_dev *pdev = adev->pdev;
6656 struct drm_device *ddev = adev_to_drm(adev);
6657
6658 amdgpu_xcp_dev_unplug(adev);
6659 drm_dev_unplug(ddev);
6660
6661 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
6662 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
6663
6664 amdgpu_irq_disable_all(adev);
6665
6666 amdgpu_fence_driver_hw_fini(adev);
6667
6668 adev->no_hw_access = true;
6669
6670 amdgpu_device_unmap_mmio(adev);
6671
6672 pci_disable_device(pdev);
6673 pci_wait_for_pending_transaction(pdev);
6674 }
6675
6676 /**
6677 * amdgpu_device_get_gang - return a reference to the current gang
6678 * @adev: amdgpu_device pointer
6679 *
6680 * Returns: A new reference to the current gang leader.
6681 */
6682 struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
6683 {
6684 struct dma_fence *fence;
6685
6686 rcu_read_lock();
6687 fence = dma_fence_get_rcu_safe(&adev->gang_submit);
6688 rcu_read_unlock();
6689 return fence;
6690 }
6691
6692 /**
6693 * amdgpu_device_switch_gang - switch to a new gang
6694 * @adev: amdgpu_device pointer
6695 * @gang: the gang to switch to
6696 *
6697 * Try to switch to a new gang.
6698 * Returns: NULL if we switched to the new gang or a reference to the current
6699 * gang leader.
6700 */
6701 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6702 struct dma_fence *gang)
6703 {
6704 struct dma_fence *old = NULL;
6705
6706 dma_fence_get(gang);
6707 do {
6708 dma_fence_put(old);
6709 old = amdgpu_device_get_gang(adev);
6710 if (old == gang)
6711 break;
6712
6713 if (!dma_fence_is_signaled(old)) {
6714 dma_fence_put(gang);
6715 return old;
6716 }
6717
6718 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6719 old, gang) != old);
6720
6721 /*
6722 * Drop it once for the exchanged reference in adev and once for the
6723 * thread local reference acquired in amdgpu_device_get_gang().
6724 */
6725 dma_fence_put(old);
6726 dma_fence_put(old);
6727 return NULL;
6728 }
6729
6730 /**
6731 * amdgpu_device_enforce_isolation - enforce HW isolation
6732 * @adev: the amdgpu device pointer
6733 * @ring: the HW ring the job is supposed to run on
6734 * @job: the job which is about to be pushed to the HW ring
6735 *
6736 * Makes sure that only one client at a time can use the GFX block.
6737 * Returns: The dependency to wait on before the job can be pushed to the HW.
6738 * The function is called multiple times until NULL is returned.
6739 */
6740 struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
6741 struct amdgpu_ring *ring,
6742 struct amdgpu_job *job)
6743 {
6744 struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
6745 struct drm_sched_fence *f = job->base.s_fence;
6746 struct dma_fence *dep;
6747 void *owner;
6748 int r;
6749
6750 /*
6751 * For now enforce isolation only for the GFX block since we only need
6752 * the cleaner shader on those rings.
6753 */
6754 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
6755 ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
6756 return NULL;
6757
6758 /*
6759 * All submissions where enforce isolation is false are handled as if
6760 	 * they come from a single client. Use ~0l as the owner to distinguish it
6761 * from kernel submissions where the owner is NULL.
6762 */
6763 owner = job->enforce_isolation ? f->owner : (void *)~0l;
6764
6765 mutex_lock(&adev->enforce_isolation_mutex);
6766
6767 /*
6768 * The "spearhead" submission is the first one which changes the
6769 * ownership to its client. We always need to wait for it to be
6770 * pushed to the HW before proceeding with anything.
6771 */
6772 if (&f->scheduled != isolation->spearhead &&
6773 !dma_fence_is_signaled(isolation->spearhead)) {
6774 dep = isolation->spearhead;
6775 goto out_grab_ref;
6776 }
6777
6778 if (isolation->owner != owner) {
6779
6780 /*
6781 * Wait for any gang to be assembled before switching to a
6782 		 * different owner, otherwise we could deadlock the
6783 * submissions.
6784 */
6785 if (!job->gang_submit) {
6786 dep = amdgpu_device_get_gang(adev);
6787 if (!dma_fence_is_signaled(dep))
6788 goto out_return_dep;
6789 dma_fence_put(dep);
6790 }
6791
6792 dma_fence_put(isolation->spearhead);
6793 isolation->spearhead = dma_fence_get(&f->scheduled);
6794 amdgpu_sync_move(&isolation->active, &isolation->prev);
6795 trace_amdgpu_isolation(isolation->owner, owner);
6796 isolation->owner = owner;
6797 }
6798
6799 /*
6800 * Specifying the ring here helps to pipeline submissions even when
6801 	 * isolation is enabled. If that is not desired for testing, NULL can be
6802 * used instead of the ring to enforce a CPU round trip while switching
6803 * between clients.
6804 */
6805 dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
6806 r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
6807 if (r)
6808 dev_warn(adev->dev, "OOM tracking isolation\n");
6809
6810 out_grab_ref:
6811 dma_fence_get(dep);
6812 out_return_dep:
6813 mutex_unlock(&adev->enforce_isolation_mutex);
6814 return dep;
6815 }
6816
6817 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6818 {
6819 switch (adev->asic_type) {
6820 #ifdef CONFIG_DRM_AMDGPU_SI
6821 case CHIP_HAINAN:
6822 #endif
6823 case CHIP_TOPAZ:
6824 /* chips with no display hardware */
6825 return false;
6826 #ifdef CONFIG_DRM_AMDGPU_SI
6827 case CHIP_TAHITI:
6828 case CHIP_PITCAIRN:
6829 case CHIP_VERDE:
6830 case CHIP_OLAND:
6831 #endif
6832 #ifdef CONFIG_DRM_AMDGPU_CIK
6833 case CHIP_BONAIRE:
6834 case CHIP_HAWAII:
6835 case CHIP_KAVERI:
6836 case CHIP_KABINI:
6837 case CHIP_MULLINS:
6838 #endif
6839 case CHIP_TONGA:
6840 case CHIP_FIJI:
6841 case CHIP_POLARIS10:
6842 case CHIP_POLARIS11:
6843 case CHIP_POLARIS12:
6844 case CHIP_VEGAM:
6845 case CHIP_CARRIZO:
6846 case CHIP_STONEY:
6847 /* chips with display hardware */
6848 return true;
6849 default:
6850 /* IP discovery */
6851 if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6852 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6853 return false;
6854 return true;
6855 }
6856 }
6857
6858 ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
6859 {
6860 ssize_t size = 0;
6861
6862 if (!ring || !ring->adev)
6863 return size;
6864
6865 if (amdgpu_device_should_recover_gpu(ring->adev))
6866 size |= AMDGPU_RESET_TYPE_FULL;
6867
6868 if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
6869 !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
6870 size |= AMDGPU_RESET_TYPE_SOFT_RESET;
6871
6872 return size;
6873 }
6874
6875 ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
6876 {
6877 ssize_t size = 0;
6878
6879 if (supported_reset == 0) {
6880 size += sysfs_emit_at(buf, size, "unsupported");
6881 size += sysfs_emit_at(buf, size, "\n");
6882 return size;
6883
6884 }
6885
6886 if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
6887 size += sysfs_emit_at(buf, size, "soft ");
6888
6889 if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
6890 size += sysfs_emit_at(buf, size, "queue ");
6891
6892 if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
6893 size += sysfs_emit_at(buf, size, "pipe ");
6894
6895 if (supported_reset & AMDGPU_RESET_TYPE_FULL)
6896 size += sysfs_emit_at(buf, size, "full ");
6897
6898 size += sysfs_emit_at(buf, size, "\n");
6899 return size;
6900 }
6901
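/*
 * Record a unique identifier for the given UID type and instance, warning
 * once if an existing non-zero UID is being overwritten.
 */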
6902 void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
6903 enum amdgpu_uid_type type, uint8_t inst,
6904 uint64_t uid)
6905 {
6906 if (!uid_info)
6907 return;
6908
6909 if (type >= AMDGPU_UID_TYPE_MAX) {
6910 dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
6911 type);
6912 return;
6913 }
6914
6915 if (inst >= AMDGPU_UID_INST_MAX) {
6916 dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
6917 inst);
6918 return;
6919 }
6920
6921 if (uid_info->uid[type][inst] != 0) {
6922 dev_warn_once(
6923 uid_info->adev->dev,
6924 "Overwriting existing UID %llu for type %d instance %d\n",
6925 uid_info->uid[type][inst], type, inst);
6926 }
6927
6928 uid_info->uid[type][inst] = uid;
6929 }
6930
6931 u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
6932 enum amdgpu_uid_type type, uint8_t inst)
6933 {
6934 if (!uid_info)
6935 return 0;
6936
6937 if (type >= AMDGPU_UID_TYPE_MAX) {
6938 dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
6939 type);
6940 return 0;
6941 }
6942
6943 if (inst >= AMDGPU_UID_INST_MAX) {
6944 dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
6945 inst);
6946 return 0;
6947 }
6948
6949 return uid_info->uid[type][inst];
6950 }
6951