/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_pciids.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include <linux/cc_platform.h>
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/vga_switcheroo.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_drv.h"
#include "amdgpu_fdinfo.h"
#include "amdgpu_irq.h"
#include "amdgpu_psp.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amdgpu_sched.h"
#include "amdgpu_xgmi.h"
#include "../amdxcp/amdgpu_xcp_drv.h"

/*
 * KMS wrapper.
 * - 3.0.0 - initial driver
 * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
 * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
 *           at the end of IBs.
 * - 3.3.0 - Add VM support for UVD on supported hardware.
 * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
 * - 3.5.0 - Add support for new UVD_NO_OP register.
 * - 3.6.0 - KMD uses CONTEXT_CONTROL in the ring buffer.
 * - 3.7.0 - Add support for VCE clock list packet
 * - 3.8.0 - Add support for raster config init in the kernel
 * - 3.9.0 - Add support for memory query info about VRAM and GTT.
 * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
 * - 3.11.0 - Add support for sensor query info (clocks, temp, etc).
 * - 3.12.0 - Add query for double offchip LDS buffers
 * - 3.13.0 - Add PRT support
 * - 3.14.0 - Fix race in amdgpu_ctx_get_fence() and note new functionality
 * - 3.15.0 - Export more gpu info for gfx9
 * - 3.16.0 - Add reserved vmid support
 * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
 * - 3.18.0 - Export gpu always on cu bitmap
 * - 3.19.0 - Add support for UVD MJPEG decode
 * - 3.20.0 - Add support for local BOs
 * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
 * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
 * - 3.23.0 - Add query for VRAM lost counter
 * - 3.24.0 - Add high priority compute support for gfx9
 * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
 * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
 * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
 * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
 * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
 * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
 * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
 * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
 * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
 * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
 * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
 * - 3.36.0 - Allow reading more status registers on si/cik
 * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
 * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
 * - 3.39.0 - DMABUF implicit sync does a full pipeline sync
 * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
 * - 3.41.0 - Add video codec query
 * - 3.42.0 - Add 16bpc fixed point display support
 * - 3.43.0 - Add device hot plug/unplug support
 * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
 * - 3.45.0 - Add context ioctl stable pstate interface
 * - 3.46.0 - To enable hot plug amdgpu tests in libdrm
 * - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
 * - 3.48.0 - Add IP discovery version info to HW INFO
 * - 3.49.0 - Add gang submit into CS IOCTL
 * - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock
 *            Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock
 * - 3.51.0 - Return the PCIe gen and lanes from the INFO ioctl
 * - 3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields:
 *            tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size,
 *            gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
 * - 3.53.0 - Support for GFX11 CP GFX shadowing
 * - 3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support
 * - 3.55.0 - Add AMDGPU_INFO_GPUVM_FAULT query
 * - 3.56.0 - Update IB start address and size alignment for decode and encode
 * - 3.57.0 - Compute tunneling on GFX10+
 */
#define KMS_DRIVER_MAJOR	3
#define KMS_DRIVER_MINOR	57
#define KMS_DRIVER_PATCHLEVEL	0
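
/*
 * Userspace typically reads these numbers back through DRM_IOCTL_VERSION
 * (drmGetVersion() in libdrm) and gates optional UAPI features on the
 * reported minor version.
 */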

/*
 * amdgpu.debug module options. All are disabled by default.
 */
enum AMDGPU_DEBUG_MASK {
        AMDGPU_DEBUG_VM = BIT(0),
        AMDGPU_DEBUG_LARGEBAR = BIT(1),
        AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
        AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
};

unsigned int amdgpu_vram_limit = UINT_MAX;
int amdgpu_vis_vram_limit;
int amdgpu_gart_size = -1; /* auto */
int amdgpu_gtt_size = -1; /* auto */
int amdgpu_moverate = -1; /* auto */
int amdgpu_audio = -1;
int amdgpu_disp_priority;
int amdgpu_hw_i2c;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color;
int amdgpu_vm_size = -1;
int amdgpu_vm_fragment_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support;
int amdgpu_dc = -1;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
uint amdgpu_pcie_gen_cap;
uint amdgpu_pcie_lane_cap;
u64 amdgpu_cg_mask = 0xffffffffffffffff;
uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
bool enforce_isolation;
/*
 * OverDrive(bit 14) disabled by default
 * GFX DCS(bit 19) disabled by default
 */
uint amdgpu_pp_feature_mask = 0xfff7bfff;
uint amdgpu_force_long_training;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode;
uint amdgpu_smu_memory_pool_size;
int amdgpu_smu_pptable_id = -1;
/*
 * FBC (bit 0) disabled by default
 * MULTI_MON_PP_MCLK_SWITCH (bit 1) enabled by default
 *   - With this, for multiple monitors in sync (e.g. with the same model),
 *     mclk switching will be allowed. And the mclk will not be forced to the
 *     highest. That helps save some idle power.
 * DISABLE_FRACTIONAL_PWM (bit 2) disabled by default
 * PSR (bit 3) disabled by default
 * EDP NO POWER SEQUENCING (bit 4) disabled by default
 */
uint amdgpu_dc_feature_mask = 2;
uint amdgpu_dc_debug_mask;
uint amdgpu_dc_visual_confirm;
int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = -1;
int amdgpu_discovery = -1;
int amdgpu_mes;
int amdgpu_mes_log_enable = 0;
int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = -1; /* auto */
uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
int amdgpu_vcnfw_log;
int amdgpu_sg_display = -1; /* auto */
int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
int amdgpu_umsch_mm;
int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */

static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);

DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
                        "DRM_UT_CORE",
                        "DRM_UT_DRIVER",
                        "DRM_UT_KMS",
                        "DRM_UT_PRIME",
                        "DRM_UT_ATOMIC",
                        "DRM_UT_VBL",
                        "DRM_UT_STATE",
                        "DRM_UT_LEASE",
                        "DRM_UT_DP",
                        "DRM_UT_DRMRES");

struct amdgpu_mgpu_info mgpu_info = {
        .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
        .delayed_reset_work = __DELAYED_WORK_INITIALIZER(
                        mgpu_info.delayed_reset_work,
                        amdgpu_drv_delayed_reset_work_handler, 0),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;
int amdgpu_bad_page_threshold = -1;
struct amdgpu_watchdog_timer amdgpu_watchdog_timer = {
        .timeout_fatal_disable = false,
        .period = 0x0, /* default to 0x0 (timeout disable) */
};

/**
 * DOC: vramlimit (int)
 * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
 */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);

/**
 * DOC: vis_vramlimit (int)
 * Restrict the amount of CPU visible VRAM in MiB for testing. The default is 0 (Use full CPU visible VRAM).
 */
MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);

/**
 * DOC: gartsize (uint)
 * Restrict the size of GART (for kernel use) in MiB (32, 64, etc.) for testing.
 * The default is -1 (The size depends on asic).
 */
MODULE_PARM_DESC(gartsize, "Size of kernel GART to setup in megabytes (32, 64, etc., -1=auto)");
module_param_named(gartsize, amdgpu_gart_size, uint, 0600);

/**
 * DOC: gttsize (int)
 * Restrict the size of GTT domain (for userspace use) in MiB for testing.
 * The default is -1 (Use 1/2 RAM, minimum value is 3GB).
 */
MODULE_PARM_DESC(gttsize, "Size of the GTT userspace domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
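
/*
 * Example: booting with amdgpu.vramlimit=4096 amdgpu.gttsize=4096 restricts
 * both the usable VRAM and the GTT domain to 4096 MiB for testing.
 */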

/**
 * DOC: moverate (int)
 * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
 */
MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
module_param_named(moverate, amdgpu_moverate, int, 0600);

/**
 * DOC: audio (int)
 * Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disable it.
 */
MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);

/**
 * DOC: disp_priority (int)
 * Set display Priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
 */
MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);

/**
 * DOC: hw_i2c (int)
 * To enable hw i2c engine. Only affects non-DC display handling. The default is 0 (Disabled).
 */
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);

/**
 * DOC: pcie_gen2 (int)
 * To disable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);

/**
 * DOC: msi (int)
 * To disable Message Signaled Interrupts (MSI) functionality (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);

/**
 * DOC: lockup_timeout (string)
 * Set GPU scheduler timeout value in ms.
 *
 * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is, there can be one or
 * multiple values specified. 0 and negative values are invalidated. They will be adjusted
 * to the default timeout.
 *
 * - With one value specified, the setting will apply to all non-compute jobs.
 * - With multiple values specified, the first one will be for GFX.
 *   The second one is for Compute. The third and fourth ones are
 *   for SDMA and Video.
 *
 * By default (with no lockup_timeout settings), the timeout for all non-compute (GFX, SDMA and Video)
 * jobs is 10000. The timeout for compute is 60000.
 */
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
                "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
                "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
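
/*
 * Example: amdgpu.lockup_timeout=10000,60000,10000,10000 uses the
 * [GFX,Compute,SDMA,Video] form described above: a 10s timeout for GFX,
 * SDMA and Video jobs and a 60s timeout for Compute jobs (the bare-metal
 * defaults).
 */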

/**
 * DOC: dpm (int)
 * Override for dynamic power management setting
 * (0 = disable, 1 = enable)
 * The default is -1 (auto).
 */
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, amdgpu_dpm, int, 0444);

/**
 * DOC: fw_load_type (int)
 * Set different firmware loading type for debugging, if supported.
 * Set to 0 to force direct loading if supported by the ASIC. Set
 * to -1 to select the default loading mode for the ASIC, as defined
 * by the driver. The default is -1 (auto).
 */
MODULE_PARM_DESC(fw_load_type, "firmware loading type (3 = rlc backdoor autoload if supported, 2 = smu load if supported, 1 = psp load, 0 = force direct if supported, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);

/**
 * DOC: aspm (int)
 * To disable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, amdgpu_aspm, int, 0444);

/**
 * DOC: runpm (int)
 * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
 * the dGPUs when they are idle if supported. The default is -1 (auto enable).
 * Setting the value to 0 disables this functionality.
 * Setting the value to -2 is auto enabled with power down when displays are attached.
 */
MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = auto with displays)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);

/**
 * DOC: ip_block_mask (uint)
 * Override what IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
 * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
 * some IPs or may include multiple instances of an IP so the ordering varies from asic to asic. See the driver output in
 * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
 */
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);

/**
 * DOC: bapm (int)
 * Bidirectional Application Power Management (BAPM) used to dynamically share TDP between CPU and GPU. Set value 0 to disable it.
 * The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, amdgpu_bapm, int, 0444);

/**
 * DOC: deep_color (int)
 * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
 */
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, amdgpu_deep_color, int, 0444);

/**
 * DOC: vm_size (int)
 * Override the size of the GPU's per client virtual address space in GiB. The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);

/**
 * DOC: vm_fragment_size (int)
 * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
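
/*
 * Example: a fragment size of N bits covers 4KiB << N, so the values above
 * map 4 to 64KiB fragments and 9 (the maximum) to 2MiB fragments.
 */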

/**
 * DOC: vm_block_size (int)
 * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);

/**
 * DOC: vm_fault_stop (int)
 * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
 */
MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);

/**
 * DOC: vm_update_mode (int)
 * Override VM update mode. VM updated by using CPU (0 = never, 1 = Graphics only, 2 = Compute only, 3 = Both). The default
 * is -1 (Only in large BAR(LB) systems Compute VM tables will be updated by CPU, otherwise 0, never).
 */
MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both)");
module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);

/**
 * DOC: exp_hw_support (int)
 * Enable experimental hw support (1 = enable). The default is 0 (disabled).
 */
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);

/**
 * DOC: dc (int)
 * Disable/Enable Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);

/**
 * DOC: sched_jobs (int)
 * Override the max number of jobs supported in the sw queue. The default is 32.
 */
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);

/**
 * DOC: sched_hw_submission (int)
 * Override the max number of HW submissions. The default is 2.
 */
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);

/**
 * DOC: ppfeaturemask (hexint)
 * Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 * The default is the current set of stable power features.
 */
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, hexint, 0444);
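
/*
 * Example: amdgpu.ppfeaturemask=0xffffffff asks for every PP_FEATURE_MASK
 * feature, including OverDrive (bit 14) and GFX DCS (bit 19), which the
 * default mask 0xfff7bfff leaves disabled.
 */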

/**
 * DOC: forcelongtraining (uint)
 * Force long memory training in resume.
 * The default is zero, indicates short training in resume.
 */
MODULE_PARM_DESC(forcelongtraining, "force memory long training");
module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);

/**
 * DOC: pcie_gen_cap (uint)
 * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
 * The default is 0 (automatic for each asic).
 */
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);

/**
 * DOC: pcie_lane_cap (uint)
 * Override PCIE lanes capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
 * The default is 0 (automatic for each asic).
 */
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);

/**
 * DOC: cg_mask (ullong)
 * Override Clockgating features enabled on GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
 * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffffffffffff (all enabled).
 */
MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
module_param_named(cg_mask, amdgpu_cg_mask, ullong, 0444);

/**
 * DOC: pg_mask (uint)
 * Override Powergating features enabled on GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
 * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
 */
MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);

/**
 * DOC: sdma_phase_quantum (uint)
 * Override SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
 */
MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);

/**
 * DOC: disable_cu (charp)
 * Set to disable CUs (It's set like se.sh.cu,...). The default is NULL.
 */
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);

/**
 * DOC: virtual_display (charp)
 * Set to enable virtual display feature. This feature provides a virtual display hardware on headless boards
 * or in virtualized environments. It will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x. It's the pci address of
 * the device, plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
 * device at 26:00.0. The default is NULL.
 */
MODULE_PARM_DESC(virtual_display,
                 "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);

/**
 * DOC: lbpw (int)
 * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);

MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);

/**
 * DOC: gpu_recovery (int)
 * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
 */
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
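
/*
 * Example: amdgpu.gpu_recovery=1 forces the recovery mechanism on for
 * bare-metal systems where the -1 (auto) default would leave it disabled.
 */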

/**
 * DOC: emu_mode (int)
 * Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
 */
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);

/**
 * DOC: ras_enable (int)
 * Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))
 */
MODULE_PARM_DESC(ras_enable, "Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))");
module_param_named(ras_enable, amdgpu_ras_enable, int, 0444);

/**
 * DOC: ras_mask (uint)
 * Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1
 * See the flags in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
 */
MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1");
module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);

/**
 * DOC: timeout_fatal_disable (bool)
 * Disable Watchdog timeout fatal error event
 */
MODULE_PARM_DESC(timeout_fatal_disable, "disable watchdog timeout fatal error (false = default)");
module_param_named(timeout_fatal_disable, amdgpu_watchdog_timer.timeout_fatal_disable, bool, 0644);

/**
 * DOC: timeout_period (uint)
 * Modify the watchdog timeout max_cycles as (1 << period)
 */
MODULE_PARM_DESC(timeout_period, "watchdog timeout period (0 = timeout disabled, 1 ~ 0x23 = timeout maxcycles = (1 << period))");
module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);

/**
 * DOC: si_support (int)
 * Set SI support driver. This parameter only takes effect when the kernel is built with CONFIG_DRM_AMDGPU_SI.
 * For SI asics, when the radeon driver is also enabled, set value 0 to use the radeon driver or value 1 to use
 * the amdgpu driver. The default is to use the radeon driver when it is available, otherwise the amdgpu driver.
 */
#ifdef CONFIG_DRM_AMDGPU_SI

#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
int amdgpu_si_support;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_si_support = 1;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
#endif

module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif

/**
 * DOC: cik_support (int)
 * Set CIK support driver. This parameter only takes effect when the kernel is built with CONFIG_DRM_AMDGPU_CIK.
 * For CIK asics, when the radeon driver is also enabled, set value 0 to use the radeon driver or value 1 to use
 * the amdgpu driver. The default is to use the radeon driver when it is available, otherwise the amdgpu driver.
 */
#ifdef CONFIG_DRM_AMDGPU_CIK

#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
int amdgpu_cik_support;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_cik_support = 1;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
#endif

module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
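
/*
 * Example: with both drivers built, booting with
 * "radeon.cik_support=0 amdgpu.cik_support=1" hands CIK parts to amdgpu
 * instead of radeon; si_support works the same way for SI parts.
 */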

/**
 * DOC: smu_memory_pool_size (uint)
 * It is used to reserve gtt for smu debug usage, setting value 0 to disable it. The actual size is value * 256MiB.
 * E.g. 0x1 = 256MiB, 0x2 = 512MiB, 0x4 = 1GiB, 0x8 = 2GiB. The default is 0 (disabled).
 */
MODULE_PARM_DESC(smu_memory_pool_size,
                 "reserve gtt for smu debug usage, 0 = disable, 0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);

/**
 * DOC: async_gfx_ring (int)
 * It is used to enable gfx rings that could be configured with different priorities or equal priorities
 */
MODULE_PARM_DESC(async_gfx_ring,
                 "Asynchronous GFX rings that could be configured with either different priorities (HP3D ring and LP3D ring), or equal priorities (0 = disabled, 1 = enabled (default))");
module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444);

/**
 * DOC: mcbp (int)
 * It is used to enable mid command buffer preemption. (0 = disabled, 1 = enabled, -1 auto (default))
 */
MODULE_PARM_DESC(mcbp,
                 "Enable Mid-command buffer preemption (0 = disabled, 1 = enabled, -1 = auto (default))");
module_param_named(mcbp, amdgpu_mcbp, int, 0444);

/**
 * DOC: discovery (int)
 * Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
 * (-1 = auto (default), 0 = disabled, 1 = enabled, 2 = use ip_discovery table from file)
 */
MODULE_PARM_DESC(discovery,
                 "Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
module_param_named(discovery, amdgpu_discovery, int, 0444);

/**
 * DOC: mes (int)
 * Enable Micro Engine Scheduler. This is a new hw scheduling engine for gfx, sdma, and compute.
 * (0 = disabled (default), 1 = enabled)
 */
MODULE_PARM_DESC(mes,
                 "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);

/**
 * DOC: mes_log_enable (int)
 * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
 * (0 = disabled (default), 1 = enabled)
 */
MODULE_PARM_DESC(mes_log_enable,
                 "Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);

/**
 * DOC: mes_kiq (int)
 * Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
 * (0 = disabled (default), 1 = enabled)
 */
MODULE_PARM_DESC(mes_kiq,
                 "Enable Micro Engine Scheduler KIQ (0 = disabled (default), 1 = enabled)");
module_param_named(mes_kiq, amdgpu_mes_kiq, int, 0444);

/**
 * DOC: noretry (int)
 * Disable XNACK retry in the SQ by default on GFXv9 hardware. On ASICs that
 * do not support per-process XNACK this also disables retry page faults.
 * (0 = retry enabled, 1 = retry disabled, -1 auto (default))
 */
MODULE_PARM_DESC(noretry,
                 "Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 auto (default))");
module_param_named(noretry, amdgpu_noretry, int, 0644);

/**
 * DOC: force_asic_type (int)
 * A non negative value used to specify the asic type for all supported GPUs.
 */
MODULE_PARM_DESC(force_asic_type,
                 "A non negative value used to specify the asic type for all supported GPUs");
module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);

/**
 * DOC: use_xgmi_p2p (int)
 * Enables/disables XGMI P2P interface (0 = disable, 1 = enable).
 */
MODULE_PARM_DESC(use_xgmi_p2p,
                 "Enable XGMI P2P interface (0 = disable; 1 = enable (default))");
module_param_named(use_xgmi_p2p, amdgpu_use_xgmi_p2p, int, 0444);

#ifdef CONFIG_HSA_AMD
/**
 * DOC: sched_policy (int)
 * Set scheduling policy. Default is HWS (hardware scheduling) with over-subscription.
 * Setting 1 disables over-subscription. Setting 2 disables HWS and statically
 * assigns queues to HQDs.
 */
int sched_policy = KFD_SCHED_POLICY_HWS;
module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
        "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only))");

/**
 * DOC: hws_max_conc_proc (int)
 * Maximum number of processes that HWS can schedule concurrently. The maximum is the
 * number of VMIDs assigned to the HWS, which is also the default.
 */
int hws_max_conc_proc = -1;
module_param(hws_max_conc_proc, int, 0444);
MODULE_PARM_DESC(hws_max_conc_proc,
        "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");

/**
 * DOC: cwsr_enable (int)
 * CWSR (compute wave store and resume) allows the GPU to preempt shader execution in
 * the middle of a compute wave. Default is 1 to enable this feature. Setting 0
 * disables it.
 */
int cwsr_enable = 1;
module_param(cwsr_enable, int, 0444);
MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");

/**
 * DOC: max_num_of_queues_per_device (int)
 * Maximum number of queues per device. Valid setting is between 1 and 4096. Default
 * is 4096.
 */
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_device,
        "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");

/**
 * DOC: send_sigterm (int)
 * Send sigterm to HSA process on unhandled exceptions. Default is not to send sigterm
 * but just print errors on dmesg. Setting 1 enables sending sigterm.
 */
int send_sigterm;
module_param(send_sigterm, int, 0444);
MODULE_PARM_DESC(send_sigterm,
        "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");

/**
 * DOC: halt_if_hws_hang (int)
 * Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
 * Setting 1 enables halt on hang.
 */
int halt_if_hws_hang;
module_param(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");

/**
 * DOC: hws_gws_support(bool)
 * Assume that HWS supports GWS barriers regardless of what firmware version
 * check says. Default value: false (rely on MEC2 firmware version check).
 */
bool hws_gws_support;
module_param(hws_gws_support, bool, 0444);
MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");

/**
 * DOC: queue_preemption_timeout_ms (int)
 * queue preemption timeout in ms (1 = Minimum, 9000 = default)
 */
int queue_preemption_timeout_ms = 9000;
module_param(queue_preemption_timeout_ms, int, 0644);
MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");

/**
 * DOC: debug_evictions(bool)
 * Enable extra debug messages to help determine the cause of evictions
 */
bool debug_evictions;
module_param(debug_evictions, bool, 0644);
MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)");

/**
 * DOC: no_system_mem_limit(bool)
 * Disable system memory limit, to support multiple process shared memory
 */
bool no_system_mem_limit;
module_param(no_system_mem_limit, bool, 0644);
MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");

/**
 * DOC: no_queue_eviction_on_vm_fault (int)
 * If set, process queues will not be evicted on gpuvm fault. This is to keep the wavefront context for debugging (0 = queue eviction, 1 = no queue eviction). The default is 0 (queue eviction).
 */
int amdgpu_no_queue_eviction_on_vm_fault;
MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
#endif

/**
 * DOC: mtype_local (int)
 */
int amdgpu_mtype_local;
MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)");
module_param_named(mtype_local, amdgpu_mtype_local, int, 0444);

/**
 * DOC: pcie_p2p (bool)
 * Enable PCIe P2P (requires large-BAR). Default value: true (on)
 */
#ifdef CONFIG_HSA_AMD_P2P
bool pcie_p2p = true;
module_param(pcie_p2p, bool, 0444);
MODULE_PARM_DESC(pcie_p2p, "Enable PCIe P2P (requires large-BAR). (N = off, Y = on(default))");
#endif

/**
 * DOC: dcfeaturemask (uint)
 * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 * The default is the current set of stable display features.
 */
MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default)");
module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
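
/*
 * Example: amdgpu.dcfeaturemask=0xa keeps MULTI_MON_PP_MCLK_SWITCH (bit 1)
 * from the default mask and additionally enables PSR (bit 3); see the
 * amdgpu_dc_feature_mask bit comments above.
 */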

/**
 * DOC: dcdebugmask (uint)
 * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 */
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default)");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);

MODULE_PARM_DESC(visualconfirm, "Visual confirm (0 = off (default), 1 = MPO, 5 = PSR)");
module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);

/**
 * DOC: abmlevel (uint)
 * Override the default ABM (Adaptive Backlight Management) level used for DC
 * enabled hardware. Requires DMCU to be supported and loaded.
 * Valid levels are 0-4. A value of 0 indicates that ABM should be disabled by
 * default. Values 1-4 control the maximum allowable brightness reduction via
 * the ABM algorithm, with 1 being the least reduction and 4 being the most
 * reduction.
 *
 * Defaults to -1, or disabled. Userspace can only override this level after
 * boot if it's set to auto.
 */
int amdgpu_dm_abm_level = -1;
MODULE_PARM_DESC(abmlevel,
                 "ABM level (0 = off, 1-4 = backlight reduction level, -1 auto (default))");
module_param_named(abmlevel, amdgpu_dm_abm_level, int, 0444);

int amdgpu_backlight = -1;
MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
module_param_named(backlight, amdgpu_backlight, bint, 0444);

/**
 * DOC: damageclips (int)
 * Enable or disable damage clips support. If damage clips support is disabled,
 * we will force full frame updates, irrespective of what user space sends to
 * us.
 *
 * Defaults to -1 (where it is enabled unless a PSR-SU display is detected).
 */
MODULE_PARM_DESC(damageclips,
                 "Damage clips support (0 = disable, 1 = enable, -1 auto (default))");
module_param_named(damageclips, amdgpu_damage_clips, int, 0444);

/**
 * DOC: tmz (int)
 * Trusted Memory Zone (TMZ) is a method to protect data being written
 * to or read from memory.
 *
 * The default value is -1 (auto).
 */
MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
module_param_named(tmz, amdgpu_tmz, int, 0444);

/**
 * DOC: freesync_video (uint)
 * Enable the optimization to adjust front porch timing to achieve seamless
 * mode change experience when setting a freesync supported mode for which full
 * modeset is not needed.
 *
 * The Display Core will add a set of modes derived from the base FreeSync
 * video mode into the corresponding connector's mode list based on commonly
 * used refresh rates and VRR range of the connected display, when users enable
 * this feature. From the userspace perspective, they can see a seamless mode
 * change experience when changing between different refresh rates under the
 * same resolution. Additionally, userspace applications such as Video playback
 * can read this modeset list and change the refresh rate based on the video
 * frame rate. Finally, userspace can also derive an appropriate mode for a
 * particular refresh rate based on the FreeSync Mode and add it to the
 * connector's mode list.
 *
 * Note: This is an experimental feature.
 *
 * The default value: 0 (off).
 */
MODULE_PARM_DESC(
        freesync_video,
        "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);

/**
 * DOC: reset_method (int)
 * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
 */
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
module_param_named(reset_method, amdgpu_reset_method, int, 0644);

/**
 * DOC: bad_page_threshold (int) Bad page threshold specifies the
 * threshold value of faulty pages detected by RAS ECC, which may
 * result in the GPU entering bad status when the number of total
 * faulty pages by ECC exceeds the threshold value.
 */
MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = driver sets threshold)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);

MODULE_PARM_DESC(num_kcq, "number of kernel compute queues the user wants to set up (8 if set to greater than 8 or less than 0, only affects gfx 8+)");
module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);

/**
 * DOC: vcnfw_log (int)
 * Enable vcnfw log output for debugging, the default is disabled.
 */
MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log (0 = disable (default value), 1 = enable)");
module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);

/**
 * DOC: sg_display (int)
 * Disable S/G (scatter/gather) display (i.e., display from system memory).
 * This option is only relevant on APUs. Set this option to 0 to disable
 * S/G display if you experience flickering or other issues under memory
 * pressure and report the issue.
 */
MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
module_param_named(sg_display, amdgpu_sg_display, int, 0444);

/**
 * DOC: umsch_mm (int)
 * Enable Multi Media User Mode Scheduler. This is a HW scheduling engine for VCN and VPE.
 * (0 = disabled (default), 1 = enabled)
 */
MODULE_PARM_DESC(umsch_mm,
                 "Enable Multi Media User Mode Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(umsch_mm, amdgpu_umsch_mm, int, 0444);

/**
 * DOC: smu_pptable_id (int)
 * Used to override pptable id. id = 0 use VBIOS pptable.
 * id > 0 use the soft pptable with specified id.
 */
MODULE_PARM_DESC(smu_pptable_id,
                 "specify pptable id to be used (-1 = auto (default) value, 0 = use pptable from vbios, > 0 = soft pptable id)");
module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);

/**
 * DOC: partition_mode (int)
 * Used to override the default SPX mode.
 */
MODULE_PARM_DESC(
        user_partt_mode,
        "specify partition mode to be used (-2 = AMDGPU_AUTO_COMPUTE_PARTITION_MODE(default value) \
        0 = AMDGPU_SPX_PARTITION_MODE, \
        1 = AMDGPU_DPX_PARTITION_MODE, \
        2 = AMDGPU_TPX_PARTITION_MODE, \
        3 = AMDGPU_QPX_PARTITION_MODE, \
        4 = AMDGPU_CPX_PARTITION_MODE)");
module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);

/**
 * DOC: enforce_isolation (bool)
 * enforce process isolation between graphics and compute via using the same reserved vmid.
 */
module_param(enforce_isolation, bool, 0444);
MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute. enforce_isolation = on");

/**
 * DOC: seamless (int)
 * Seamless boot will keep the image on the screen during the boot process.
 */
MODULE_PARM_DESC(seamless, "Seamless boot (-1 = auto (default), 0 = disable, 1 = enable)");
module_param_named(seamless, amdgpu_seamless, int, 0444);

/**
 * DOC: debug_mask (uint)
 * Debug options for amdgpu, work as a binary mask with the following options:
 *
 * - 0x1: Debug VM handling
 * - 0x2: Enable simulating large-bar capability on non-large bar system. This
 *        limits the VRAM size reported to ROCm applications to the visible
 *        size, usually 256MB.
 * - 0x4: Disable GPU soft recovery, always do a full reset
 */
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
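
/*
 * Example: amdgpu.debug_mask=0x5 combines AMDGPU_DEBUG_VM (0x1) with
 * AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY (0x4): verbose VM handling plus
 * full resets instead of soft recovery.
 */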

/**
 * DOC: agp (int)
 * Enable the AGP aperture. This provides an aperture in the GPU's internal
 * address space for direct access to system memory. Note that these accesses
 * are non-snooped, so they are only used for access to uncached memory.
 */
MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
module_param_named(agp, amdgpu_agp, int, 0444);

/**
 * DOC: wbrf (int)
 * Enable Wifi RFI interference mitigation feature.
 * Due to electrical and mechanical constraints there may be interference from
 * relatively high-powered harmonics of the (G-)DDR memory clocks with local radio
 * module frequency bands used by Wifi 6/6e/7. To mitigate the possible RFI interference,
 * with this feature enabled, PMFW will use either “shadowed P-State” or “P-State” based
 * on the active list of frequencies in use (to be avoided) as part of the initial setting or
 * P-state transition. However, there may be potential performance impact with this
 * feature enabled.
 * (0 = disabled, 1 = enabled, -1 = auto (default setting, will be enabled if supported))
 */
MODULE_PARM_DESC(wbrf,
        "Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto (default))");
module_param_named(wbrf, amdgpu_wbrf, int, 0444);

/* These devices are not supported by amdgpu.
 * They are supported by the mach64, r128, radeon drivers
 */
static const u16 amdgpu_unsupported_pciidlist[] = {
        /* mach64 */
        0x4354, 0x4358, 0x4554, 0x4742, 0x4744, 0x4749, 0x474C, 0x474D,
        0x474E, 0x474F, 0x4750, 0x4751, 0x4752, 0x4753, 0x4754, 0x4755,
        0x4756, 0x4757, 0x4758, 0x4759, 0x475A, 0x4C42, 0x4C44, 0x4C47,
        0x4C49, 0x4C4D, 0x4C4E, 0x4C50, 0x4C51, 0x4C52, 0x4C53, 0x5654,
        0x5655, 0x5656,
        /* r128 */
        0x4c45, 0x4c46, 0x4d46, 0x4d4c, 0x5041, 0x5042, 0x5043, 0x5044,
        0x5045, 0x5046, 0x5047, 0x5048, 0x5049, 0x504A, 0x504B, 0x504C,
        0x504D, 0x504E, 0x504F, 0x5050, 0x5051, 0x5052, 0x5053, 0x5054,
        0x5055, 0x5056, 0x5057, 0x5058, 0x5245, 0x5246, 0x5247, 0x524b,
        0x524c, 0x534d, 0x5446, 0x544C, 0x5452,
        /* radeon */
        0x3150, 0x3151, 0x3152, 0x3154, 0x3155, 0x3E50, 0x3E54, 0x4136,
        0x4137, 0x4144, 0x4145, 0x4146, 0x4147, 0x4148, 0x4149, 0x414A,
        0x414B, 0x4150, 0x4151, 0x4152, 0x4153, 0x4154, 0x4155, 0x4156,
        0x4237, 0x4242, 0x4336, 0x4337, 0x4437, 0x4966, 0x4967, 0x4A48,
        0x4A49, 0x4A4A, 0x4A4B, 0x4A4C, 0x4A4D, 0x4A4E, 0x4A4F, 0x4A50,
        0x4A54, 0x4B48, 0x4B49, 0x4B4A, 0x4B4B, 0x4B4C, 0x4C57, 0x4C58,
        0x4C59, 0x4C5A, 0x4C64, 0x4C66, 0x4C67, 0x4E44, 0x4E45, 0x4E46,
        0x4E47, 0x4E48, 0x4E49, 0x4E4A, 0x4E4B, 0x4E50,
        0x4E51, 0x4E52, 0x4E53, 0x4E54, 0x4E56, 0x5144, 0x5145, 0x5146,
        0x5147, 0x5148, 0x514C, 0x514D, 0x5157, 0x5158, 0x5159, 0x515A,
        0x515E, 0x5460, 0x5462, 0x5464, 0x5548, 0x5549, 0x554A, 0x554B,
        0x554C, 0x554D, 0x554E, 0x554F, 0x5550, 0x5551, 0x5552, 0x5554,
        0x564A, 0x564B, 0x564F, 0x5652, 0x5653, 0x5657, 0x5834, 0x5835,
        0x5954, 0x5955, 0x5974, 0x5975, 0x5960, 0x5961, 0x5962, 0x5964,
        0x5965, 0x5969, 0x5a41, 0x5a42, 0x5a61, 0x5a62, 0x5b60, 0x5b62,
        0x5b63, 0x5b64, 0x5b65, 0x5c61, 0x5c63, 0x5d48, 0x5d49, 0x5d4a,
        0x5d4c, 0x5d4d, 0x5d4e, 0x5d4f, 0x5d50, 0x5d52, 0x5d57, 0x5e48,
        0x5e4a, 0x5e4b, 0x5e4c, 0x5e4d, 0x5e4f, 0x6700, 0x6701, 0x6702,
        0x6703, 0x6704, 0x6705, 0x6706, 0x6707, 0x6708, 0x6709, 0x6718,
        0x6719, 0x671c, 0x671d, 0x671f, 0x6720, 0x6721, 0x6722, 0x6723,
        0x6724, 0x6725, 0x6726, 0x6727, 0x6728, 0x6729, 0x6738, 0x6739,
        0x673e, 0x6740, 0x6741, 0x6742, 0x6743, 0x6744, 0x6745, 0x6746,
        0x6747, 0x6748, 0x6749, 0x674A, 0x6750, 0x6751, 0x6758, 0x6759,
        0x675B, 0x675D, 0x675F, 0x6760, 0x6761, 0x6762, 0x6763, 0x6764,
        0x6765, 0x6766, 0x6767, 0x6768, 0x6770, 0x6771, 0x6772, 0x6778,
        0x6779, 0x677B, 0x6840, 0x6841, 0x6842, 0x6843, 0x6849, 0x684C,
        0x6850, 0x6858, 0x6859, 0x6880, 0x6888, 0x6889, 0x688A, 0x688C,
        0x688D, 0x6898, 0x6899, 0x689b, 0x689c, 0x689d, 0x689e, 0x68a0,
        0x68a1, 0x68a8, 0x68a9, 0x68b0, 0x68b8, 0x68b9, 0x68ba, 0x68be,
        0x68bf, 0x68c0, 0x68c1, 0x68c7, 0x68c8, 0x68c9, 0x68d8, 0x68d9,
        0x68da, 0x68de, 0x68e0, 0x68e1, 0x68e4, 0x68e5, 0x68e8, 0x68e9,
        0x68f1, 0x68f2, 0x68f8, 0x68f9, 0x68fa, 0x68fe, 0x7100, 0x7101,
        0x7102, 0x7103, 0x7104, 0x7105, 0x7106, 0x7108, 0x7109, 0x710A,
        0x710B, 0x710C, 0x710E, 0x710F, 0x7140, 0x7141, 0x7142, 0x7143,
        0x7144, 0x7145, 0x7146, 0x7147, 0x7149, 0x714A, 0x714B, 0x714C,
        0x714D, 0x714E, 0x714F, 0x7151, 0x7152, 0x7153, 0x715E, 0x715F,
        0x7180, 0x7181, 0x7183, 0x7186, 0x7187, 0x7188, 0x718A, 0x718B,
        0x718C, 0x718D, 0x718F, 0x7193, 0x7196, 0x719B, 0x719F, 0x71C0,
        0x71C1, 0x71C2, 0x71C3, 0x71C4, 0x71C5, 0x71C6, 0x71C7, 0x71CD,
        0x71CE, 0x71D2, 0x71D4, 0x71D5, 0x71D6, 0x71DA, 0x71DE, 0x7200,
        0x7210, 0x7211, 0x7240, 0x7243, 0x7244, 0x7245, 0x7246, 0x7247,
        0x7248, 0x7249, 0x724A, 0x724B, 0x724C, 0x724D, 0x724E, 0x724F,
        0x7280,
        0x7281, 0x7283, 0x7284, 0x7287, 0x7288, 0x7289, 0x728B, 0x728C,
        0x7290, 0x7291, 0x7293, 0x7297, 0x7834, 0x7835, 0x791e, 0x791f,
        0x793f, 0x7941, 0x7942, 0x796c, 0x796d, 0x796e, 0x796f, 0x9400,
        0x9401, 0x9402, 0x9403, 0x9405, 0x940A, 0x940B, 0x940F, 0x94A0,
        0x94A1, 0x94A3, 0x94B1, 0x94B3, 0x94B4, 0x94B5, 0x94B9, 0x9440,
        0x9441, 0x9442, 0x9443, 0x9444, 0x9446, 0x944A, 0x944B, 0x944C,
        0x944E, 0x9450, 0x9452, 0x9456, 0x945A, 0x945B, 0x945E, 0x9460,
        0x9462, 0x946A, 0x946B, 0x947A, 0x947B, 0x9480, 0x9487, 0x9488,
        0x9489, 0x948A, 0x948F, 0x9490, 0x9491, 0x9495, 0x9498, 0x949C,
        0x949E, 0x949F, 0x94C0, 0x94C1, 0x94C3, 0x94C4, 0x94C5, 0x94C6,
        0x94C7, 0x94C8, 0x94C9, 0x94CB, 0x94CC, 0x94CD, 0x9500, 0x9501,
        0x9504, 0x9505, 0x9506, 0x9507, 0x9508, 0x9509, 0x950F, 0x9511,
        0x9515, 0x9517, 0x9519, 0x9540, 0x9541, 0x9542, 0x954E, 0x954F,
        0x9552, 0x9553, 0x9555, 0x9557, 0x955f, 0x9580, 0x9581, 0x9583,
        0x9586, 0x9587, 0x9588, 0x9589, 0x958A, 0x958B, 0x958C, 0x958D,
        0x958E, 0x958F, 0x9590, 0x9591, 0x9593, 0x9595, 0x9596, 0x9597,
        0x9598, 0x9599, 0x959B, 0x95C0, 0x95C2, 0x95C4, 0x95C5, 0x95C6,
        0x95C7, 0x95C9, 0x95CC, 0x95CD, 0x95CE, 0x95CF, 0x9610, 0x9611,
        0x9612, 0x9613, 0x9614, 0x9615, 0x9616, 0x9640, 0x9641, 0x9642,
        0x9643, 0x9644, 0x9645, 0x9647, 0x9648, 0x9649, 0x964a, 0x964b,
        0x964c, 0x964e, 0x964f, 0x9710, 0x9711, 0x9712, 0x9713, 0x9714,
        0x9715, 0x9802, 0x9803, 0x9804, 0x9805, 0x9806, 0x9807, 0x9808,
        0x9809, 0x980A, 0x9900, 0x9901, 0x9903, 0x9904, 0x9905, 0x9906,
        0x9907, 0x9908, 0x9909, 0x990A, 0x990B, 0x990C, 0x990D, 0x990E,
        0x990F, 0x9910, 0x9913, 0x9917, 0x9918, 0x9919, 0x9990, 0x9991,
        0x9992, 0x9993, 0x9994, 0x9995, 0x9996, 0x9997, 0x9998, 0x9999,
        0x999A, 0x999B, 0x999C, 0x999D, 0x99A0, 0x99A2, 0x99A4,
        /* radeon secondary ids */
        0x3171, 0x3e70, 0x4164, 0x4165, 0x4166, 0x4168, 0x4170, 0x4171,
        0x4172, 0x4173, 0x496e, 0x4a69, 0x4a6a, 0x4a6b, 0x4a70, 0x4a74,
        0x4b69, 0x4b6b, 0x4b6c, 0x4c6e, 0x4e64, 0x4e65, 0x4e66, 0x4e67,
        0x4e68, 0x4e69, 0x4e6a, 0x4e71, 0x4f73, 0x5569, 0x556b, 0x556d,
        0x556f, 0x5571, 0x5854, 0x5874, 0x5940, 0x5941, 0x5b70, 0x5b72,
        0x5b73, 0x5b74, 0x5b75, 0x5d44, 0x5d45, 0x5d6d, 0x5d6f, 0x5d72,
        0x5d77, 0x5e6b, 0x5e6d, 0x7120, 0x7124, 0x7129, 0x712e, 0x712f,
        0x7162, 0x7163, 0x7166, 0x7167, 0x7172, 0x7173, 0x71a0, 0x71a1,
        0x71a3, 0x71a7, 0x71bb, 0x71e0, 0x71e1, 0x71e2, 0x71e6, 0x71e7,
        0x71f2, 0x7269, 0x726b, 0x726e, 0x72a0, 0x72a8, 0x72b1, 0x72b3,
        0x793f,
};

static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
        {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
        {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
        {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
        {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
        {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
        {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
        {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
        {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
        {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
        {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
        {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
        {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
        {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
        {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
        {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
        {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
        {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
        {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
        {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
        {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1802 {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1803 {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1804 {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1805 {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1806 {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1807 {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1808 {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, 1809 {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, 1810 {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1811 {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1812 {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, 1813 {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1814 {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1815 {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1816 {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, 1817 {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, 1818 {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, 1819 {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, 1820 {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, 1821 {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, 1822 {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, 1823 {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, 1824 {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1825 {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1826 {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1827 {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1828 {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1829 {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1830 #endif 1831 #ifdef CONFIG_DRM_AMDGPU_CIK 1832 /* Kaveri */ 1833 {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1834 {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1835 {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1836 {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1837 {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1838 {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1839 {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1840 {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1841 {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1842 {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1843 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1844 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1845 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1846 {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1847 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1848 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 
0, 0, CHIP_KAVERI|AMD_IS_APU}, 1849 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1850 {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1851 {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1852 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1853 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1854 {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, 1855 /* Bonaire */ 1856 {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, 1857 {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, 1858 {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, 1859 {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, 1860 {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, 1861 {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, 1862 {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, 1863 {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, 1864 {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, 1865 {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, 1866 {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, 1867 /* Hawaii */ 1868 {0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1869 {0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1870 {0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1871 {0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1872 {0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1873 {0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1874 {0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1875 {0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1876 {0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1877 {0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1878 {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1879 {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, 1880 /* Kabini */ 1881 {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, 1882 {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, 1883 {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, 1884 {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, 1885 {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, 1886 {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, 1887 {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, 1888 {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, 1889 {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, 1890 {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, 1891 {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, 1892 {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, 1893 {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, 1894 {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, 1895 {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, 1896 {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, 1897 /* mullins */ 1898 {0x1002, 
0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1899 {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1900 {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1901 {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1902 {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1903 {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1904 {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1905 {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1906 {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1907 {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1908 {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1909 {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1910 {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1911 {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1912 {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1913 {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1914 #endif 1915 /* topaz */ 1916 {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, 1917 {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, 1918 {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, 1919 {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, 1920 {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, 1921 /* tonga */ 1922 {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 1923 {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 1924 {0x1002, 0x6928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 1925 {0x1002, 0x6929, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 1926 {0x1002, 0x692B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 1927 {0x1002, 0x692F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 1928 {0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 1929 {0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 1930 {0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 1931 /* fiji */ 1932 {0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI}, 1933 {0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI}, 1934 /* carrizo */ 1935 {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, 1936 {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, 1937 {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, 1938 {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, 1939 {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, 1940 /* stoney */ 1941 {0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU}, 1942 /* Polaris11 */ 1943 {0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 1944 {0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 1945 {0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 1946 {0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 1947 {0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 1948 {0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 1949 {0x1002, 0x67E1, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 1950 {0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 1951 {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 1952 /* Polaris10 */ 1953 {0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1954 {0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1955 {0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1956 {0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1957 {0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1958 {0x1002, 0x67D0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1959 {0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1960 {0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1961 {0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1962 {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1963 {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1964 {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1965 {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 1966 /* Polaris12 */ 1967 {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 1968 {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 1969 {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 1970 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 1971 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 1972 {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 1973 {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 1974 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 1975 /* VEGAM */ 1976 {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, 1977 {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, 1978 {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, 1979 /* Vega 10 */ 1980 {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1981 {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1982 {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1983 {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1984 {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1985 {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1986 {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1987 {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1988 {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1989 {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1990 {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1991 {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1992 {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1993 {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1994 {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 1995 /* Vega 12 */ 1996 {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, 1997 {0x1002, 0x69A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, 1998 {0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, 1999 {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, 2000 {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, 2001 /* Vega 20 */ 2002 {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, 2003 {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, 2004 {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, 2005 {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, 2006 {0x1002, 0x66A4, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, 2007 {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, 2008 {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, 2009 /* Raven */ 2010 {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, 2011 {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, 2012 /* Arcturus */ 2013 {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, 2014 {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, 2015 {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, 2016 {0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, 2017 /* Navi10 */ 2018 {0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, 2019 {0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, 2020 {0x1002, 0x7318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, 2021 {0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, 2022 {0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, 2023 {0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, 2024 {0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, 2025 {0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, 2026 /* Navi14 */ 2027 {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, 2028 {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, 2029 {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, 2030 {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, 2031 2032 /* Renoir */ 2033 {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, 2034 {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, 2035 {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, 2036 {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, 2037 2038 /* Navi12 */ 2039 {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12}, 2040 {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12}, 2041 2042 /* Sienna_Cichlid */ 2043 {0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2044 {0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2045 {0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2046 {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2047 {0x1002, 0x73A5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2048 {0x1002, 0x73A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2049 {0x1002, 0x73A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2050 {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2051 {0x1002, 0x73AC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2052 {0x1002, 0x73AD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2053 {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2054 {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2055 {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 2056 2057 /* Yellow Carp */ 2058 {0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU}, 2059 {0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU}, 2060 2061 /* Navy_Flounder */ 2062 {0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, 2063 {0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, 2064 {0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, 2065 {0x1002, 0x73DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, 2066 {0x1002, 0x73DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, 2067 {0x1002, 0x73DC, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, 2068 {0x1002, 0x73DD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, 2069 {0x1002, 0x73DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, 2070 {0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, 2071 2072 /* DIMGREY_CAVEFISH */ 2073 {0x1002, 0x73E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2074 {0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2075 {0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2076 {0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2077 {0x1002, 0x73E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2078 {0x1002, 0x73E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2079 {0x1002, 0x73EA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2080 {0x1002, 0x73EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2081 {0x1002, 0x73EC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2082 {0x1002, 0x73ED, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2083 {0x1002, 0x73EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2084 {0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, 2085 2086 /* Aldebaran */ 2087 {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN}, 2088 {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN}, 2089 {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN}, 2090 {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN}, 2091 2092 /* CYAN_SKILLFISH */ 2093 {0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, 2094 {0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, 2095 2096 /* BEIGE_GOBY */ 2097 {0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, 2098 {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, 2099 {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, 2100 {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, 2101 {0x1002, 0x7424, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, 2102 {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, 2103 2104 { PCI_DEVICE(0x1002, PCI_ANY_ID), 2105 .class = PCI_CLASS_DISPLAY_VGA << 8, 2106 .class_mask = 0xffffff, 2107 .driver_data = CHIP_IP_DISCOVERY }, 2108 2109 { PCI_DEVICE(0x1002, PCI_ANY_ID), 2110 .class = PCI_CLASS_DISPLAY_OTHER << 8, 2111 .class_mask = 0xffffff, 2112 .driver_data = CHIP_IP_DISCOVERY }, 2113 2114 { PCI_DEVICE(0x1002, PCI_ANY_ID), 2115 .class = PCI_CLASS_ACCELERATOR_PROCESSING << 8, 2116 .class_mask = 0xffffff, 2117 .driver_data = CHIP_IP_DISCOVERY }, 2118 2119 {0, 0, 0} 2120 }; 2121 2122 MODULE_DEVICE_TABLE(pci, pciidlist); 2123 2124 static const struct amdgpu_asic_type_quirk asic_type_quirks[] = { 2125 /* differentiate between P10 and P11 asics with the same DID */ 2126 {0x67FF, 0xE3, CHIP_POLARIS10}, 2127 {0x67FF, 0xE7, CHIP_POLARIS10}, 2128 {0x67FF, 0xF3, CHIP_POLARIS10}, 2129 {0x67FF, 0xF7, CHIP_POLARIS10}, 2130 }; 2131 2132 static const struct drm_driver amdgpu_kms_driver; 2133 2134 static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev) 2135 { 2136 struct pci_dev *p = NULL; 2137 int i; 2138 2139 /* 0 - GPU 2140 * 1 - audio 2141 * 2 - USB 2142 * 3 - UCSI 2143 */ 2144 for (i = 1; i < 4; i++) { 2145 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 2146 adev->pdev->bus->number, i); 2147 if (p) { 2148 pm_runtime_get_sync(&p->dev); 2149 pm_runtime_mark_last_busy(&p->dev); 2150 pm_runtime_put_autosuspend(&p->dev); 
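			/* The get/put cycle above briefly wakes the secondary
			 * function into D0 and then lets runtime PM autosuspend
			 * it again; see the comment above the call to
			 * amdgpu_get_secondary_funcs() in amdgpu_pci_probe().
			 */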
2151 pci_dev_put(p); 2152 } 2153 } 2154 } 2155 2156 static void amdgpu_init_debug_options(struct amdgpu_device *adev) 2157 { 2158 if (amdgpu_debug_mask & AMDGPU_DEBUG_VM) { 2159 pr_info("debug: VM handling debug enabled\n"); 2160 adev->debug_vm = true; 2161 } 2162 2163 if (amdgpu_debug_mask & AMDGPU_DEBUG_LARGEBAR) { 2164 pr_info("debug: enabled simulating large-bar capability on non-large bar system\n"); 2165 adev->debug_largebar = true; 2166 } 2167 2168 if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY) { 2169 pr_info("debug: soft reset for GPU recovery disabled\n"); 2170 adev->debug_disable_soft_recovery = true; 2171 } 2172 2173 if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) { 2174 pr_info("debug: place fw in vram for frontdoor loading\n"); 2175 adev->debug_use_vram_fw_buf = true; 2176 } 2177 } 2178 2179 static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) 2180 { 2181 int i; 2182 2183 for (i = 0; i < ARRAY_SIZE(asic_type_quirks); i++) { 2184 if (pdev->device == asic_type_quirks[i].device && 2185 pdev->revision == asic_type_quirks[i].revision) { 2186 flags &= ~AMD_ASIC_MASK; 2187 flags |= asic_type_quirks[i].type; 2188 break; 2189 } 2190 } 2191 2192 return flags; 2193 } 2194 2195 static int amdgpu_pci_probe(struct pci_dev *pdev, 2196 const struct pci_device_id *ent) 2197 { 2198 struct drm_device *ddev; 2199 struct amdgpu_device *adev; 2200 unsigned long flags = ent->driver_data; 2201 int ret, retry = 0, i; 2202 bool supports_atomic = false; 2203 2204 /* skip devices which are owned by radeon */ 2205 for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) { 2206 if (amdgpu_unsupported_pciidlist[i] == pdev->device) 2207 return -ENODEV; 2208 } 2209 2210 if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev)) 2211 amdgpu_aspm = 0; 2212 2213 if (amdgpu_virtual_display || 2214 amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) 2215 supports_atomic = true; 2216 2217 if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { 2218 DRM_INFO("This hardware requires experimental hardware support.\n" 2219 "See modparam exp_hw_support\n"); 2220 return -ENODEV; 2221 } 2222 2223 flags = amdgpu_fix_asic_type(pdev, flags); 2224 2225 /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping, 2226 * however, SME requires an indirect IOMMU mapping because the encryption 2227 * bit is beyond the DMA mask of the chip. 
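	 * (cc_platform_has(CC_ATTR_MEM_ENCRYPT) below is true when memory
	 *  encryption such as SME is active.)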
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
	    ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
		dev_info(&pdev->dev,
			 "SME is not compatible with RAVEN\n");
		return -ENOTSUPP;
	}

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(&pdev->dev,
				 "SI support provided by radeon.\n");
			dev_info(&pdev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(&pdev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(&pdev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif

	adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
	if (IS_ERR(adev))
		return PTR_ERR(adev);

	adev->dev = &pdev->dev;
	adev->pdev = pdev;
	ddev = adev_to_drm(adev);

	if (!supports_atomic)
		ddev->driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_drvdata(pdev, ddev);

	amdgpu_init_debug_options(adev);

	ret = amdgpu_driver_load_kms(adev, flags);
	if (ret)
		goto err_pci;

retry_init:
	ret = drm_dev_register(ddev, flags);
	if (ret == -EAGAIN && ++retry <= 3) {
		DRM_INFO("retry init %d\n", retry);
		/* Don't request EX mode too frequently, which could be seen
		 * as an attack by the host.
		 */
		msleep(5000);
		goto retry_init;
	} else if (ret) {
		goto err_pci;
	}

	ret = amdgpu_xcp_dev_register(adev, ent);
	if (ret)
		goto err_pci;

	ret = amdgpu_amdkfd_drm_client_create(adev);
	if (ret)
		goto err_pci;

	/*
	 * 1. don't init fbdev on hw without DCE
	 * 2. don't init fbdev if there are no connectors
	 */
	if (adev->mode_info.mode_config_initialized &&
	    !list_empty(&adev_to_drm(adev)->mode_config.connector_list)) {
		/* select 8 bpp console on low vram cards */
		if (adev->gmc.real_vram_size <= (32*1024*1024))
			drm_fbdev_generic_setup(adev_to_drm(adev), 8);
		else
			drm_fbdev_generic_setup(adev_to_drm(adev), 32);
	}

	ret = amdgpu_debugfs_init(adev);
	if (ret)
		DRM_ERROR("Creating debugfs files failed (%d).\n", ret);

	if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
		/* only need to skip on ATPX */
		if (amdgpu_device_supports_px(ddev))
			dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		/* we want direct complete for BOCO */
		if (amdgpu_device_supports_boco(ddev))
			dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
						DPM_FLAG_SMART_SUSPEND |
						DPM_FLAG_MAY_SKIP_RESUME);
		pm_runtime_use_autosuspend(ddev->dev);
		pm_runtime_set_autosuspend_delay(ddev->dev, 5000);

		pm_runtime_allow(ddev->dev);

		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);

		pci_wake_from_d3(pdev, TRUE);

		/*
		 * For runpm implemented via BACO, PMFW will handle the
		 * timing for BACO in and out:
		 * - put the ASIC into BACO state only when both the video
		 *   and audio functions are in D3 state.
		 * - pull the ASIC out of BACO state when either the video
		 *   or the audio function is in D0 state.
		 * Also, at startup, PMFW assumes both functions are in
		 * D0 state.
		 *
		 * So if the snd driver was loaded before the amdgpu driver
		 * and the audio function was put into D3 state, there will
		 * be no PMFW-aware D-state transition (D0->D3) on runpm
		 * suspend, and BACO will not be kicked in correctly.
		 *
		 * Via amdgpu_get_secondary_funcs(), the audio device is put
		 * back into D0 state, so a PMFW-aware D-state transition
		 * (D0->D3) does happen on runpm suspend.
		 */
		if (amdgpu_device_supports_baco(ddev) &&
		    !(adev->flags & AMD_IS_APU) &&
		    (adev->asic_type >= CHIP_NAVI10))
			amdgpu_get_secondary_funcs(adev);
	}

	return 0;

err_pci:
	pci_disable_device(pdev);
	return ret;
}

static void
amdgpu_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);

	amdgpu_xcp_dev_unplug(adev);
	drm_dev_unplug(dev);

	if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_driver_unload_kms(dev);

	/*
	 * Flush any in-flight DMA operations from the device.
	 * Clear the Bus Master Enable bit and then wait on the PCIe Device
	 * Status Transactions Pending bit.
	 */
	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}

static void
amdgpu_pci_shutdown(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (amdgpu_ras_intr_triggered())
		return;

	/* if we are running in a VM, make sure the device
	 * is torn down properly on reboot/shutdown.
	 * unfortunately we can't detect certain
	 * hypervisors so just do this all the time.
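	 * amdgpu_device_ip_suspend() below quiesces all IP blocks;
	 * mp1_state is only set to UNLOAD around it when the device is
	 * not passed through to a guest.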
2419 */ 2420 if (!amdgpu_passthrough(adev)) 2421 adev->mp1_state = PP_MP1_STATE_UNLOAD; 2422 amdgpu_device_ip_suspend(adev); 2423 adev->mp1_state = PP_MP1_STATE_NONE; 2424 } 2425 2426 /** 2427 * amdgpu_drv_delayed_reset_work_handler - work handler for reset 2428 * 2429 * @work: work_struct. 2430 */ 2431 static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) 2432 { 2433 struct list_head device_list; 2434 struct amdgpu_device *adev; 2435 int i, r; 2436 struct amdgpu_reset_context reset_context; 2437 2438 memset(&reset_context, 0, sizeof(reset_context)); 2439 2440 mutex_lock(&mgpu_info.mutex); 2441 if (mgpu_info.pending_reset == true) { 2442 mutex_unlock(&mgpu_info.mutex); 2443 return; 2444 } 2445 mgpu_info.pending_reset = true; 2446 mutex_unlock(&mgpu_info.mutex); 2447 2448 /* Use a common context, just need to make sure full reset is done */ 2449 reset_context.method = AMD_RESET_METHOD_NONE; 2450 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 2451 2452 for (i = 0; i < mgpu_info.num_dgpu; i++) { 2453 adev = mgpu_info.gpu_ins[i].adev; 2454 reset_context.reset_req_dev = adev; 2455 r = amdgpu_device_pre_asic_reset(adev, &reset_context); 2456 if (r) { 2457 dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", 2458 r, adev_to_drm(adev)->unique); 2459 } 2460 if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work)) 2461 r = -EALREADY; 2462 } 2463 for (i = 0; i < mgpu_info.num_dgpu; i++) { 2464 adev = mgpu_info.gpu_ins[i].adev; 2465 flush_work(&adev->xgmi_reset_work); 2466 adev->gmc.xgmi.pending_reset = false; 2467 } 2468 2469 /* reset function will rebuild the xgmi hive info , clear it now */ 2470 for (i = 0; i < mgpu_info.num_dgpu; i++) 2471 amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev); 2472 2473 INIT_LIST_HEAD(&device_list); 2474 2475 for (i = 0; i < mgpu_info.num_dgpu; i++) 2476 list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list); 2477 2478 /* unregister the GPU first, reset function will add them back */ 2479 list_for_each_entry(adev, &device_list, reset_list) 2480 amdgpu_unregister_gpu_instance(adev); 2481 2482 /* Use a common context, just need to make sure full reset is done */ 2483 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); 2484 set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); 2485 r = amdgpu_do_asic_reset(&device_list, &reset_context); 2486 2487 if (r) { 2488 DRM_ERROR("reinit gpus failure"); 2489 return; 2490 } 2491 for (i = 0; i < mgpu_info.num_dgpu; i++) { 2492 adev = mgpu_info.gpu_ins[i].adev; 2493 if (!adev->kfd.init_complete) { 2494 kgd2kfd_init_zone_device(adev); 2495 amdgpu_amdkfd_device_init(adev); 2496 amdgpu_amdkfd_drm_client_create(adev); 2497 } 2498 amdgpu_ttm_set_buffer_funcs_status(adev, true); 2499 } 2500 } 2501 2502 static int amdgpu_pmops_prepare(struct device *dev) 2503 { 2504 struct drm_device *drm_dev = dev_get_drvdata(dev); 2505 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2506 2507 /* Return a positive number here so 2508 * DPM_FLAG_SMART_SUSPEND works properly 2509 */ 2510 if (amdgpu_device_supports_boco(drm_dev) && 2511 pm_runtime_suspended(dev)) 2512 return 1; 2513 2514 /* if we will not support s3 or s2i for the device 2515 * then skip suspend 2516 */ 2517 if (!amdgpu_acpi_is_s0ix_active(adev) && 2518 !amdgpu_acpi_is_s3_active(adev)) 2519 return 1; 2520 2521 return amdgpu_device_prepare(drm_dev); 2522 } 2523 2524 static void amdgpu_pmops_complete(struct device *dev) 2525 { 2526 /* nothing to do */ 2527 } 2528 2529 static int amdgpu_pmops_suspend(struct device *dev) 
2530 { 2531 struct drm_device *drm_dev = dev_get_drvdata(dev); 2532 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2533 2534 adev->suspend_complete = false; 2535 if (amdgpu_acpi_is_s0ix_active(adev)) 2536 adev->in_s0ix = true; 2537 else if (amdgpu_acpi_is_s3_active(adev)) 2538 adev->in_s3 = true; 2539 if (!adev->in_s0ix && !adev->in_s3) 2540 return 0; 2541 return amdgpu_device_suspend(drm_dev, true); 2542 } 2543 2544 static int amdgpu_pmops_suspend_noirq(struct device *dev) 2545 { 2546 struct drm_device *drm_dev = dev_get_drvdata(dev); 2547 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2548 2549 adev->suspend_complete = true; 2550 if (amdgpu_acpi_should_gpu_reset(adev)) 2551 return amdgpu_asic_reset(adev); 2552 2553 return 0; 2554 } 2555 2556 static int amdgpu_pmops_resume(struct device *dev) 2557 { 2558 struct drm_device *drm_dev = dev_get_drvdata(dev); 2559 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2560 int r; 2561 2562 if (!adev->in_s0ix && !adev->in_s3) 2563 return 0; 2564 2565 /* Avoids registers access if device is physically gone */ 2566 if (!pci_device_is_present(adev->pdev)) 2567 adev->no_hw_access = true; 2568 2569 r = amdgpu_device_resume(drm_dev, true); 2570 if (amdgpu_acpi_is_s0ix_active(adev)) 2571 adev->in_s0ix = false; 2572 else 2573 adev->in_s3 = false; 2574 return r; 2575 } 2576 2577 static int amdgpu_pmops_freeze(struct device *dev) 2578 { 2579 struct drm_device *drm_dev = dev_get_drvdata(dev); 2580 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2581 int r; 2582 2583 adev->in_s4 = true; 2584 r = amdgpu_device_suspend(drm_dev, true); 2585 adev->in_s4 = false; 2586 if (r) 2587 return r; 2588 2589 if (amdgpu_acpi_should_gpu_reset(adev)) 2590 return amdgpu_asic_reset(adev); 2591 return 0; 2592 } 2593 2594 static int amdgpu_pmops_thaw(struct device *dev) 2595 { 2596 struct drm_device *drm_dev = dev_get_drvdata(dev); 2597 2598 return amdgpu_device_resume(drm_dev, true); 2599 } 2600 2601 static int amdgpu_pmops_poweroff(struct device *dev) 2602 { 2603 struct drm_device *drm_dev = dev_get_drvdata(dev); 2604 2605 return amdgpu_device_suspend(drm_dev, true); 2606 } 2607 2608 static int amdgpu_pmops_restore(struct device *dev) 2609 { 2610 struct drm_device *drm_dev = dev_get_drvdata(dev); 2611 2612 return amdgpu_device_resume(drm_dev, true); 2613 } 2614 2615 static int amdgpu_runtime_idle_check_display(struct device *dev) 2616 { 2617 struct pci_dev *pdev = to_pci_dev(dev); 2618 struct drm_device *drm_dev = pci_get_drvdata(pdev); 2619 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2620 2621 if (adev->mode_info.num_crtc) { 2622 struct drm_connector *list_connector; 2623 struct drm_connector_list_iter iter; 2624 int ret = 0; 2625 2626 if (amdgpu_runtime_pm != -2) { 2627 /* XXX: Return busy if any displays are connected to avoid 2628 * possible display wakeups after runtime resume due to 2629 * hotplug events in case any displays were connected while 2630 * the GPU was in suspend. Remove this once that is fixed. 
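			 * (This connector scan is skipped entirely when
			 *  amdgpu_runtime_pm is set to -2.)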
2631 */ 2632 mutex_lock(&drm_dev->mode_config.mutex); 2633 drm_connector_list_iter_begin(drm_dev, &iter); 2634 drm_for_each_connector_iter(list_connector, &iter) { 2635 if (list_connector->status == connector_status_connected) { 2636 ret = -EBUSY; 2637 break; 2638 } 2639 } 2640 drm_connector_list_iter_end(&iter); 2641 mutex_unlock(&drm_dev->mode_config.mutex); 2642 2643 if (ret) 2644 return ret; 2645 } 2646 2647 if (adev->dc_enabled) { 2648 struct drm_crtc *crtc; 2649 2650 drm_for_each_crtc(crtc, drm_dev) { 2651 drm_modeset_lock(&crtc->mutex, NULL); 2652 if (crtc->state->active) 2653 ret = -EBUSY; 2654 drm_modeset_unlock(&crtc->mutex); 2655 if (ret < 0) 2656 break; 2657 } 2658 } else { 2659 mutex_lock(&drm_dev->mode_config.mutex); 2660 drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL); 2661 2662 drm_connector_list_iter_begin(drm_dev, &iter); 2663 drm_for_each_connector_iter(list_connector, &iter) { 2664 if (list_connector->dpms == DRM_MODE_DPMS_ON) { 2665 ret = -EBUSY; 2666 break; 2667 } 2668 } 2669 2670 drm_connector_list_iter_end(&iter); 2671 2672 drm_modeset_unlock(&drm_dev->mode_config.connection_mutex); 2673 mutex_unlock(&drm_dev->mode_config.mutex); 2674 } 2675 if (ret) 2676 return ret; 2677 } 2678 2679 return 0; 2680 } 2681 2682 static int amdgpu_pmops_runtime_suspend(struct device *dev) 2683 { 2684 struct pci_dev *pdev = to_pci_dev(dev); 2685 struct drm_device *drm_dev = pci_get_drvdata(pdev); 2686 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2687 int ret, i; 2688 2689 if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) { 2690 pm_runtime_forbid(dev); 2691 return -EBUSY; 2692 } 2693 2694 ret = amdgpu_runtime_idle_check_display(dev); 2695 if (ret) 2696 return ret; 2697 2698 /* wait for all rings to drain before suspending */ 2699 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 2700 struct amdgpu_ring *ring = adev->rings[i]; 2701 2702 if (ring && ring->sched.ready) { 2703 ret = amdgpu_fence_wait_empty(ring); 2704 if (ret) 2705 return -EBUSY; 2706 } 2707 } 2708 2709 adev->in_runpm = true; 2710 if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) 2711 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 2712 2713 /* 2714 * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some 2715 * proper cleanups and put itself into a state ready for PNP. That 2716 * can address some random resuming failure observed on BOCO capable 2717 * platforms. 2718 * TODO: this may be also needed for PX capable platform. 2719 */ 2720 if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) 2721 adev->mp1_state = PP_MP1_STATE_UNLOAD; 2722 2723 ret = amdgpu_device_prepare(drm_dev); 2724 if (ret) 2725 return ret; 2726 ret = amdgpu_device_suspend(drm_dev, false); 2727 if (ret) { 2728 adev->in_runpm = false; 2729 if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) 2730 adev->mp1_state = PP_MP1_STATE_NONE; 2731 return ret; 2732 } 2733 2734 if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) 2735 adev->mp1_state = PP_MP1_STATE_NONE; 2736 2737 if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) { 2738 /* Only need to handle PCI state in the driver for ATPX 2739 * PCI core handles it for _PR3. 
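		 * For the PX/ATPX case below we cache the PCI config space,
		 * disable the device and drop it into D3cold by hand; the
		 * BOCO and BACO/BAMACO cases further down rely on the PCI
		 * core and PMFW instead.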
2740 */ 2741 amdgpu_device_cache_pci_state(pdev); 2742 pci_disable_device(pdev); 2743 pci_ignore_hotplug(pdev); 2744 pci_set_power_state(pdev, PCI_D3cold); 2745 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; 2746 } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) { 2747 /* nothing to do */ 2748 } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2749 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { 2750 amdgpu_device_baco_enter(drm_dev); 2751 } 2752 2753 dev_dbg(&pdev->dev, "asic/device is runtime suspended\n"); 2754 2755 return 0; 2756 } 2757 2758 static int amdgpu_pmops_runtime_resume(struct device *dev) 2759 { 2760 struct pci_dev *pdev = to_pci_dev(dev); 2761 struct drm_device *drm_dev = pci_get_drvdata(pdev); 2762 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2763 int ret; 2764 2765 if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) 2766 return -EINVAL; 2767 2768 /* Avoids registers access if device is physically gone */ 2769 if (!pci_device_is_present(adev->pdev)) 2770 adev->no_hw_access = true; 2771 2772 if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) { 2773 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 2774 2775 /* Only need to handle PCI state in the driver for ATPX 2776 * PCI core handles it for _PR3. 2777 */ 2778 pci_set_power_state(pdev, PCI_D0); 2779 amdgpu_device_load_pci_state(pdev); 2780 ret = pci_enable_device(pdev); 2781 if (ret) 2782 return ret; 2783 pci_set_master(pdev); 2784 } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) { 2785 /* Only need to handle PCI state in the driver for ATPX 2786 * PCI core handles it for _PR3. 2787 */ 2788 pci_set_master(pdev); 2789 } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2790 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { 2791 amdgpu_device_baco_exit(drm_dev); 2792 } 2793 ret = amdgpu_device_resume(drm_dev, false); 2794 if (ret) { 2795 if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) 2796 pci_disable_device(pdev); 2797 return ret; 2798 } 2799 2800 if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) 2801 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; 2802 adev->in_runpm = false; 2803 return 0; 2804 } 2805 2806 static int amdgpu_pmops_runtime_idle(struct device *dev) 2807 { 2808 struct drm_device *drm_dev = dev_get_drvdata(dev); 2809 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2810 int ret; 2811 2812 if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) { 2813 pm_runtime_forbid(dev); 2814 return -EBUSY; 2815 } 2816 2817 ret = amdgpu_runtime_idle_check_display(dev); 2818 2819 pm_runtime_mark_last_busy(dev); 2820 pm_runtime_autosuspend(dev); 2821 return ret; 2822 } 2823 2824 long amdgpu_drm_ioctl(struct file *filp, 2825 unsigned int cmd, unsigned long arg) 2826 { 2827 struct drm_file *file_priv = filp->private_data; 2828 struct drm_device *dev; 2829 long ret; 2830 2831 dev = file_priv->minor->dev; 2832 ret = pm_runtime_get_sync(dev->dev); 2833 if (ret < 0) 2834 goto out; 2835 2836 ret = drm_ioctl(filp, cmd, arg); 2837 2838 pm_runtime_mark_last_busy(dev->dev); 2839 out: 2840 pm_runtime_put_autosuspend(dev->dev); 2841 return ret; 2842 } 2843 2844 static const struct dev_pm_ops amdgpu_pm_ops = { 2845 .prepare = amdgpu_pmops_prepare, 2846 .complete = amdgpu_pmops_complete, 2847 .suspend = amdgpu_pmops_suspend, 2848 .suspend_noirq = amdgpu_pmops_suspend_noirq, 2849 .resume = amdgpu_pmops_resume, 2850 .freeze = amdgpu_pmops_freeze, 2851 .thaw = amdgpu_pmops_thaw, 2852 .poweroff = amdgpu_pmops_poweroff, 2853 .restore = amdgpu_pmops_restore, 2854 .runtime_suspend = amdgpu_pmops_runtime_suspend, 2855 .runtime_resume = 
amdgpu_pmops_runtime_resume, 2856 .runtime_idle = amdgpu_pmops_runtime_idle, 2857 }; 2858 2859 static int amdgpu_flush(struct file *f, fl_owner_t id) 2860 { 2861 struct drm_file *file_priv = f->private_data; 2862 struct amdgpu_fpriv *fpriv = file_priv->driver_priv; 2863 long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY; 2864 2865 timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout); 2866 timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout); 2867 2868 return timeout >= 0 ? 0 : timeout; 2869 } 2870 2871 static const struct file_operations amdgpu_driver_kms_fops = { 2872 .owner = THIS_MODULE, 2873 .open = drm_open, 2874 .flush = amdgpu_flush, 2875 .release = drm_release, 2876 .unlocked_ioctl = amdgpu_drm_ioctl, 2877 .mmap = drm_gem_mmap, 2878 .poll = drm_poll, 2879 .read = drm_read, 2880 #ifdef CONFIG_COMPAT 2881 .compat_ioctl = amdgpu_kms_compat_ioctl, 2882 #endif 2883 #ifdef CONFIG_PROC_FS 2884 .show_fdinfo = drm_show_fdinfo, 2885 #endif 2886 }; 2887 2888 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) 2889 { 2890 struct drm_file *file; 2891 2892 if (!filp) 2893 return -EINVAL; 2894 2895 if (filp->f_op != &amdgpu_driver_kms_fops) 2896 return -EINVAL; 2897 2898 file = filp->private_data; 2899 *fpriv = file->driver_priv; 2900 return 0; 2901 } 2902 2903 const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { 2904 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2905 DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2906 DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2907 DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER), 2908 DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2909 DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2910 /* KMS */ 2911 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2912 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2913 DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2914 DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2915 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2916 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2917 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2918 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2919 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2920 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2921 }; 2922 2923 static const struct drm_driver amdgpu_kms_driver = { 2924 .driver_features = 2925 DRIVER_ATOMIC | 2926 DRIVER_GEM | 2927 DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ | 2928 DRIVER_SYNCOBJ_TIMELINE, 2929 .open = amdgpu_driver_open_kms, 2930 .postclose = amdgpu_driver_postclose_kms, 2931 .lastclose = amdgpu_driver_lastclose_kms, 2932 .ioctls = amdgpu_ioctls_kms, 2933 .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), 2934 .dumb_create = amdgpu_mode_dumb_create, 2935 .dumb_map_offset = amdgpu_mode_dumb_mmap, 2936 .fops = &amdgpu_driver_kms_fops, 2937 .release = &amdgpu_driver_release_kms, 2938 #ifdef CONFIG_PROC_FS 2939 .show_fdinfo = amdgpu_show_fdinfo, 2940 #endif 2941 2942 .gem_prime_import = amdgpu_gem_prime_import, 2943 2944 
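	/* Driver identity and the KMS interface version advertised to
	 * userspace (see the KMS_DRIVER_* version history at the top of
	 * this file).
	 */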
.name = DRIVER_NAME, 2945 .desc = DRIVER_DESC, 2946 .date = DRIVER_DATE, 2947 .major = KMS_DRIVER_MAJOR, 2948 .minor = KMS_DRIVER_MINOR, 2949 .patchlevel = KMS_DRIVER_PATCHLEVEL, 2950 }; 2951 2952 const struct drm_driver amdgpu_partition_driver = { 2953 .driver_features = 2954 DRIVER_GEM | DRIVER_RENDER | DRIVER_SYNCOBJ | 2955 DRIVER_SYNCOBJ_TIMELINE, 2956 .open = amdgpu_driver_open_kms, 2957 .postclose = amdgpu_driver_postclose_kms, 2958 .lastclose = amdgpu_driver_lastclose_kms, 2959 .ioctls = amdgpu_ioctls_kms, 2960 .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), 2961 .dumb_create = amdgpu_mode_dumb_create, 2962 .dumb_map_offset = amdgpu_mode_dumb_mmap, 2963 .fops = &amdgpu_driver_kms_fops, 2964 .release = &amdgpu_driver_release_kms, 2965 2966 .gem_prime_import = amdgpu_gem_prime_import, 2967 2968 .name = DRIVER_NAME, 2969 .desc = DRIVER_DESC, 2970 .date = DRIVER_DATE, 2971 .major = KMS_DRIVER_MAJOR, 2972 .minor = KMS_DRIVER_MINOR, 2973 .patchlevel = KMS_DRIVER_PATCHLEVEL, 2974 }; 2975 2976 static struct pci_error_handlers amdgpu_pci_err_handler = { 2977 .error_detected = amdgpu_pci_error_detected, 2978 .mmio_enabled = amdgpu_pci_mmio_enabled, 2979 .slot_reset = amdgpu_pci_slot_reset, 2980 .resume = amdgpu_pci_resume, 2981 }; 2982 2983 static const struct attribute_group *amdgpu_sysfs_groups[] = { 2984 &amdgpu_vram_mgr_attr_group, 2985 &amdgpu_gtt_mgr_attr_group, 2986 &amdgpu_flash_attr_group, 2987 NULL, 2988 }; 2989 2990 static struct pci_driver amdgpu_kms_pci_driver = { 2991 .name = DRIVER_NAME, 2992 .id_table = pciidlist, 2993 .probe = amdgpu_pci_probe, 2994 .remove = amdgpu_pci_remove, 2995 .shutdown = amdgpu_pci_shutdown, 2996 .driver.pm = &amdgpu_pm_ops, 2997 .err_handler = &amdgpu_pci_err_handler, 2998 .dev_groups = amdgpu_sysfs_groups, 2999 }; 3000 3001 static int __init amdgpu_init(void) 3002 { 3003 int r; 3004 3005 if (drm_firmware_drivers_only()) 3006 return -EINVAL; 3007 3008 r = amdgpu_sync_init(); 3009 if (r) 3010 goto error_sync; 3011 3012 r = amdgpu_fence_slab_init(); 3013 if (r) 3014 goto error_fence; 3015 3016 DRM_INFO("amdgpu kernel modesetting enabled.\n"); 3017 amdgpu_register_atpx_handler(); 3018 amdgpu_acpi_detect(); 3019 3020 /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ 3021 amdgpu_amdkfd_init(); 3022 3023 /* let modprobe override vga console setting */ 3024 return pci_register_driver(&amdgpu_kms_pci_driver); 3025 3026 error_fence: 3027 amdgpu_sync_fini(); 3028 3029 error_sync: 3030 return r; 3031 } 3032 3033 static void __exit amdgpu_exit(void) 3034 { 3035 amdgpu_amdkfd_fini(); 3036 pci_unregister_driver(&amdgpu_kms_pci_driver); 3037 amdgpu_unregister_atpx_handler(); 3038 amdgpu_acpi_release(); 3039 amdgpu_sync_fini(); 3040 amdgpu_fence_slab_fini(); 3041 mmu_notifier_synchronize(); 3042 amdgpu_xcp_drv_release(); 3043 } 3044 3045 module_init(amdgpu_init); 3046 module_exit(amdgpu_exit); 3047 3048 MODULE_AUTHOR(DRIVER_AUTHOR); 3049 MODULE_DESCRIPTION(DRIVER_DESC); 3050 MODULE_LICENSE("GPL and additional rights"); 3051
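
/*
 * Usage note (illustrative, not part of the driver): the SI/CIK ownership
 * messages printed in amdgpu_pci_probe() can be overridden from modprobe
 * configuration or the kernel command line, for example:
 *
 *   # hand SI/CIK parts to amdgpu instead of radeon
 *   modprobe amdgpu si_support=1 cik_support=1
 *
 *   # or on the kernel command line
 *   radeon.si_support=0 amdgpu.si_support=1
 *   radeon.cik_support=0 amdgpu.cik_support=1
 */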