/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "amdgpu: " fmt

#ifdef dev_fmt
#undef dev_fmt
#endif

#define dev_fmt(fmt) "amdgpu: " fmt

#include "amdgpu_ctx.h"

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_vpe.h"
#include "amdgpu_umsch_mm.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
#include "amdgpu_lsdma.h"
#include "amdgpu_nbio.h"
#include "amdgpu_hdp.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
#include "amdgpu_mes_ctx.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_discovery.h"
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
#include "amdgpu_mmhub.h"
#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
#include "amdgpu_fdinfo.h"
#include "amdgpu_mca.h"
#include "amdgpu_aca.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif

#define MAX_GPU_INSTANCE		64

#define GFX_SLICE_PERIOD		msecs_to_jiffies(250)

struct amdgpu_gpu_instance {
	struct amdgpu_device	*adev;
	int			mgpu_fan_enabled;
};

struct amdgpu_mgpu_info {
	struct amdgpu_gpu_instance	gpu_ins[MAX_GPU_INSTANCE];
	struct mutex			mutex;
	uint32_t			num_gpu;
	uint32_t			num_dgpu;
	uint32_t			num_apu;

	/* delayed reset_func for XGMI configuration if necessary */
	struct delayed_work		delayed_reset_work;
	bool				pending_reset;
};

enum amdgpu_ss {
	AMDGPU_SS_DRV_LOAD,
	AMDGPU_SS_DEV_D0,
	AMDGPU_SS_DEV_D3,
	AMDGPU_SS_DRV_UNLOAD
};

struct amdgpu_hwip_reg_entry {
	u32		hwip;
	u32		inst;
	u32		seg;
	u32		reg_offset;
	const char	*reg_name;
};

struct amdgpu_watchdog_timer {
	bool		timeout_fatal_disable;
	uint32_t	period; /* maxCycles = (1 << period), the number of cycles before a timeout */
};

#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH	256

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern unsigned int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern u64 amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern uint amdgpu_force_long_training;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern int amdgpu_dm_abm_level;
extern int amdgpu_backlight;
extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
extern int amdgpu_bad_page_threshold;
extern bool amdgpu_ignore_bad_page_threshold;
extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_uni_mes;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
extern int amdgpu_mtype_local;
extern bool enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
extern bool no_system_mem_limit;
extern int halt_if_hws_hang;
#else
static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
static const bool __maybe_unused debug_evictions; /* = false */
static const bool __maybe_unused no_system_mem_limit;
static const int __maybe_unused halt_if_hws_hang;
#endif
#ifdef CONFIG_HSA_AMD_P2P
extern bool pcie_p2p;
#endif

extern int amdgpu_tmz;
extern int amdgpu_reset_method;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif
extern int amdgpu_num_kcq;

#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
#define AMDGPU_UMSCHFW_LOG_SIZE (32 * 1024)
extern int amdgpu_vcnfw_log;
extern int amdgpu_sg_display;
extern int amdgpu_umsch_mm;
extern int amdgpu_seamless;
extern int amdgpu_umsch_mm_fwlog;

extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;

extern int amdgpu_wbrf;

#define AMDGPU_VM_MAX_NUM_CTX			4096
#define AMDGPU_SG_THRESHOLD			(256*1024*1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
#define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
#define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
#define AMDGPUFB_CONN_LIMIT			4
#define AMDGPU_BIOS_NUM_SCRATCH			16

#define AMDGPU_VBIOS_VGA_ALLOCATION		(9 * 1024 * 1024)	/* reserve 8 MB for the VGA emulator and 1 MB for FB */

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA			0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX			(1 << 0)
#define AMDGPU_RESET_COMPUTE			(1 << 1)
#define AMDGPU_RESET_DMA			(1 << 2)
#define AMDGPU_RESET_CP				(1 << 3)
#define AMDGPU_RESET_GRBM			(1 << 4)
#define AMDGPU_RESET_DMA1			(1 << 5)
#define AMDGPU_RESET_RLC			(1 << 6)
#define AMDGPU_RESET_SEM			(1 << 7)
#define AMDGPU_RESET_IH				(1 << 8)
#define AMDGPU_RESET_VMC			(1 << 9)
#define AMDGPU_RESET_MC				(1 << 10)
#define AMDGPU_RESET_DISPLAY			(1 << 11)
#define AMDGPU_RESET_UVD			(1 << 12)
#define AMDGPU_RESET_VCE			(1 << 13)
#define AMDGPU_RESET_VCE1			(1 << 14)

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH	128
#define CIK_CURSOR_HEIGHT	128

/* smart shift bias level limits */
#define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)

/* Extra time delay (in ms) to eliminate the influence of temperature momentary fluctuation */
#define AMDGPU_SWCTF_EXTRA_DELAY	50

struct amdgpu_xcp_mgr;
struct amdgpu_device;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct kfd_vm_fault_info;
struct amdgpu_hive_info;
struct amdgpu_reset_context;
struct amdgpu_reset_control;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
	AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};
#define SRIOV_USEC_TIMEOUT		1200000	/* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT		5000	/* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL	5	/* in msecs, 5ms */
#define MAX_KIQ_REG_TRY			1000

int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type);

#define AMDGPU_MAX_IP_NUM 16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version);

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);
bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes);
/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/* Sub-allocation manager; it has to be protected by another lock.
 * By conception this is a helper for other parts of the driver,
 * like the indirect buffer or semaphore, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; that object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
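/* Illustrative worked example (not part of the original header): with
 * total_size = 16 KiB and the last sub-allocation ending at offset 14 KiB
 * (offset 12 KiB + size 2 KiB), a 4 KiB request fails the end-of-buffer
 * check above (16 - 14 = 2 KiB < 4 KiB), so the allocator falls back to
 * waiting on existing sub-allocations as described in the comment.
 */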
struct amdgpu_sa_manager {
	struct drm_suballoc_manager	base;
	struct amdgpu_bo		*bo;
	uint64_t			gpu_addr;
	void				*cpu_ptr;
};

int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct delayed_work		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	u32				target_vblank;
	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_abo;
	unsigned			shared_count;
	struct dma_fence		**shared;
	struct dma_fence_cb		cb;
	bool				async;
};


/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct amdgpu_bo_va	*prt_va;
	struct amdgpu_bo_va	*csa_va;
	struct amdgpu_bo_va	*seq64_va;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
	/** GPU partition selection */
	uint32_t		xcp_id;
};

int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
	spinlock_t		lock;
};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);

/*
 * Benchmarking
 */
int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool grbm_indexed;
};

/**
 * enum amd_reset_method - Methods for resetting AMD GPU devices
 *
 * @AMD_RESET_METHOD_NONE: The device will not be reset.
 * @AMD_RESET_METHOD_LEGACY: Method reserved for SI, CIK and VI ASICs.
 * @AMD_RESET_METHOD_MODE0: Reset the entire ASIC. Not currently available for
 *                          any device.
 * @AMD_RESET_METHOD_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN,
 *                          etc.) individually. Suitable only for some discrete
 *                          GPUs; not available for all ASICs.
 * @AMD_RESET_METHOD_MODE2: Resets a lesser level of IPs compared to MODE1.
 *                          Which IPs are reset depends on the ASIC. Notably
 *                          doesn't reset IPs shared with the CPU on APUs or
 *                          the memory controllers (so VRAM is not lost). Not
 *                          available on all ASICs.
 * @AMD_RESET_METHOD_BACO: BACO (Bus Alive, Chip Off) method powers the card
 *                         off and on, but without powering off the PCI bus.
 *                         Suitable only for discrete GPUs.
 * @AMD_RESET_METHOD_PCI: Does a full bus reset using the core Linux PCI reset
 *                        subsystem and performs a secondary bus reset or FLR,
 *                        depending on what the underlying hardware supports.
 *
 * Methods available to the AMD GPU driver for resetting the device. Not all
 * methods are suitable for every device. The user can override the method
 * using the module parameter `reset_method`.
 */
enum amd_reset_method {
	AMD_RESET_METHOD_NONE = -1,
	AMD_RESET_METHOD_LEGACY = 0,
	AMD_RESET_METHOD_MODE0,
	AMD_RESET_METHOD_MODE1,
	AMD_RESET_METHOD_MODE2,
	AMD_RESET_METHOD_BACO,
	AMD_RESET_METHOD_PCI,
};
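/*
 * Usage note (illustrative, not part of the original header): these enum
 * values line up with the amdgpu_reset_method module parameter declared
 * earlier in this file, so e.g.
 *
 *   modprobe amdgpu reset_method=4
 *
 * is expected to request AMD_RESET_METHOD_BACO, while the default of -1
 * leaves the choice of method to the driver.
 */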
struct amdgpu_video_codec_info {
	u32 codec_type;
	u32 max_width;
	u32 max_height;
	u32 max_pixels_per_frame;
	u32 max_level;
};

#define codec_info_build(type, width, height, level) \
			 .codec_type = type,\
			 .max_width = width,\
			 .max_height = height,\
			 .max_pixels_per_frame = height * width,\
			 .max_level = level,

struct amdgpu_video_codecs {
	const u32 codec_count;
	const struct amdgpu_video_codec_info *codec_array;
};
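/*
 * Illustrative example (not part of the original header): codec_info_build()
 * expands to designated initializers, so per-ASIC code can build its codec
 * tables roughly like this (the array name and limits below are made up):
 *
 *   static const struct amdgpu_video_codec_info example_decode_array[] = {
 *           {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
 *                             8192, 4352, 186)},
 *   };
 *
 *   static const struct amdgpu_video_codecs example_decode_codecs = {
 *           .codec_count = ARRAY_SIZE(example_decode_array),
 *           .codec_array = example_decode_array,
 *   };
 */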
/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
	/* flush hdp write queue */
	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	/* invalidate hdp read cache */
	void (*invalidate_hdp)(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring);
	/* check if the asic needs a full reset or if soft reset will work */
	bool (*need_full_reset)(struct amdgpu_device *adev);
	/* initialize doorbell layout for specific asic */
	void (*init_doorbell_index)(struct amdgpu_device *adev);
	/* PCIe bandwidth usage */
	void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
			       uint64_t *count1);
	/* do we need to reset the asic at init time (e.g., kexec) */
	bool (*need_reset_on_init)(struct amdgpu_device *adev);
	/* PCIe replay counter */
	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
	/* device supports BACO */
	int (*supports_baco)(struct amdgpu_device *adev);
	/* pre asic_init quirks */
	void (*pre_asic_init)(struct amdgpu_device *adev);
	/* enter/exit umd stable pstate */
	int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
	/* query video codecs */
	int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
				  const struct amdgpu_video_codecs **codecs);
	/* encode "> 32bits" smn addressing */
	u64 (*encode_ext_smn_addressing)(int ext_id);

	ssize_t (*get_reg_state)(struct amdgpu_device *adev,
				 enum amdgpu_reg_state reg_state, void *buf,
				 size_t max_size);
};

/*
 * IOCTL.
 */
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_mem_scratch {
	struct amdgpu_bo	*robj;
	volatile uint32_t	*ptr;
	u64			gpu_addr;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t);
typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t);

typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);

typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t);
typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_mmio_remap {
	u32 reg_offset;
	resource_size_t bus_addr;
};

/* Define the HW IP blocks that will be used in the driver; add more if necessary */
enum amd_hw_ip_block_type {
	GC_HWIP = 1,
	HDP_HWIP,
	SDMA0_HWIP,
	SDMA1_HWIP,
	SDMA2_HWIP,
	SDMA3_HWIP,
	SDMA4_HWIP,
	SDMA5_HWIP,
	SDMA6_HWIP,
	SDMA7_HWIP,
	LSDMA_HWIP,
	MMHUB_HWIP,
	ATHUB_HWIP,
	NBIO_HWIP,
	MP0_HWIP,
	MP1_HWIP,
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	JPEG_HWIP = VCN_HWIP,
	VCN1_HWIP,
	VCE_HWIP,
	VPE_HWIP,
	DF_HWIP,
	DCE_HWIP,
	OSSSYS_HWIP,
	SMUIO_HWIP,
	PWR_HWIP,
	NBIF_HWIP,
	THM_HWIP,
	CLK_HWIP,
	UMC_HWIP,
	RSMU_HWIP,
	XGMI_HWIP,
	DCI_HWIP,
	PCIE_HWIP,
	ISP_HWIP,
	MAX_HWIP
};

#define HWIP_MAX_INSTANCE	44

#define HW_ID_MAX		300
#define IP_VERSION_FULL(mj, mn, rv, var, srev) \
	(((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
#define IP_VERSION(mj, mn, rv)		IP_VERSION_FULL(mj, mn, rv, 0, 0)
#define IP_VERSION_MAJ(ver)		((ver) >> 24)
#define IP_VERSION_MIN(ver)		(((ver) >> 16) & 0xFF)
#define IP_VERSION_REV(ver)		(((ver) >> 8) & 0xFF)
#define IP_VERSION_VARIANT(ver)		(((ver) >> 4) & 0xF)
#define IP_VERSION_SUBREV(ver)		((ver) & 0xF)
#define IP_VERSION_MAJ_MIN_REV(ver)	((ver) >> 8)
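/*
 * Illustrative example (not part of the original header): the packed value
 * compares numerically, so IP-version checks are typically written as
 *
 *   if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
 *           ...;
 *
 * (amdgpu_ip_version() is defined later in this file), and
 * IP_VERSION_MAJ()/IP_VERSION_MIN()/IP_VERSION_REV() unpack the fields
 * again, e.g. IP_VERSION_MAJ(IP_VERSION(11, 0, 2)) == 11.
 */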
struct amdgpu_ip_map_info {
	/* Map of logical to actual dev instances/mask */
	uint32_t	dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
	int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev,
				      enum amd_hw_ip_block_type block,
				      int8_t inst);
	uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev,
					enum amd_hw_ip_block_type block,
					uint32_t mask);
};

struct amd_powerplay {
	void *pp_handle;
	const struct amd_pm_funcs *pp_funcs;
};

struct ip_discovery_top;

/* polaris10 kickers */
#define ASICID_IS_P20(did, rid)		(((did == 0x67DF) && \
					 ((rid == 0xE3) || \
					  (rid == 0xE4) || \
					  (rid == 0xE5) || \
					  (rid == 0xE7) || \
					  (rid == 0xEF))) || \
					 ((did == 0x6FDF) && \
					 ((rid == 0xE7) || \
					  (rid == 0xEF) || \
					  (rid == 0xFF))))

#define ASICID_IS_P30(did, rid)		((did == 0x67DF) && \
					((rid == 0xE1) || \
					 (rid == 0xF7)))

/* polaris11 kickers */
#define ASICID_IS_P21(did, rid)		(((did == 0x67EF) && \
					 ((rid == 0xE0) || \
					  (rid == 0xE5))) || \
					 ((did == 0x67FF) && \
					 ((rid == 0xCF) || \
					  (rid == 0xEF) || \
					  (rid == 0xFF))))

#define ASICID_IS_P31(did, rid)		((did == 0x67EF) && \
					((rid == 0xE2)))

/* polaris12 kickers */
#define ASICID_IS_P23(did, rid)		(((did == 0x6987) && \
					 ((rid == 0xC0) || \
					  (rid == 0xC1) || \
					  (rid == 0xC3) || \
					  (rid == 0xC7))) || \
					 ((did == 0x6981) && \
					 ((rid == 0x00) || \
					  (rid == 0x01) || \
					  (rid == 0x10))))

struct amdgpu_mqd_prop {
	uint64_t mqd_gpu_addr;
	uint64_t hqd_base_gpu_addr;
	uint64_t rptr_gpu_addr;
	uint64_t wptr_gpu_addr;
	uint32_t queue_size;
	bool use_doorbell;
	uint32_t doorbell_index;
	uint64_t eop_gpu_addr;
	uint32_t hqd_pipe_priority;
	uint32_t hqd_queue_priority;
	bool allow_tunneling;
	bool hqd_active;
};

struct amdgpu_mqd {
	unsigned mqd_size;
	int (*init_mqd)(struct amdgpu_device *adev, void *mqd,
			struct amdgpu_mqd_prop *p);
};

#define AMDGPU_RESET_MAGIC_NUM	64
#define AMDGPU_MAX_DF_PERFMONS	4
struct amdgpu_reset_domain;
struct amdgpu_fru_info;

/*
 * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
 */
#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)

struct amdgpu_device {
	struct device			*dev;
	struct pci_dev			*pdev;
	struct drm_device		ddev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp		acp;
#endif
	struct amdgpu_hive_info		*hive;
	struct amdgpu_xcp_mgr		*xcp_mgr;
	/* ASIC */
	enum amd_asic_type		asic_type;
	uint32_t			family;
	uint32_t			rev_id;
	uint32_t			external_rev_id;
	unsigned long			flags;
	unsigned long			apu_flags;
	int				usec_timeout;
	const struct amdgpu_asic_funcs	*asic_funcs;
	bool				shutdown;
	bool				need_swiotlb;
	bool				accel_working;
	struct notifier_block		acpi_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct debugfs_blob_wrapper	debugfs_vbios_blob;
	struct debugfs_blob_wrapper	debugfs_discovery_blob;
	struct mutex			srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex			grbm_idx_mutex;
	struct dev_pm_domain		vga_pm_domain;
	bool				have_disp_power_ref;
	bool				have_atomics_support;

	/* BIOS */
	bool				is_atom_fw;
	uint8_t				*bios;
	uint32_t			bios_size;
	uint32_t			bios_scratch_reg_offset;
	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t			mmio_idx_lock;
	struct amdgpu_mmio_remap	rmmio_remap;
	/* protects concurrent SMC based register access */
	spinlock_t			smc_idx_lock;
	amdgpu_rreg_t			smc_rreg;
	amdgpu_wreg_t			smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t			pcie_idx_lock;
	amdgpu_rreg_t			pcie_rreg;
	amdgpu_wreg_t			pcie_wreg;
	amdgpu_rreg_t			pciep_rreg;
	amdgpu_wreg_t			pciep_wreg;
	amdgpu_rreg_ext_t		pcie_rreg_ext;
	amdgpu_wreg_ext_t		pcie_wreg_ext;
	amdgpu_rreg64_t			pcie_rreg64;
	amdgpu_wreg64_t			pcie_wreg64;
	amdgpu_rreg64_ext_t		pcie_rreg64_ext;
	amdgpu_wreg64_ext_t		pcie_wreg64_ext;
	/* protects concurrent UVD register access */
	spinlock_t			uvd_ctx_idx_lock;
	amdgpu_rreg_t			uvd_ctx_rreg;
	amdgpu_wreg_t			uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t			didt_idx_lock;
	amdgpu_rreg_t			didt_rreg;
	amdgpu_wreg_t			didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t			gc_cac_idx_lock;
	amdgpu_rreg_t			gc_cac_rreg;
	amdgpu_wreg_t			gc_cac_wreg;
	/* protects concurrent se_cac register access */
	spinlock_t			se_cac_idx_lock;
	amdgpu_rreg_t			se_cac_rreg;
	amdgpu_wreg_t			se_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t			audio_endpt_idx_lock;
	amdgpu_block_rreg_t		audio_endpt_rreg;
	amdgpu_block_wreg_t		audio_endpt_wreg;
	struct amdgpu_doorbell		doorbell;

	/* clock/pll info */
	struct amdgpu_clock		clock;

	/* MC */
	struct amdgpu_gmc		gmc;
	struct amdgpu_gart		gart;
	dma_addr_t			dummy_page_addr;
	struct amdgpu_vm_manager	vm_manager;
	struct amdgpu_vmhub		vmhub[AMDGPU_MAX_VMHUBS];
	DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS);

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_mem_scratch	mem_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			num_bytes_moved;
	atomic64_t			num_evictions;
	atomic64_t			num_vram_cpu_page_faults;
	atomic_t			gpu_reset_counter;
	atomic_t			vram_lost_counter;

	/* data for buffer migration throttling */
	struct {
		spinlock_t	lock;
		s64		last_update_us;
		s64		accum_us; /* accumulated microseconds */
		s64		accum_us_vis; /* for visible VRAM */
		u32		log2_max_MBps;
	} mm_stats;

	/* display */
	bool				enable_virtual_display;
	struct amdgpu_vkms_output	*amdgpu_vkms_output;
	struct amdgpu_mode_info		mode_info;
	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
	struct delayed_work		hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		vline0_irq;
	struct amdgpu_irq_src		vupdate_irq;
	struct amdgpu_irq_src		pageflip_irq;
	struct amdgpu_irq_src		hpd_irq;
	struct amdgpu_irq_src		dmub_trace_irq;
	struct amdgpu_irq_src		dmub_outbox_irq;

	/* rings */
	u64				fence_context;
	unsigned			num_rings;
	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	struct dma_fence __rcu		*gang_submit;
	bool				ib_pool_ready;
	struct amdgpu_sa_manager	ib_pools[AMDGPU_IB_POOL_MAX];
	struct amdgpu_sched		gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];

	/* interrupts */
	struct amdgpu_irq		irq;

	/* powerplay */
	struct amd_powerplay		powerplay;
	struct amdgpu_pm		pm;
	u64				cg_flags;
	u32				pg_flags;

	/* nbio */
	struct amdgpu_nbio		nbio;

	/* hdp */
	struct amdgpu_hdp		hdp;

	/* smuio */
	struct amdgpu_smuio		smuio;

	/* mmhub */
	struct amdgpu_mmhub		mmhub;

	/* gfxhub */
	struct amdgpu_gfxhub		gfxhub;

	/* gfx */
	struct amdgpu_gfx		gfx;

	/* sdma */
	struct amdgpu_sdma		sdma;

	/* lsdma */
	struct amdgpu_lsdma		lsdma;

	/* uvd */
	struct amdgpu_uvd		uvd;

	/* vce */
	struct amdgpu_vce		vce;

	/* vcn */
	struct amdgpu_vcn		vcn;

	/* jpeg */
	struct amdgpu_jpeg		jpeg;

	/* vpe */
	struct amdgpu_vpe		vpe;

	/* umsch */
	struct amdgpu_umsch_mm		umsch_mm;
	bool				enable_umsch_mm;

	/* firmwares */
	struct amdgpu_firmware		firmware;

	/* PSP */
	struct psp_context		psp;

	/* GDS */
	struct amdgpu_gds		gds;

	/* for userq and VM fences */
	struct amdgpu_seq64		seq64;

	/* KFD */
	struct amdgpu_kfd_dev		kfd;

	/* UMC */
	struct amdgpu_umc		umc;

	/* display related functionality */
	struct amdgpu_display_manager	dm;

#if defined(CONFIG_DRM_AMD_ISP)
	/* isp */
	struct amdgpu_isp		isp;
#endif

	/* mes */
	bool				enable_mes;
	bool				enable_mes_kiq;
	bool				enable_uni_mes;
	struct amdgpu_mes		mes;
	struct amdgpu_mqd		mqds[AMDGPU_HW_IP_NUM];

	/* df */
	struct amdgpu_df		df;

	/* MCA */
	struct amdgpu_mca		mca;

	/* ACA */
	struct amdgpu_aca		aca;

	struct amdgpu_ip_block		ip_blocks[AMDGPU_MAX_IP_NUM];
	uint32_t			harvest_ip_mask;
	int				num_ip_blocks;
	struct mutex			mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	atomic64_t			vram_pin_size;
	atomic64_t			visible_pin_size;
	atomic64_t			gart_pin_size;

	/* soc15 register offset based on ip, instance and segment */
	uint32_t			*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
	struct amdgpu_ip_map_info	ip_map;

	/* delayed work_func for deferring clockgating during resume */
	struct delayed_work		delayed_init_work;

	struct amdgpu_virt		virt;

	/* link all shadow bo */
	struct list_head		shadow_list;
	struct mutex			shadow_list_lock;

	/* record hw reset is performed */
	bool				has_hw_reset;
	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];

	/* s3/s4 mask */
	bool				in_suspend;
	bool				in_s3;
	bool				in_s4;
	bool				in_s0ix;
	/* indicate amdgpu suspension status */
	bool				suspend_complete;

	enum pp_mp1_state		mp1_state;
	struct amdgpu_doorbell_index	doorbell_index;

	struct mutex			notifier_lock;
	int				asic_reset_res;
	struct work_struct		xgmi_reset_work;
	struct list_head		reset_list;

	long				gfx_timeout;
	long				sdma_timeout;
	long				video_timeout;
	long				compute_timeout;
	long				psp_timeout;

	uint64_t			unique_id;
	uint64_t			df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];

	/* enable runtime pm on the device */
	bool				in_runpm;
	bool				has_pr3;

	bool				ucode_sysfs_en;

	struct amdgpu_fru_info		*fru_info;
	atomic_t			throttling_logging_enabled;
	struct ratelimit_state		throttling_logging_rs;
	uint32_t			ras_hw_enabled;
	uint32_t			ras_enabled;

	bool				no_hw_access;
	struct pci_saved_state		*pci_state;
	pci_channel_state_t		pci_channel_state;

	/* Track auto wait count on s_barrier settings */
	bool				barrier_has_auto_waitcnt;

	struct amdgpu_reset_control	*reset_cntl;
	uint32_t			ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];

	bool				ram_is_direct_mapped;

	struct list_head		ras_list;

	struct ip_discovery_top		*ip_top;

	struct amdgpu_reset_domain	*reset_domain;

	struct mutex			benchmark_mutex;

	bool				scpm_enabled;
	uint32_t			scpm_status;

	struct work_struct		reset_work;

	bool				job_hang;
	bool				dc_enabled;
	/* Mask of active clusters */
	uint32_t			aid_mask;

	/* Debug */
	bool				debug_vm;
	bool				debug_largebar;
	bool				debug_disable_soft_recovery;
	bool				debug_use_vram_fw_buf;
	bool				debug_enable_ras_aca;

	bool				enforce_isolation[MAX_XCP];
	/* Added this mutex for cleaner shader isolation between GFX and compute processes */
	struct mutex			enforce_isolation_mutex;
};

static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
					 uint8_t ip, uint8_t inst)
{
	/* This considers only major/minor/rev and ignores
	 * subrevision/variant fields.
	 */
	return adev->ip_versions[ip][inst] & ~0xFFU;
}

static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
					      uint8_t ip, uint8_t inst)
{
	/* This returns the full version - major/minor/rev/variant/subrevision */
	return adev->ip_versions[ip][inst];
}

static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{
	return container_of(ddev, struct amdgpu_device, ddev);
}

static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{
	return &adev->ddev;
}

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
{
	return container_of(bdev, struct amdgpu_device, mman.bdev);
}

int amdgpu_device_init(struct amdgpu_device *adev,
		       uint32_t flags);
void amdgpu_device_fini_hw(struct amdgpu_device *adev);
void amdgpu_device_fini_sw(struct amdgpu_device *adev);

int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write);
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write);

void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write);
uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
				    uint32_t inst, uint32_t reg_addr, char reg_name[],
				    uint32_t expected_value, uint32_t mask);
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags);
u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr);
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t acc_flags,
				uint32_t xcc_id);
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags);
void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data);
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t v,
			    uint32_t acc_flags,
			    uint32_t xcc_id);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v, uint32_t xcc_id);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);

u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr);
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr);
u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
				      u64 reg_addr);
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data);
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data);
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
				       u64 reg_addr, u64 reg_data);
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);

int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
				 struct amdgpu_reset_context *reset_context);

int amdgpu_do_asic_reset(struct list_head *device_list_handle,
			 struct amdgpu_reset_context *reset_context);
int emu_soc_asic_init(struct amdgpu_device *adev);

/*
 * Registers read & write functions.
 */
#define AMDGPU_REGS_NO_KIQ	(1<<1)
#define AMDGPU_REGS_RLC		(1<<2)

#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)

#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))

#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg))
#define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
#define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg))
#define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)

#define WREG32_SMC_P(_Reg, _Val, _Mask)				\
	do {							\
		u32 tmp = RREG32_SMC(_Reg);			\
		tmp &= (_Mask);					\
		tmp |= ((_Val) & ~(_Mask));			\
		WREG32_SMC(_Reg, tmp);				\
	} while (0)

#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
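/*
 * Illustrative example (not part of the original header): REG_SET_FIELD()
 * and REG_GET_FIELD() expect the generated register headers to provide
 * <reg>__<field>__SHIFT and <reg>__<field>_MASK definitions.  A typical
 * read-modify-write of a single field (register/field names below are only
 * an example) looks like:
 *
 *   u32 tmp = RREG32(mmRLC_CNTL);
 *   tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
 *   WREG32(mmRLC_CNTL, tmp);
 *
 * and REG_GET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32) extracts the field again.
 */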
/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) \
	((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev, r) \
	((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
#define amdgpu_asic_invalidate_hdp(adev, r) \
	((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
	 ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
#define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
	((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
#define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))

#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter))

#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
#define for_each_inst(i, inst_mask)        \
	for (i = ffs(inst_mask); i-- != 0; \
	     i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
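/*
 * Illustrative example (not part of the original header): for_each_inst()
 * walks the set bits of an instance mask, lowest bit first, e.g.
 *
 *   int i;
 *
 *   for_each_inst(i, adev->gfx.xcc_mask)
 *           init_one_xcc(adev, i);   (hypothetical per-instance helper)
 *
 * visits i = 0, 2, 3 for a mask of 0b1101.
 */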
/* Common functions */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job,
			      struct amdgpu_reset_context *reset_context);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size);

int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
int amdgpu_device_supports_baco(struct drm_device *dev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev);
int amdgpu_device_baco_enter(struct drm_device *dev);
int amdgpu_device_baco_exit(struct drm_device *dev);

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring);
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring);

void amdgpu_device_halt(struct amdgpu_device *adev);
u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				 u32 reg);
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				  u32 reg, u32 v);
struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
void *amdgpu_atpx_get_dhandle(void);
#else
static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_release_kms(struct drm_device *dev);

int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
int amdgpu_info_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *filp);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;

};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */

struct amdgpu_numa_info {
	uint64_t size;
	int pxm;
	int nid;
};

/* ATCS Device/Driver State */
#define AMDGPU_ATCS_PSC_DEV_STATE_D0		0
#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT	3
#define AMDGPU_ATCS_PSC_DRV_STATE_OPR		0
#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR	1

#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
bool amdgpu_acpi_is_power_shift_control_supported(void);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
				    u8 dev_state, bool drv_state);
int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
			     u64 *tmr_size);
int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
			     struct amdgpu_numa_info *numa_info);

void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
void amdgpu_acpi_release(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
					   u64 *tmr_offset, u64 *tmr_size)
{
	return -EINVAL;
}
static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
					   int xcc_id,
					   struct amdgpu_numa_info *numa_info)
{
	return -EINVAL;
}
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
						  u8 dev_state, bool drv_state) { return 0; }
static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
						 enum amdgpu_ss ss_state) { return 0; }
static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif

void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);

pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state);
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
void amdgpu_pci_resume(struct pci_dev *pdev);

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
bool amdgpu_device_load_pci_state(struct pci_dev *pdev);

bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);

int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
			       enum amd_clockgating_state state);
int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
			       enum amd_powergating_state state);

static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
{
	return amdgpu_gpu_recovery != 0 &&
		adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
		adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
		adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
		adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
}

#include "amdgpu_object.h"
static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
{
	return adev->gmc.tmz_enabled;
}

int amdgpu_in_reset(struct amdgpu_device *adev);

extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
extern const struct attribute_group amdgpu_flash_attr_group;

#endif