/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "amdgpu: " fmt

#ifdef dev_fmt
#undef dev_fmt
#endif

#define dev_fmt(fmt) "amdgpu: " fmt

#include "amdgpu_ctx.h"

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_vpe.h"
#include "amdgpu_umsch_mm.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
#include "amdgpu_lsdma.h"
#include "amdgpu_nbio.h"
#include "amdgpu_hdp.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
#include "amdgpu_mes_ctx.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_discovery.h"
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
#include "amdgpu_mmhub.h"
#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
#include "amdgpu_fdinfo.h"
#include "amdgpu_mca.h"
#include "amdgpu_aca.h"
#include "amdgpu_ras.h"
#include "amdgpu_cper.h"
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h" 118 #endif 119 120 #define MAX_GPU_INSTANCE 64 121 122 #define GFX_SLICE_PERIOD_MS 250 123 124 struct amdgpu_gpu_instance { 125 struct amdgpu_device *adev; 126 int mgpu_fan_enabled; 127 }; 128 129 struct amdgpu_mgpu_info { 130 struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE]; 131 struct mutex mutex; 132 uint32_t num_gpu; 133 uint32_t num_dgpu; 134 uint32_t num_apu; 135 }; 136 137 enum amdgpu_ss { 138 AMDGPU_SS_DRV_LOAD, 139 AMDGPU_SS_DEV_D0, 140 AMDGPU_SS_DEV_D3, 141 AMDGPU_SS_DRV_UNLOAD 142 }; 143 144 struct amdgpu_hwip_reg_entry { 145 u32 hwip; 146 u32 inst; 147 u32 seg; 148 u32 reg_offset; 149 const char *reg_name; 150 }; 151 152 struct amdgpu_watchdog_timer { 153 bool timeout_fatal_disable; 154 uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */ 155 }; 156 157 #define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256 158 159 /* 160 * Modules parameters. 161 */ 162 extern int amdgpu_modeset; 163 extern unsigned int amdgpu_vram_limit; 164 extern int amdgpu_vis_vram_limit; 165 extern int amdgpu_gart_size; 166 extern int amdgpu_gtt_size; 167 extern int amdgpu_moverate; 168 extern int amdgpu_audio; 169 extern int amdgpu_disp_priority; 170 extern int amdgpu_hw_i2c; 171 extern int amdgpu_pcie_gen2; 172 extern int amdgpu_msi; 173 extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; 174 extern int amdgpu_dpm; 175 extern int amdgpu_fw_load_type; 176 extern int amdgpu_aspm; 177 extern int amdgpu_runtime_pm; 178 extern uint amdgpu_ip_block_mask; 179 extern int amdgpu_bapm; 180 extern int amdgpu_deep_color; 181 extern int amdgpu_vm_size; 182 extern int amdgpu_vm_block_size; 183 extern int amdgpu_vm_fragment_size; 184 extern int amdgpu_vm_fault_stop; 185 extern int amdgpu_vm_debug; 186 extern int amdgpu_vm_update_mode; 187 extern int amdgpu_exp_hw_support; 188 extern int amdgpu_dc; 189 extern int amdgpu_sched_jobs; 190 extern int amdgpu_sched_hw_submission; 191 extern uint amdgpu_pcie_gen_cap; 192 extern uint amdgpu_pcie_lane_cap; 193 extern u64 amdgpu_cg_mask; 194 extern uint amdgpu_pg_mask; 195 extern uint amdgpu_sdma_phase_quantum; 196 extern char *amdgpu_disable_cu; 197 extern char *amdgpu_virtual_display; 198 extern uint amdgpu_pp_feature_mask; 199 extern uint amdgpu_force_long_training; 200 extern int amdgpu_lbpw; 201 extern int amdgpu_compute_multipipe; 202 extern int amdgpu_gpu_recovery; 203 extern int amdgpu_emu_mode; 204 extern uint amdgpu_smu_memory_pool_size; 205 extern int amdgpu_smu_pptable_id; 206 extern uint amdgpu_dc_feature_mask; 207 extern uint amdgpu_freesync_vid_mode; 208 extern uint amdgpu_dc_debug_mask; 209 extern uint amdgpu_dc_visual_confirm; 210 extern int amdgpu_dm_abm_level; 211 extern int amdgpu_backlight; 212 extern int amdgpu_damage_clips; 213 extern struct amdgpu_mgpu_info mgpu_info; 214 extern int amdgpu_ras_enable; 215 extern uint amdgpu_ras_mask; 216 extern int amdgpu_bad_page_threshold; 217 extern bool amdgpu_ignore_bad_page_threshold; 218 extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer; 219 extern int amdgpu_async_gfx_ring; 220 extern int amdgpu_mcbp; 221 extern int amdgpu_discovery; 222 extern int amdgpu_mes; 223 extern int amdgpu_mes_log_enable; 224 extern int amdgpu_mes_kiq; 225 extern int amdgpu_uni_mes; 226 extern int amdgpu_noretry; 227 extern int amdgpu_force_asic_type; 228 extern int amdgpu_smartshift_bias; 229 extern int amdgpu_use_xgmi_p2p; 230 extern int amdgpu_mtype_local; 231 extern bool enforce_isolation; 232 #ifdef CONFIG_HSA_AMD 233 extern int sched_policy; 234 extern bool 
#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH	256

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern unsigned int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern u64 amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern uint amdgpu_force_long_training;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern int amdgpu_dm_abm_level;
extern int amdgpu_backlight;
extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
extern int amdgpu_bad_page_threshold;
extern bool amdgpu_ignore_bad_page_threshold;
extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_uni_mes;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
extern int amdgpu_mtype_local;
extern bool enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
extern bool no_system_mem_limit;
extern int halt_if_hws_hang;
extern uint amdgpu_svm_default_granularity;
#else
static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
static const bool __maybe_unused debug_evictions; /* = false */
static const bool __maybe_unused no_system_mem_limit;
static const int __maybe_unused halt_if_hws_hang;
#endif
#ifdef CONFIG_HSA_AMD_P2P
extern bool pcie_p2p;
#endif

extern int amdgpu_tmz;
extern int amdgpu_reset_method;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif
extern int amdgpu_num_kcq;

#define AMDGPU_VCNFW_LOG_SIZE	(32 * 1024)
#define AMDGPU_UMSCHFW_LOG_SIZE	(32 * 1024)
extern int amdgpu_vcnfw_log;
extern int amdgpu_sg_display;
extern int amdgpu_umsch_mm;
extern int amdgpu_seamless;
extern int amdgpu_umsch_mm_fwlog;

extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;

extern int amdgpu_wbrf;

#define AMDGPU_VM_MAX_NUM_CTX		4096
#define AMDGPU_SG_THRESHOLD		(256 * 1024 * 1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
#define AMDGPU_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
#define AMDGPU_DEBUGFS_MAX_COMPONENTS	32
#define AMDGPUFB_CONN_LIMIT		4
#define AMDGPU_BIOS_NUM_SCRATCH		16

#define AMDGPU_VBIOS_VGA_ALLOCATION	(9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA		0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX		(1 << 0)
#define AMDGPU_RESET_COMPUTE		(1 << 1)
#define AMDGPU_RESET_DMA		(1 << 2)
#define AMDGPU_RESET_CP			(1 << 3)
#define AMDGPU_RESET_GRBM		(1 << 4)
#define AMDGPU_RESET_DMA1		(1 << 5)
#define AMDGPU_RESET_RLC		(1 << 6)
#define AMDGPU_RESET_SEM		(1 << 7)
#define AMDGPU_RESET_IH			(1 << 8)
#define AMDGPU_RESET_VMC		(1 << 9)
#define AMDGPU_RESET_MC			(1 << 10)
#define AMDGPU_RESET_DISPLAY		(1 << 11)
#define AMDGPU_RESET_UVD		(1 << 12)
#define AMDGPU_RESET_VCE		(1 << 13)
#define AMDGPU_RESET_VCE1		(1 << 14)

/* reset mask */
#define AMDGPU_RESET_TYPE_FULL		(1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */
#define AMDGPU_RESET_TYPE_SOFT_RESET	(1 << 1) /* IP level soft reset */
#define AMDGPU_RESET_TYPE_PER_QUEUE	(1 << 2) /* per queue */
#define AMDGPU_RESET_TYPE_PER_PIPE	(1 << 3) /* per pipe */

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH	128
#define CIK_CURSOR_HEIGHT	128

/* smart shift bias level limits */
#define AMDGPU_SMARTSHIFT_MAX_BIAS	(100)
#define AMDGPU_SMARTSHIFT_MIN_BIAS	(-100)

/* Extra time delay (in ms) to eliminate the influence of momentary temperature fluctuation */
#define AMDGPU_SWCTF_EXTRA_DELAY	50

struct amdgpu_xcp_mgr;
struct amdgpu_device;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct kfd_vm_fault_info;
struct amdgpu_hive_info;
struct amdgpu_reset_context;
struct amdgpu_reset_control;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
	AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};
#define MAX_KIQ_REG_WAIT		5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY			1000

int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
			       enum amd_ip_block_type block_type);
int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);

int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);

#define AMDGPU_MAX_IP_NUM	16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
	struct amdgpu_device *adev;
};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version);

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);
bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes);
void amdgpu_bios_release(struct amdgpu_device *adev);

/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL	3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffers or semaphores, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */

struct amdgpu_sa_manager {
	struct drm_suballoc_manager	base;
	struct amdgpu_bo		*bo;
	uint64_t			gpu_addr;
	void				*cpu_ptr;
};
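/*
 * Worked example of the end-of-buffer check above (illustrative numbers):
 * with total_size = 1024, last_object_offset = 512 and last_object_size =
 * 256, the free tail is 1024 - (512 + 256) = 256 bytes, so an alloc_size
 * of up to 256 is placed at offset 768; a larger request instead waits on
 * the oldest sub-allocations until enough contiguous tail space is freed.
 */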
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct delayed_work		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	u32				target_vblank;
	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_abo;
	unsigned			shared_count;
	struct dma_fence		**shared;
	struct dma_fence_cb		cb;
	bool				async;
};


/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct amdgpu_bo_va	*prt_va;
	struct amdgpu_bo_va	*csa_va;
	struct amdgpu_bo_va	*seq64_va;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
	/** GPU partition selection */
	uint32_t		xcp_id;
};

int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);

/*
 * Writeback
 */
#define AMDGPU_MAX_WB	1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
	spinlock_t		lock;
};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
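/*
 * Illustrative sketch of the writeback API above: a caller that needs a
 * CPU-visible status dword reserves a slot, derives the GPU and CPU views
 * from the slot index, and releases the slot on teardown. This mirrors how
 * the fence code uses its writeback offset; error handling trimmed.
 *
 *	u32 wb;
 *
 *	if (amdgpu_device_wb_get(adev, &wb))
 *		return -ENOMEM;
 *	wb_gpu_addr = adev->wb.gpu_addr + (wb * 4);	// GPU writes here
 *	cpu_ptr = &adev->wb.wb[wb];			// CPU polls here
 *	...
 *	amdgpu_device_wb_free(adev, wb);
 */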
/*
 * Benchmarking
 */
int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool grbm_indexed;
};

/**
 * enum amd_reset_method - Methods for resetting AMD GPU devices
 *
 * @AMD_RESET_METHOD_NONE: The device will not be reset.
 * @AMD_RESET_METHOD_LEGACY: Method reserved for SI, CIK and VI ASICs.
 * @AMD_RESET_METHOD_MODE0: Reset the entire ASIC. Not currently available for
 *                          any device.
 * @AMD_RESET_METHOD_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN,
 *                          etc.) individually. Suitable only for some discrete
 *                          GPUs, not available for all ASICs.
 * @AMD_RESET_METHOD_MODE2: Resets a lesser level of IPs compared to MODE1.
 *                          Which IPs are reset depends on the ASIC. Notably
 *                          doesn't reset IPs shared with the CPU on APUs or
 *                          the memory controllers (so VRAM is not lost). Not
 *                          available on all ASICs.
 * @AMD_RESET_METHOD_LINK: Triggers SW-UP link reset on other GPUs.
 * @AMD_RESET_METHOD_BACO: BACO (Bus Alive, Chip Off) method powers off and on
 *                         the card but without powering off the PCI bus.
 *                         Suitable only for discrete GPUs.
 * @AMD_RESET_METHOD_PCI: Does a full bus reset using core Linux subsystem PCI
 *                        reset and does a secondary bus reset or FLR,
 *                        depending on what the underlying hardware supports.
 *
 * Methods available to the AMD GPU driver for resetting the device. Not all
 * methods are suitable for every device. The user can override the method
 * using the module parameter `reset_method`.
 */
enum amd_reset_method {
	AMD_RESET_METHOD_NONE = -1,
	AMD_RESET_METHOD_LEGACY = 0,
	AMD_RESET_METHOD_MODE0,
	AMD_RESET_METHOD_MODE1,
	AMD_RESET_METHOD_MODE2,
	AMD_RESET_METHOD_LINK,
	AMD_RESET_METHOD_BACO,
	AMD_RESET_METHOD_PCI,
	AMD_RESET_METHOD_ON_INIT,
};

struct amdgpu_video_codec_info {
	u32 codec_type;
	u32 max_width;
	u32 max_height;
	u32 max_pixels_per_frame;
	u32 max_level;
};

#define codec_info_build(type, width, height, level) \
	.codec_type = type,\
	.max_width = width,\
	.max_height = height,\
	.max_pixels_per_frame = height * width,\
	.max_level = level,

struct amdgpu_video_codecs {
	const u32 codec_count;
	const struct amdgpu_video_codec_info *codec_array;
};
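/*
 * Illustrative use of codec_info_build(): the macro expands to designated
 * initializers, so a codec table entry can be written as (codec index
 * constant from amdgpu_drm.h):
 *
 *	static const struct amdgpu_video_codec_info codecs[] = {
 *		{ codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
 *				   4096, 4096, 52) },
 *	};
 *
 * which fills in codec_type, max_width/max_height, max_pixels_per_frame
 * (4096 * 4096) and max_level = 52.
 */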
/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
	/* flush hdp write queue */
	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	/* invalidate hdp read cache */
	void (*invalidate_hdp)(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring);
	/* check if the asic needs a full reset or if soft reset will work */
	bool (*need_full_reset)(struct amdgpu_device *adev);
	/* initialize doorbell layout for specific asic */
	void (*init_doorbell_index)(struct amdgpu_device *adev);
	/* PCIe bandwidth usage */
	void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
			       uint64_t *count1);
	/* do we need to reset the asic at init time (e.g., kexec) */
	bool (*need_reset_on_init)(struct amdgpu_device *adev);
	/* PCIe replay counter */
	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
	/* device supports BACO */
	int (*supports_baco)(struct amdgpu_device *adev);
	/* pre asic_init quirks */
	void (*pre_asic_init)(struct amdgpu_device *adev);
	/* enter/exit umd stable pstate */
	int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
	/* query video codecs */
	int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
				  const struct amdgpu_video_codecs **codecs);
	/* encode "> 32bits" smn addressing */
	u64 (*encode_ext_smn_addressing)(int ext_id);

	ssize_t (*get_reg_state)(struct amdgpu_device *adev,
				 enum amdgpu_reg_state reg_state, void *buf,
				 size_t max_size);
};

/*
 * IOCTL.
 */
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_mem_scratch {
	struct amdgpu_bo	*robj;
	volatile uint32_t	*ptr;
	u64			gpu_addr;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device *, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device *, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device *, uint64_t);
typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device *, uint64_t, uint32_t);

typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device *, uint32_t);
typedef void (*amdgpu_wreg64_t)(struct amdgpu_device *, uint32_t, uint64_t);

typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device *, uint64_t);
typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device *, uint64_t, uint64_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device *, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device *, uint32_t, uint32_t, uint32_t);

struct amdgpu_mmio_remap {
	u32 reg_offset;
	resource_size_t bus_addr;
};

/* HW IP blocks used in the driver; add more if necessary */
enum amd_hw_ip_block_type {
	GC_HWIP = 1,
	HDP_HWIP,
	SDMA0_HWIP,
	SDMA1_HWIP,
	SDMA2_HWIP,
	SDMA3_HWIP,
	SDMA4_HWIP,
	SDMA5_HWIP,
	SDMA6_HWIP,
	SDMA7_HWIP,
	LSDMA_HWIP,
	MMHUB_HWIP,
	ATHUB_HWIP,
	NBIO_HWIP,
	MP0_HWIP,
	MP1_HWIP,
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	JPEG_HWIP = VCN_HWIP,
	VCN1_HWIP,
	VCE_HWIP,
	VPE_HWIP,
	DF_HWIP,
	DCE_HWIP,
	OSSSYS_HWIP,
	SMUIO_HWIP,
	PWR_HWIP,
	NBIF_HWIP,
	THM_HWIP,
	CLK_HWIP,
	UMC_HWIP,
	RSMU_HWIP,
	XGMI_HWIP,
	DCI_HWIP,
	PCIE_HWIP,
	ISP_HWIP,
	MAX_HWIP
};

#define HWIP_MAX_INSTANCE	44

#define HW_ID_MAX		300
#define IP_VERSION_FULL(mj, mn, rv, var, srev) \
	(((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
#define IP_VERSION(mj, mn, rv)		IP_VERSION_FULL(mj, mn, rv, 0, 0)
#define IP_VERSION_MAJ(ver)		((ver) >> 24)
#define IP_VERSION_MIN(ver)		(((ver) >> 16) & 0xFF)
#define IP_VERSION_REV(ver)		(((ver) >> 8) & 0xFF)
#define IP_VERSION_VARIANT(ver)		(((ver) >> 4) & 0xF)
#define IP_VERSION_SUBREV(ver)		((ver) & 0xF)
#define IP_VERSION_MAJ_MIN_REV(ver)	((ver) >> 8)
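/*
 * Worked example (illustrative): IP_VERSION(9, 4, 3) packs to
 * (9 << 24) | (4 << 16) | (3 << 8) = 0x09040300, with variant and
 * sub-revision zero. IP_VERSION_MAJ_MIN_REV() drops the low byte, so two
 * full versions that differ only in variant/subrev compare equal:
 * IP_VERSION_MAJ_MIN_REV(IP_VERSION_FULL(9, 4, 3, 1, 2)) == 0x090403.
 */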
struct amdgpu_ip_map_info {
	/* Map of logical to actual dev instances/mask */
	uint32_t	dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
	int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev,
				      enum amd_hw_ip_block_type block,
				      int8_t inst);
	uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev,
					enum amd_hw_ip_block_type block,
					uint32_t mask);
};

struct amd_powerplay {
	void *pp_handle;
	const struct amd_pm_funcs *pp_funcs;
};

struct ip_discovery_top;

/* polaris10 kickers */
#define ASICID_IS_P20(did, rid)		(((did == 0x67DF) && \
					 ((rid == 0xE3) || \
					  (rid == 0xE4) || \
					  (rid == 0xE5) || \
					  (rid == 0xE7) || \
					  (rid == 0xEF))) || \
					 ((did == 0x6FDF) && \
					 ((rid == 0xE7) || \
					  (rid == 0xEF) || \
					  (rid == 0xFF))))

#define ASICID_IS_P30(did, rid)		((did == 0x67DF) && \
					((rid == 0xE1) || \
					 (rid == 0xF7)))

/* polaris11 kickers */
#define ASICID_IS_P21(did, rid)		(((did == 0x67EF) && \
					 ((rid == 0xE0) || \
					  (rid == 0xE5))) || \
					 ((did == 0x67FF) && \
					 ((rid == 0xCF) || \
					  (rid == 0xEF) || \
					  (rid == 0xFF))))

#define ASICID_IS_P31(did, rid)		((did == 0x67EF) && \
					((rid == 0xE2)))

/* polaris12 kickers */
#define ASICID_IS_P23(did, rid)		(((did == 0x6987) && \
					 ((rid == 0xC0) || \
					  (rid == 0xC1) || \
					  (rid == 0xC3) || \
					  (rid == 0xC7))) || \
					 ((did == 0x6981) && \
					 ((rid == 0x00) || \
					  (rid == 0x01) || \
					  (rid == 0x10))))

struct amdgpu_mqd_prop {
	uint64_t mqd_gpu_addr;
	uint64_t hqd_base_gpu_addr;
	uint64_t rptr_gpu_addr;
	uint64_t wptr_gpu_addr;
	uint32_t queue_size;
	bool use_doorbell;
	uint32_t doorbell_index;
	uint64_t eop_gpu_addr;
	uint32_t hqd_pipe_priority;
	uint32_t hqd_queue_priority;
	bool allow_tunneling;
	bool hqd_active;
};

struct amdgpu_mqd {
	unsigned mqd_size;
	int (*init_mqd)(struct amdgpu_device *adev, void *mqd,
			struct amdgpu_mqd_prop *p);
};

struct amdgpu_pcie_reset_ctx {
	bool in_link_reset;
	bool occurs_dpc;
	bool audio_suspended;
};

/*
 * Custom init levels could be defined for different situations where a full
 * initialization of all hardware blocks is not expected. Sample cases are
 * custom init sequences after resume from S0i3/S3, reset on initialization,
 * partial reset of blocks etc. Levels are described in the corresponding
 * struct definitions - amdgpu_init_default, amdgpu_init_minimal_xgmi.
 */
enum amdgpu_init_lvl_id {
	AMDGPU_INIT_LEVEL_DEFAULT,
	AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
	AMDGPU_INIT_LEVEL_RESET_RECOVERY,
};

struct amdgpu_init_level {
	enum amdgpu_init_lvl_id level;
	uint32_t hwini_ip_block_mask;
};

#define AMDGPU_RESET_MAGIC_NUM	64
#define AMDGPU_MAX_DF_PERFMONS	4
struct amdgpu_reset_domain;
struct amdgpu_fru_info;

/*
 * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
 */
#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)

struct amdgpu_device {
	struct device			*dev;
	struct pci_dev			*pdev;
	struct drm_device		ddev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp		acp;
#endif
	struct amdgpu_hive_info		*hive;
	struct amdgpu_xcp_mgr		*xcp_mgr;
	/* ASIC */
	enum amd_asic_type		asic_type;
	uint32_t			family;
	uint32_t			rev_id;
	uint32_t			external_rev_id;
	unsigned long			flags;
	unsigned long			apu_flags;
	int				usec_timeout;
	const struct amdgpu_asic_funcs	*asic_funcs;
	bool				shutdown;
	bool				need_swiotlb;
	bool				accel_working;
	struct notifier_block		acpi_nb;
	struct notifier_block		pm_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct debugfs_blob_wrapper	debugfs_vbios_blob;
	struct debugfs_blob_wrapper	debugfs_discovery_blob;
	struct mutex			srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex			grbm_idx_mutex;
	struct dev_pm_domain		vga_pm_domain;
	bool				have_disp_power_ref;
	bool				have_atomics_support;

	/* BIOS */
	bool				is_atom_fw;
	uint8_t				*bios;
	uint32_t			bios_size;
	uint32_t			bios_scratch_reg_offset;
	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t			mmio_idx_lock;
	struct amdgpu_mmio_remap	rmmio_remap;
	/* protects concurrent SMC based register access */
	spinlock_t			smc_idx_lock;
	amdgpu_rreg_t			smc_rreg;
	amdgpu_wreg_t			smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t			pcie_idx_lock;
	amdgpu_rreg_t			pcie_rreg;
	amdgpu_wreg_t			pcie_wreg;
	amdgpu_rreg_t			pciep_rreg;
	amdgpu_wreg_t			pciep_wreg;
	amdgpu_rreg_ext_t		pcie_rreg_ext;
	amdgpu_wreg_ext_t		pcie_wreg_ext;
	amdgpu_rreg64_t			pcie_rreg64;
	amdgpu_wreg64_t			pcie_wreg64;
	amdgpu_rreg64_ext_t		pcie_rreg64_ext;
	amdgpu_wreg64_ext_t		pcie_wreg64_ext;
	/* protects concurrent UVD register access */
	spinlock_t			uvd_ctx_idx_lock;
	amdgpu_rreg_t			uvd_ctx_rreg;
	amdgpu_wreg_t			uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t			didt_idx_lock;
	amdgpu_rreg_t			didt_rreg;
	amdgpu_wreg_t			didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t			gc_cac_idx_lock;
	amdgpu_rreg_t			gc_cac_rreg;
	amdgpu_wreg_t			gc_cac_wreg;
	/* protects concurrent se_cac register access */
	spinlock_t			se_cac_idx_lock;
	amdgpu_rreg_t			se_cac_rreg;
	amdgpu_wreg_t			se_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t			audio_endpt_idx_lock;
	amdgpu_block_rreg_t		audio_endpt_rreg;
	amdgpu_block_wreg_t		audio_endpt_wreg;
	struct amdgpu_doorbell		doorbell;

	/* clock/pll info */
	struct amdgpu_clock		clock;

	/* MC */
	struct amdgpu_gmc		gmc;
	struct amdgpu_gart		gart;
	dma_addr_t			dummy_page_addr;
	struct amdgpu_vm_manager	vm_manager;
	struct amdgpu_vmhub		vmhub[AMDGPU_MAX_VMHUBS];
	DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS);

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_mem_scratch	mem_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			num_bytes_moved;
	atomic64_t			num_evictions;
	atomic64_t			num_vram_cpu_page_faults;
	atomic_t			gpu_reset_counter;
	atomic_t			vram_lost_counter;

	/* data for buffer migration throttling */
	struct {
		spinlock_t		lock;
		s64			last_update_us;
		s64			accum_us; /* accumulated microseconds */
		s64			accum_us_vis; /* for visible VRAM */
		u32			log2_max_MBps;
	} mm_stats;

	/* display */
	bool				enable_virtual_display;
	struct amdgpu_vkms_output	*amdgpu_vkms_output;
	struct amdgpu_mode_info		mode_info;
	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
	struct delayed_work		hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		vline0_irq;
	struct amdgpu_irq_src		vupdate_irq;
	struct amdgpu_irq_src		pageflip_irq;
	struct amdgpu_irq_src		hpd_irq;
	struct amdgpu_irq_src		dmub_trace_irq;
	struct amdgpu_irq_src		dmub_outbox_irq;

	/* rings */
	u64				fence_context;
	unsigned			num_rings;
	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	struct dma_fence __rcu		*gang_submit;
	bool				ib_pool_ready;
	struct amdgpu_sa_manager	ib_pools[AMDGPU_IB_POOL_MAX];
	struct amdgpu_sched		gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];

	/* interrupts */
	struct amdgpu_irq		irq;

	/* powerplay */
	struct amd_powerplay		powerplay;
	struct amdgpu_pm		pm;
	u64				cg_flags;
	u32				pg_flags;

	/* nbio */
	struct amdgpu_nbio		nbio;

	/* hdp */
	struct amdgpu_hdp		hdp;

	/* smuio */
	struct amdgpu_smuio		smuio;

	/* mmhub */
	struct amdgpu_mmhub		mmhub;

	/* gfxhub */
	struct amdgpu_gfxhub		gfxhub;

	/* gfx */
	struct amdgpu_gfx		gfx;

	/* sdma */
	struct amdgpu_sdma		sdma;

	/* lsdma */
	struct amdgpu_lsdma		lsdma;

	/* uvd */
	struct amdgpu_uvd		uvd;

	/* vce */
	struct amdgpu_vce		vce;

	/* vcn */
	struct amdgpu_vcn		vcn;

	/* jpeg */
	struct amdgpu_jpeg		jpeg;

	/* vpe */
	struct amdgpu_vpe		vpe;

	/* umsch */
	struct amdgpu_umsch_mm		umsch_mm;
	bool				enable_umsch_mm;

	/* firmwares */
	struct amdgpu_firmware		firmware;

	/* PSP */
	struct psp_context		psp;

	/* GDS */
	struct amdgpu_gds		gds;

	/* for userq and VM fences */
	struct amdgpu_seq64		seq64;

	/* KFD */
	struct amdgpu_kfd_dev		kfd;

	/* UMC */
	struct amdgpu_umc		umc;

	/* display related functionality */
	struct amdgpu_display_manager	dm;

#if defined(CONFIG_DRM_AMD_ISP)
	/* isp */
	struct amdgpu_isp		isp;
#endif

	/* mes */
	bool				enable_mes;
	bool				enable_mes_kiq;
	bool				enable_uni_mes;
	struct amdgpu_mes		mes;
	struct amdgpu_mqd		mqds[AMDGPU_HW_IP_NUM];

	/* df */
	struct amdgpu_df		df;

	/* MCA */
	struct amdgpu_mca		mca;

	/* ACA */
	struct amdgpu_aca		aca;

	/* CPER */
	struct amdgpu_cper		cper;

	struct amdgpu_ip_block		ip_blocks[AMDGPU_MAX_IP_NUM];
	uint32_t			harvest_ip_mask;
	int				num_ip_blocks;
	struct mutex			mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	atomic64_t			vram_pin_size;
	atomic64_t			visible_pin_size;
	atomic64_t			gart_pin_size;

	/* soc15 register offset based on ip, instance and segment */
	uint32_t			*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
	struct amdgpu_ip_map_info	ip_map;

	/* delayed work_func for deferring clockgating during resume */
	struct delayed_work		delayed_init_work;

	struct amdgpu_virt		virt;

	/* record hw reset is performed */
	bool				has_hw_reset;
	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];

	/* s3/s4 mask */
	bool				in_suspend;
	bool				in_s3;
	bool				in_s4;
	bool				in_s0ix;

	enum pp_mp1_state		mp1_state;
	struct amdgpu_doorbell_index	doorbell_index;

	struct mutex			notifier_lock;

	int				asic_reset_res;
	struct work_struct		xgmi_reset_work;
	struct list_head		reset_list;
	long				gfx_timeout;
	long				sdma_timeout;
	long				video_timeout;
	long				compute_timeout;
	long				psp_timeout;

	uint64_t			unique_id;
	uint64_t			df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];

	/* enable runtime pm on the device */
	bool				in_runpm;
	bool				has_pr3;

	bool				ucode_sysfs_en;

	struct amdgpu_fru_info		*fru_info;
	atomic_t			throttling_logging_enabled;
	struct ratelimit_state		throttling_logging_rs;
	uint32_t			ras_hw_enabled;
	uint32_t			ras_enabled;
	bool				ras_default_ecc_enabled;

	bool				no_hw_access;
	struct pci_saved_state		*pci_state;
	pci_channel_state_t		pci_channel_state;

	struct amdgpu_pcie_reset_ctx	pcie_reset_ctx;

	/* Track auto wait count on s_barrier settings */
	bool				barrier_has_auto_waitcnt;

	struct amdgpu_reset_control	*reset_cntl;
	uint32_t			ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];

	bool				ram_is_direct_mapped;

	struct list_head		ras_list;

	struct ip_discovery_top		*ip_top;

	struct amdgpu_reset_domain	*reset_domain;

	struct mutex			benchmark_mutex;

	bool				scpm_enabled;
	uint32_t			scpm_status;

	struct work_struct		reset_work;

	bool				dc_enabled;
	/* Mask of active clusters */
	uint32_t			aid_mask;

	/* Debug */
	bool				debug_vm;
	bool				debug_largebar;
	bool				debug_disable_soft_recovery;
	bool				debug_use_vram_fw_buf;
	bool				debug_enable_ras_aca;
	bool				debug_exp_resets;
	bool				debug_disable_gpu_ring_reset;

	/* Protection for the following isolation structure */
	struct mutex			enforce_isolation_mutex;
	bool				enforce_isolation[MAX_XCP];
	struct amdgpu_isolation {
		void			*owner;
		struct dma_fence	*spearhead;
		struct amdgpu_sync	active;
		struct amdgpu_sync	prev;
	} isolation[MAX_XCP];

	struct amdgpu_init_level	*init_lvl;

	/* This flag is used to determine how VRAM allocations are handled for
	 * APUs in KFD: VRAM or GTT.
	 */
	bool				apu_prefer_gtt;
};

static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
					 uint8_t ip, uint8_t inst)
{
	/* This considers only major/minor/rev and ignores
	 * subrevision/variant fields.
	 */
	return adev->ip_versions[ip][inst] & ~0xFFU;
}

static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
					      uint8_t ip, uint8_t inst)
{
	/* This returns the full version - major/minor/rev/variant/subrevision */
	return adev->ip_versions[ip][inst];
}
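/*
 * Typical use of the helpers above (illustrative): version checks compare
 * against the packed encoding, e.g.
 *
 *	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
 *		... gfx11-specific path ...
 *
 * amdgpu_ip_version() masks out variant/subrev, so the comparison only
 * considers major/minor/rev.
 */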
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{
	return container_of(ddev, struct amdgpu_device, ddev);
}

static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{
	return &adev->ddev;
}

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
{
	return container_of(bdev, struct amdgpu_device, mman.bdev);
}

int amdgpu_device_init(struct amdgpu_device *adev,
		       uint32_t flags);
void amdgpu_device_fini_hw(struct amdgpu_device *adev);
void amdgpu_device_fini_sw(struct amdgpu_device *adev);

int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write);
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write);

void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write);
uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
				    uint32_t inst, uint32_t reg_addr, char reg_name[],
				    uint32_t expected_value, uint32_t mask);
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags);
u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr);
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t acc_flags,
				uint32_t xcc_id);
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags);
void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data);
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t v,
			    uint32_t acc_flags,
			    uint32_t xcc_id);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v, uint32_t xcc_id);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);

u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr);
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr);
u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
				      u64 reg_addr);
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data);
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data);
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
				       u64 reg_addr, u64 reg_data);
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);

int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
				 struct amdgpu_reset_context *reset_context);

int amdgpu_do_asic_reset(struct list_head *device_list_handle,
			 struct amdgpu_reset_context *reset_context);

int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context);

int emu_soc_asic_init(struct amdgpu_device *adev);
/*
 * Registers read & write functions.
 */
#define AMDGPU_REGS_NO_KIQ	(1 << 1)
#define AMDGPU_REGS_RLC		(1 << 2)

#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)

#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))

#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg))
#define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
#define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg))
#define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)

#define WREG32_SMC_P(_Reg, _Val, _Mask)				\
	do {							\
		u32 tmp = RREG32_SMC(_Reg);			\
		tmp &= (_Mask);					\
		tmp |= ((_Val) & ~(_Mask));			\
		WREG32_SMC(_Reg, tmp);				\
	} while (0)
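/*
 * Illustrative read-modify-write with the helpers above: the mask selects
 * the bits to KEEP from the current value, and val supplies the bits
 * outside the mask, e.g. updating an 8-bit field at bits 15:8 while
 * preserving everything else:
 *
 *	WREG32_P(mmSOME_REG, (0x5a << 8), ~(0xff << 8));
 *
 * WREG32_AND()/WREG32_OR() are shorthands for pure clears and sets.
 * (mmSOME_REG is a placeholder register name.)
 */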
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
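/*
 * Illustrative use of the field helpers above, assuming register headers
 * that define e.g. CP_RB_CNTL__RB_BUFSZ__SHIFT and CP_RB_CNTL__RB_BUFSZ_MASK:
 *
 *	u32 tmp = RREG32(mmCP_RB_CNTL);
 *	tmp = REG_SET_FIELD(tmp, CP_RB_CNTL, RB_BUFSZ, order);
 *	WREG32(mmCP_RB_CNTL, tmp);
 *
 *	u32 bufsz = REG_GET_FIELD(tmp, CP_RB_CNTL, RB_BUFSZ);
 *
 * REG_SET_FIELD() replaces only the named field; REG_GET_FIELD() masks and
 * shifts it back down to a plain value.
 */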
/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) \
	((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev, r) \
	((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
#define amdgpu_asic_invalidate_hdp(adev, r) \
	((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
	 ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
#define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
	((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
#define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))

#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter))

#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
#define for_each_inst(i, inst_mask)        \
	for (i = ffs(inst_mask); i-- != 0; \
	     i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
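/*
 * Illustrative walk of an instance mask with for_each_inst(): for
 * inst_mask = 0xb (binary 1011) the loop body runs with i = 0, 1, 3.
 * ffs() is 1-based, so the macro's i-- converts to a 0-based instance
 * index and BIT_MASK_UPPER() strips the bits already visited:
 *
 *	int i;
 *	unsigned int mask = 0xb;
 *
 *	for_each_inst(i, mask)
 *		dev_dbg(adev->dev, "instance %d present\n", i);
 */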
/* Common functions */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job,
			      struct amdgpu_reset_context *reset_context);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size);

int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
int amdgpu_device_link_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
int amdgpu_device_supports_baco(struct drm_device *dev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev);
int amdgpu_device_baco_enter(struct drm_device *dev);
int amdgpu_device_baco_exit(struct drm_device *dev);

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring);
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring);

void amdgpu_device_halt(struct amdgpu_device *adev);
u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				 u32 reg);
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				  u32 reg, u32 v);
struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang);
struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
						  struct amdgpu_ring *ring,
						  struct amdgpu_job *job);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_release_kms(struct drm_device *dev);

int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
int amdgpu_info_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *filp);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;

};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */

struct amdgpu_numa_info {
	uint64_t size;
	int pxm;
	int nid;
};

/* ATCS Device/Driver State */
#define AMDGPU_ATCS_PSC_DEV_STATE_D0		0
#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT	3
#define AMDGPU_ATCS_PSC_DRV_STATE_OPR		0
#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR	1

#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
bool amdgpu_acpi_is_power_shift_control_supported(void);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
				    u8 dev_state, bool drv_state);
int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
			     u64 *tmr_size);
int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
			     struct amdgpu_numa_info *numa_info);

void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
void amdgpu_acpi_release(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
					   u64 *tmr_offset, u64 *tmr_size)
{
	return -EINVAL;
}
static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
					   int xcc_id,
					   struct amdgpu_numa_info *numa_info)
{
	return -EINVAL;
}
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
						  u8 dev_state, bool drv_state) { return 0; }
static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
						 enum amdgpu_ss ss_state) { return 0; }
static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif

void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);

pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state);
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
void amdgpu_pci_resume(struct pci_dev *pdev);

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
bool amdgpu_device_load_pci_state(struct pci_dev *pdev);

bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);

int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
			       enum amd_clockgating_state state);
int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
			       enum amd_powergating_state state);

static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
{
	return amdgpu_gpu_recovery != 0 &&
		adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
		adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
		adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
		adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
}

#include "amdgpu_object.h"

static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
{
	return adev->gmc.tmz_enabled;
}

int amdgpu_in_reset(struct amdgpu_device *adev);
extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
extern const struct attribute_group amdgpu_flash_attr_group;

void amdgpu_set_init_level(struct amdgpu_device *adev,
			   enum amdgpu_init_lvl_id lvl);
#endif