1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #ifndef __AMDGPU_H__ 29 #define __AMDGPU_H__ 30 31 #ifdef pr_fmt 32 #undef pr_fmt 33 #endif 34 35 #define pr_fmt(fmt) "amdgpu: " fmt 36 37 #ifdef dev_fmt 38 #undef dev_fmt 39 #endif 40 41 #define dev_fmt(fmt) "amdgpu: " fmt 42 43 #include "amdgpu_ctx.h" 44 45 #include <linux/atomic.h> 46 #include <linux/wait.h> 47 #include <linux/list.h> 48 #include <linux/kref.h> 49 #include <linux/rbtree.h> 50 #include <linux/hashtable.h> 51 #include <linux/dma-fence.h> 52 #include <linux/pci.h> 53 54 #include <drm/ttm/ttm_bo.h> 55 #include <drm/ttm/ttm_placement.h> 56 57 #include <drm/amdgpu_drm.h> 58 #include <drm/drm_gem.h> 59 #include <drm/drm_ioctl.h> 60 61 #include <kgd_kfd_interface.h> 62 #include "dm_pp_interface.h" 63 #include "kgd_pp_interface.h" 64 65 #include "amd_shared.h" 66 #include "amdgpu_utils.h" 67 #include "amdgpu_mode.h" 68 #include "amdgpu_ih.h" 69 #include "amdgpu_irq.h" 70 #include "amdgpu_ucode.h" 71 #include "amdgpu_ttm.h" 72 #include "amdgpu_psp.h" 73 #include "amdgpu_gds.h" 74 #include "amdgpu_sync.h" 75 #include "amdgpu_ring.h" 76 #include "amdgpu_vm.h" 77 #include "amdgpu_dpm.h" 78 #include "amdgpu_acp.h" 79 #include "amdgpu_uvd.h" 80 #include "amdgpu_vce.h" 81 #include "amdgpu_vcn.h" 82 #include "amdgpu_jpeg.h" 83 #include "amdgpu_vpe.h" 84 #include "amdgpu_umsch_mm.h" 85 #include "amdgpu_gmc.h" 86 #include "amdgpu_gfx.h" 87 #include "amdgpu_sdma.h" 88 #include "amdgpu_lsdma.h" 89 #include "amdgpu_nbio.h" 90 #include "amdgpu_hdp.h" 91 #include "amdgpu_dm.h" 92 #include "amdgpu_virt.h" 93 #include "amdgpu_csa.h" 94 #include "amdgpu_mes_ctx.h" 95 #include "amdgpu_gart.h" 96 #include "amdgpu_debugfs.h" 97 #include "amdgpu_job.h" 98 #include "amdgpu_bo_list.h" 99 #include "amdgpu_gem.h" 100 #include "amdgpu_doorbell.h" 101 #include "amdgpu_amdkfd.h" 102 #include "amdgpu_discovery.h" 103 #include "amdgpu_mes.h" 104 #include "amdgpu_umc.h" 105 #include "amdgpu_mmhub.h" 106 #include "amdgpu_gfxhub.h" 107 #include "amdgpu_df.h" 108 #include "amdgpu_smuio.h" 109 #include "amdgpu_fdinfo.h" 110 #include "amdgpu_mca.h" 111 #include "amdgpu_aca.h" 112 #include "amdgpu_ras.h" 113 #include "amdgpu_cper.h" 114 #include "amdgpu_xcp.h" 115 #include "amdgpu_seq64.h" 116 #include "amdgpu_reg_state.h" 117 #include "amdgpu_userq.h" 118 #include "amdgpu_eviction_fence.h" 119 #include "amdgpu_ip.h" 120 #if defined(CONFIG_DRM_AMD_ISP) 121 #include "amdgpu_isp.h" 122 #endif 123 124 #define MAX_GPU_INSTANCE 64 125 126 #define GFX_SLICE_PERIOD_MS 250 127 128 struct amdgpu_gpu_instance { 129 struct amdgpu_device *adev; 130 int mgpu_fan_enabled; 131 }; 132 133 struct amdgpu_mgpu_info { 134 struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE]; 135 struct mutex mutex; 136 uint32_t num_gpu; 137 uint32_t num_dgpu; 138 uint32_t num_apu; 139 }; 140 141 enum amdgpu_ss { 142 AMDGPU_SS_DRV_LOAD, 143 AMDGPU_SS_DEV_D0, 144 AMDGPU_SS_DEV_D3, 145 AMDGPU_SS_DRV_UNLOAD 146 }; 147 148 struct amdgpu_hwip_reg_entry { 149 u32 hwip; 150 u32 inst; 151 u32 seg; 152 u32 reg_offset; 153 const char *reg_name; 154 }; 155 156 struct amdgpu_watchdog_timer { 157 bool timeout_fatal_disable; 158 uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */ 159 }; 160 161 #define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256 162 163 /* 164 * Modules parameters. 165 */ 166 extern int amdgpu_modeset; 167 extern unsigned int amdgpu_vram_limit; 168 extern int amdgpu_vis_vram_limit; 169 extern int amdgpu_gart_size; 170 extern int amdgpu_gtt_size; 171 extern int amdgpu_moverate; 172 extern int amdgpu_audio; 173 extern int amdgpu_disp_priority; 174 extern int amdgpu_hw_i2c; 175 extern int amdgpu_pcie_gen2; 176 extern int amdgpu_msi; 177 extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; 178 extern int amdgpu_dpm; 179 extern int amdgpu_fw_load_type; 180 extern int amdgpu_aspm; 181 extern int amdgpu_runtime_pm; 182 extern uint amdgpu_ip_block_mask; 183 extern int amdgpu_bapm; 184 extern int amdgpu_deep_color; 185 extern int amdgpu_vm_size; 186 extern int amdgpu_vm_block_size; 187 extern int amdgpu_vm_fragment_size; 188 extern int amdgpu_vm_fault_stop; 189 extern int amdgpu_vm_debug; 190 extern int amdgpu_vm_update_mode; 191 extern int amdgpu_exp_hw_support; 192 extern int amdgpu_dc; 193 extern int amdgpu_sched_jobs; 194 extern int amdgpu_sched_hw_submission; 195 extern uint amdgpu_pcie_gen_cap; 196 extern uint amdgpu_pcie_lane_cap; 197 extern u64 amdgpu_cg_mask; 198 extern uint amdgpu_pg_mask; 199 extern uint amdgpu_sdma_phase_quantum; 200 extern char *amdgpu_disable_cu; 201 extern char *amdgpu_virtual_display; 202 extern uint amdgpu_pp_feature_mask; 203 extern uint amdgpu_force_long_training; 204 extern int amdgpu_lbpw; 205 extern int amdgpu_compute_multipipe; 206 extern int amdgpu_gpu_recovery; 207 extern int amdgpu_emu_mode; 208 extern uint amdgpu_smu_memory_pool_size; 209 extern int amdgpu_smu_pptable_id; 210 extern uint amdgpu_dc_feature_mask; 211 extern uint amdgpu_freesync_vid_mode; 212 extern uint amdgpu_dc_debug_mask; 213 extern uint amdgpu_dc_visual_confirm; 214 extern int amdgpu_dm_abm_level; 215 extern int amdgpu_backlight; 216 extern int amdgpu_damage_clips; 217 extern struct amdgpu_mgpu_info mgpu_info; 218 extern int amdgpu_ras_enable; 219 extern uint amdgpu_ras_mask; 220 extern int amdgpu_bad_page_threshold; 221 extern bool amdgpu_ignore_bad_page_threshold; 222 extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer; 223 extern int amdgpu_async_gfx_ring; 224 extern int amdgpu_mcbp; 225 extern int amdgpu_discovery; 226 extern int amdgpu_mes; 227 extern int amdgpu_mes_log_enable; 228 extern int amdgpu_mes_kiq; 229 extern int amdgpu_uni_mes; 230 extern int amdgpu_noretry; 231 extern int amdgpu_force_asic_type; 232 extern int amdgpu_smartshift_bias; 233 extern int amdgpu_use_xgmi_p2p; 234 extern int amdgpu_mtype_local; 235 extern int amdgpu_enforce_isolation; 236 #ifdef CONFIG_HSA_AMD 237 extern int sched_policy; 238 extern bool debug_evictions; 239 extern bool no_system_mem_limit; 240 extern int halt_if_hws_hang; 241 extern uint amdgpu_svm_default_granularity; 242 #else 243 static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS; 244 static const bool __maybe_unused debug_evictions; /* = false */ 245 static const bool __maybe_unused no_system_mem_limit; 246 static const int __maybe_unused halt_if_hws_hang; 247 #endif 248 #ifdef CONFIG_HSA_AMD_P2P 249 extern bool pcie_p2p; 250 #endif 251 252 extern int amdgpu_tmz; 253 extern int amdgpu_reset_method; 254 255 #ifdef CONFIG_DRM_AMDGPU_SI 256 extern int amdgpu_si_support; 257 #endif 258 #ifdef CONFIG_DRM_AMDGPU_CIK 259 extern int amdgpu_cik_support; 260 #endif 261 extern int amdgpu_num_kcq; 262 263 #define AMDGPU_VCNFW_LOG_SIZE (32 * 1024) 264 #define AMDGPU_UMSCHFW_LOG_SIZE (32 * 1024) 265 extern int amdgpu_vcnfw_log; 266 extern int amdgpu_sg_display; 267 extern int amdgpu_umsch_mm; 268 extern int amdgpu_seamless; 269 extern int amdgpu_umsch_mm_fwlog; 270 271 extern int amdgpu_user_partt_mode; 272 extern int amdgpu_agp; 273 extern int amdgpu_rebar; 274 275 extern int amdgpu_wbrf; 276 extern int amdgpu_user_queue; 277 278 #define AMDGPU_VM_MAX_NUM_CTX 4096 279 #define AMDGPU_SG_THRESHOLD (256*1024*1024) 280 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 281 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 282 #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) 283 #define AMDGPU_DEBUGFS_MAX_COMPONENTS 32 284 #define AMDGPUFB_CONN_LIMIT 4 285 #define AMDGPU_BIOS_NUM_SCRATCH 16 286 287 #define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */ 288 289 /* hard reset data */ 290 #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b 291 292 /* reset flags */ 293 #define AMDGPU_RESET_GFX (1 << 0) 294 #define AMDGPU_RESET_COMPUTE (1 << 1) 295 #define AMDGPU_RESET_DMA (1 << 2) 296 #define AMDGPU_RESET_CP (1 << 3) 297 #define AMDGPU_RESET_GRBM (1 << 4) 298 #define AMDGPU_RESET_DMA1 (1 << 5) 299 #define AMDGPU_RESET_RLC (1 << 6) 300 #define AMDGPU_RESET_SEM (1 << 7) 301 #define AMDGPU_RESET_IH (1 << 8) 302 #define AMDGPU_RESET_VMC (1 << 9) 303 #define AMDGPU_RESET_MC (1 << 10) 304 #define AMDGPU_RESET_DISPLAY (1 << 11) 305 #define AMDGPU_RESET_UVD (1 << 12) 306 #define AMDGPU_RESET_VCE (1 << 13) 307 #define AMDGPU_RESET_VCE1 (1 << 14) 308 309 /* reset mask */ 310 #define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */ 311 #define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */ 312 #define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */ 313 #define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */ 314 315 /* max cursor sizes (in pixels) */ 316 #define CIK_CURSOR_WIDTH 128 317 #define CIK_CURSOR_HEIGHT 128 318 319 /* smart shift bias level limits */ 320 #define AMDGPU_SMARTSHIFT_MAX_BIAS (100) 321 #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100) 322 323 /* Extra time delay(in ms) to eliminate the influence of temperature momentary fluctuation */ 324 #define AMDGPU_SWCTF_EXTRA_DELAY 50 325 326 struct amdgpu_xcp_mgr; 327 struct amdgpu_device; 328 struct amdgpu_irq_src; 329 struct amdgpu_fpriv; 330 struct amdgpu_bo_va_mapping; 331 struct kfd_vm_fault_info; 332 struct amdgpu_hive_info; 333 struct amdgpu_reset_context; 334 struct amdgpu_reset_control; 335 336 enum amdgpu_cp_irq { 337 AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0, 338 AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP, 339 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP, 340 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP, 341 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP, 342 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP, 343 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP, 344 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP, 345 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP, 346 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP, 347 348 AMDGPU_CP_IRQ_LAST 349 }; 350 351 enum amdgpu_thermal_irq { 352 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0, 353 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW, 354 355 AMDGPU_THERMAL_IRQ_LAST 356 }; 357 358 enum amdgpu_kiq_irq { 359 AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0, 360 AMDGPU_CP_KIQ_IRQ_LAST 361 }; 362 #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ 363 #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ 364 #define MAX_KIQ_REG_TRY 1000 365 366 /* 367 * BIOS. 368 */ 369 bool amdgpu_get_bios(struct amdgpu_device *adev); 370 bool amdgpu_read_bios(struct amdgpu_device *adev); 371 bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev, 372 u8 *bios, u32 length_bytes); 373 void amdgpu_bios_release(struct amdgpu_device *adev); 374 /* 375 * Clocks 376 */ 377 378 #define AMDGPU_MAX_PPLL 3 379 380 struct amdgpu_clock { 381 struct amdgpu_pll ppll[AMDGPU_MAX_PPLL]; 382 struct amdgpu_pll spll; 383 struct amdgpu_pll mpll; 384 /* 10 Khz units */ 385 uint32_t default_mclk; 386 uint32_t default_sclk; 387 uint32_t default_dispclk; 388 uint32_t dp_extclk; 389 uint32_t max_pixel_clock; 390 }; 391 392 /* sub-allocation manager, it has to be protected by another lock. 393 * By conception this is an helper for other part of the driver 394 * like the indirect buffer or semaphore, which both have their 395 * locking. 396 * 397 * Principe is simple, we keep a list of sub allocation in offset 398 * order (first entry has offset == 0, last entry has the highest 399 * offset). 400 * 401 * When allocating new object we first check if there is room at 402 * the end total_size - (last_object_offset + last_object_size) >= 403 * alloc_size. If so we allocate new object there. 404 * 405 * When there is not enough room at the end, we start waiting for 406 * each sub object until we reach object_offset+object_size >= 407 * alloc_size, this object then become the sub object we return. 408 * 409 * Alignment can't be bigger than page size. 410 * 411 * Hole are not considered for allocation to keep things simple. 412 * Assumption is that there won't be hole (all object on same 413 * alignment). 414 */ 415 416 struct amdgpu_sa_manager { 417 struct drm_suballoc_manager base; 418 struct amdgpu_bo *bo; 419 uint64_t gpu_addr; 420 void *cpu_ptr; 421 }; 422 423 /* 424 * IRQS. 425 */ 426 427 struct amdgpu_flip_work { 428 struct delayed_work flip_work; 429 struct work_struct unpin_work; 430 struct amdgpu_device *adev; 431 int crtc_id; 432 u32 target_vblank; 433 uint64_t base; 434 struct drm_pending_vblank_event *event; 435 struct amdgpu_bo *old_abo; 436 unsigned shared_count; 437 struct dma_fence **shared; 438 struct dma_fence_cb cb; 439 bool async; 440 }; 441 442 /* 443 * file private structure 444 */ 445 446 struct amdgpu_fpriv { 447 struct amdgpu_vm vm; 448 struct amdgpu_bo_va *prt_va; 449 struct amdgpu_bo_va *csa_va; 450 struct amdgpu_bo_va *seq64_va; 451 struct mutex bo_list_lock; 452 struct idr bo_list_handles; 453 struct amdgpu_ctx_mgr ctx_mgr; 454 struct amdgpu_userq_mgr userq_mgr; 455 456 /* Eviction fence infra */ 457 struct amdgpu_eviction_fence_mgr evf_mgr; 458 459 /** GPU partition selection */ 460 uint32_t xcp_id; 461 }; 462 463 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); 464 465 /* 466 * Writeback 467 */ 468 #define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */ 469 470 /** 471 * amdgpu_wb - This struct is used for small GPU memory allocation. 472 * 473 * This struct is used to allocate a small amount of GPU memory that can be 474 * used to shadow certain states into the memory. This is especially useful for 475 * providing easy CPU access to some states without requiring register access 476 * (e.g., if some block is power gated, reading register may be problematic). 477 * 478 * Note: the term writeback was initially used because many of the amdgpu 479 * components had some level of writeback memory, and this struct initially 480 * described those components. 481 */ 482 struct amdgpu_wb { 483 484 /** 485 * @wb_obj: 486 * 487 * Buffer Object used for the writeback memory. 488 */ 489 struct amdgpu_bo *wb_obj; 490 491 /** 492 * @wb: 493 * 494 * Pointer to the first writeback slot. In terms of CPU address 495 * this value can be accessed directly by using the offset as an index. 496 * For the GPU address, it is necessary to use gpu_addr and the offset. 497 */ 498 uint32_t *wb; 499 500 /** 501 * @gpu_addr: 502 * 503 * Writeback base address in the GPU. 504 */ 505 uint64_t gpu_addr; 506 507 /** 508 * @num_wb: 509 * 510 * Number of writeback slots reserved for amdgpu. 511 */ 512 u32 num_wb; 513 514 /** 515 * @used: 516 * 517 * Track the writeback slot already used. 518 */ 519 unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)]; 520 521 /** 522 * @lock: 523 * 524 * Protects read and write of the used field array. 525 */ 526 spinlock_t lock; 527 }; 528 529 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb); 530 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb); 531 532 /* 533 * Benchmarking 534 */ 535 int amdgpu_benchmark(struct amdgpu_device *adev, int test_number); 536 537 /* 538 * ASIC specific register table accessible by UMD 539 */ 540 struct amdgpu_allowed_register_entry { 541 uint32_t reg_offset; 542 bool grbm_indexed; 543 }; 544 545 /** 546 * enum amd_reset_method - Methods for resetting AMD GPU devices 547 * 548 * @AMD_RESET_METHOD_NONE: The device will not be reset. 549 * @AMD_RESET_LEGACY: Method reserved for SI, CIK and VI ASICs. 550 * @AMD_RESET_MODE0: Reset the entire ASIC. Not currently available for the 551 * any device. 552 * @AMD_RESET_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN, etc.) 553 * individually. Suitable only for some discrete GPU, not 554 * available for all ASICs. 555 * @AMD_RESET_MODE2: Resets a lesser level of IPs compared to MODE1. Which IPs 556 * are reset depends on the ASIC. Notably doesn't reset IPs 557 * shared with the CPU on APUs or the memory controllers (so 558 * VRAM is not lost). Not available on all ASICs. 559 * @AMD_RESET_LINK: Triggers SW-UP link reset on other GPUs 560 * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card 561 * but without powering off the PCI bus. Suitable only for 562 * discrete GPUs. 563 * @AMD_RESET_PCI: Does a full bus reset using core Linux subsystem PCI reset 564 * and does a secondary bus reset or FLR, depending on what the 565 * underlying hardware supports. 566 * 567 * Methods available for AMD GPU driver for resetting the device. Not all 568 * methods are suitable for every device. User can override the method using 569 * module parameter `reset_method`. 570 */ 571 enum amd_reset_method { 572 AMD_RESET_METHOD_NONE = -1, 573 AMD_RESET_METHOD_LEGACY = 0, 574 AMD_RESET_METHOD_MODE0, 575 AMD_RESET_METHOD_MODE1, 576 AMD_RESET_METHOD_MODE2, 577 AMD_RESET_METHOD_LINK, 578 AMD_RESET_METHOD_BACO, 579 AMD_RESET_METHOD_PCI, 580 AMD_RESET_METHOD_ON_INIT, 581 }; 582 583 struct amdgpu_video_codec_info { 584 u32 codec_type; 585 u32 max_width; 586 u32 max_height; 587 u32 max_pixels_per_frame; 588 u32 max_level; 589 }; 590 591 #define codec_info_build(type, width, height, level) \ 592 .codec_type = type,\ 593 .max_width = width,\ 594 .max_height = height,\ 595 .max_pixels_per_frame = height * width,\ 596 .max_level = level, 597 598 struct amdgpu_video_codecs { 599 const u32 codec_count; 600 const struct amdgpu_video_codec_info *codec_array; 601 }; 602 603 /* 604 * ASIC specific functions. 605 */ 606 struct amdgpu_asic_funcs { 607 bool (*read_disabled_bios)(struct amdgpu_device *adev); 608 bool (*read_bios_from_rom)(struct amdgpu_device *adev, 609 u8 *bios, u32 length_bytes); 610 int (*read_register)(struct amdgpu_device *adev, u32 se_num, 611 u32 sh_num, u32 reg_offset, u32 *value); 612 void (*set_vga_state)(struct amdgpu_device *adev, bool state); 613 int (*reset)(struct amdgpu_device *adev); 614 enum amd_reset_method (*reset_method)(struct amdgpu_device *adev); 615 /* get the reference clock */ 616 u32 (*get_xclk)(struct amdgpu_device *adev); 617 /* MM block clocks */ 618 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); 619 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 620 /* static power management */ 621 int (*get_pcie_lanes)(struct amdgpu_device *adev); 622 void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes); 623 /* get config memsize register */ 624 u32 (*get_config_memsize)(struct amdgpu_device *adev); 625 /* flush hdp write queue */ 626 void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring); 627 /* invalidate hdp read cache */ 628 void (*invalidate_hdp)(struct amdgpu_device *adev, 629 struct amdgpu_ring *ring); 630 /* check if the asic needs a full reset of if soft reset will work */ 631 bool (*need_full_reset)(struct amdgpu_device *adev); 632 /* initialize doorbell layout for specific asic*/ 633 void (*init_doorbell_index)(struct amdgpu_device *adev); 634 /* PCIe bandwidth usage */ 635 void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0, 636 uint64_t *count1); 637 /* do we need to reset the asic at init time (e.g., kexec) */ 638 bool (*need_reset_on_init)(struct amdgpu_device *adev); 639 /* PCIe replay counter */ 640 uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev); 641 /* device supports BACO */ 642 int (*supports_baco)(struct amdgpu_device *adev); 643 /* pre asic_init quirks */ 644 void (*pre_asic_init)(struct amdgpu_device *adev); 645 /* enter/exit umd stable pstate */ 646 int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter); 647 /* query video codecs */ 648 int (*query_video_codecs)(struct amdgpu_device *adev, bool encode, 649 const struct amdgpu_video_codecs **codecs); 650 /* encode "> 32bits" smn addressing */ 651 u64 (*encode_ext_smn_addressing)(int ext_id); 652 653 ssize_t (*get_reg_state)(struct amdgpu_device *adev, 654 enum amdgpu_reg_state reg_state, void *buf, 655 size_t max_size); 656 }; 657 658 /* 659 * IOCTL. 660 */ 661 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, 662 struct drm_file *filp); 663 664 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 665 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, 666 struct drm_file *filp); 667 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 668 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, 669 struct drm_file *filp); 670 671 /* VRAM scratch page for HDP bug, default vram page */ 672 struct amdgpu_mem_scratch { 673 struct amdgpu_bo *robj; 674 uint32_t *ptr; 675 u64 gpu_addr; 676 }; 677 678 /* 679 * CGS 680 */ 681 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); 682 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); 683 684 /* 685 * Core structure, functions and helpers. 686 */ 687 typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); 688 typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 689 690 typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t); 691 typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t); 692 693 typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t); 694 typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t); 695 696 typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t); 697 typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t); 698 699 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 700 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); 701 702 struct amdgpu_mmio_remap { 703 u32 reg_offset; 704 resource_size_t bus_addr; 705 struct amdgpu_bo *bo; 706 }; 707 708 enum amdgpu_uid_type { 709 AMDGPU_UID_TYPE_XCD, 710 AMDGPU_UID_TYPE_AID, 711 AMDGPU_UID_TYPE_SOC, 712 AMDGPU_UID_TYPE_MAX 713 }; 714 715 #define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */ 716 717 struct amdgpu_uid { 718 uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX]; 719 struct amdgpu_device *adev; 720 }; 721 722 struct amd_powerplay { 723 void *pp_handle; 724 const struct amd_pm_funcs *pp_funcs; 725 }; 726 727 /* polaris10 kickers */ 728 #define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \ 729 ((rid == 0xE3) || \ 730 (rid == 0xE4) || \ 731 (rid == 0xE5) || \ 732 (rid == 0xE7) || \ 733 (rid == 0xEF))) || \ 734 ((did == 0x6FDF) && \ 735 ((rid == 0xE7) || \ 736 (rid == 0xEF) || \ 737 (rid == 0xFF)))) 738 739 #define ASICID_IS_P30(did, rid) ((did == 0x67DF) && \ 740 ((rid == 0xE1) || \ 741 (rid == 0xF7))) 742 743 /* polaris11 kickers */ 744 #define ASICID_IS_P21(did, rid) (((did == 0x67EF) && \ 745 ((rid == 0xE0) || \ 746 (rid == 0xE5))) || \ 747 ((did == 0x67FF) && \ 748 ((rid == 0xCF) || \ 749 (rid == 0xEF) || \ 750 (rid == 0xFF)))) 751 752 #define ASICID_IS_P31(did, rid) ((did == 0x67EF) && \ 753 ((rid == 0xE2))) 754 755 /* polaris12 kickers */ 756 #define ASICID_IS_P23(did, rid) (((did == 0x6987) && \ 757 ((rid == 0xC0) || \ 758 (rid == 0xC1) || \ 759 (rid == 0xC3) || \ 760 (rid == 0xC7))) || \ 761 ((did == 0x6981) && \ 762 ((rid == 0x00) || \ 763 (rid == 0x01) || \ 764 (rid == 0x10)))) 765 766 struct amdgpu_mqd_prop { 767 uint64_t mqd_gpu_addr; 768 uint64_t hqd_base_gpu_addr; 769 uint64_t rptr_gpu_addr; 770 uint64_t wptr_gpu_addr; 771 uint32_t queue_size; 772 bool use_doorbell; 773 uint32_t doorbell_index; 774 uint64_t eop_gpu_addr; 775 uint32_t hqd_pipe_priority; 776 uint32_t hqd_queue_priority; 777 bool allow_tunneling; 778 bool hqd_active; 779 uint64_t shadow_addr; 780 uint64_t gds_bkup_addr; 781 uint64_t csa_addr; 782 uint64_t fence_address; 783 bool tmz_queue; 784 bool kernel_queue; 785 }; 786 787 struct amdgpu_mqd { 788 unsigned mqd_size; 789 int (*init_mqd)(struct amdgpu_device *adev, void *mqd, 790 struct amdgpu_mqd_prop *p); 791 }; 792 793 struct amdgpu_pcie_reset_ctx { 794 bool in_link_reset; 795 bool occurs_dpc; 796 bool audio_suspended; 797 struct pci_dev *swus; 798 struct pci_saved_state *swus_pcistate; 799 struct pci_saved_state *swds_pcistate; 800 }; 801 802 /* 803 * Custom Init levels could be defined for different situations where a full 804 * initialization of all hardware blocks are not expected. Sample cases are 805 * custom init sequences after resume after S0i3/S3, reset on initialization, 806 * partial reset of blocks etc. Presently, this defines only two levels. Levels 807 * are described in corresponding struct definitions - amdgpu_init_default, 808 * amdgpu_init_minimal_xgmi. 809 */ 810 enum amdgpu_init_lvl_id { 811 AMDGPU_INIT_LEVEL_DEFAULT, 812 AMDGPU_INIT_LEVEL_MINIMAL_XGMI, 813 AMDGPU_INIT_LEVEL_RESET_RECOVERY, 814 }; 815 816 struct amdgpu_init_level { 817 enum amdgpu_init_lvl_id level; 818 uint32_t hwini_ip_block_mask; 819 }; 820 821 #define AMDGPU_RESET_MAGIC_NUM 64 822 #define AMDGPU_MAX_DF_PERFMONS 4 823 struct amdgpu_reset_domain; 824 struct amdgpu_fru_info; 825 826 enum amdgpu_enforce_isolation_mode { 827 AMDGPU_ENFORCE_ISOLATION_DISABLE = 0, 828 AMDGPU_ENFORCE_ISOLATION_ENABLE = 1, 829 AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2, 830 AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3, 831 }; 832 833 struct amdgpu_device { 834 struct device *dev; 835 struct pci_dev *pdev; 836 struct drm_device ddev; 837 838 #ifdef CONFIG_DRM_AMD_ACP 839 struct amdgpu_acp acp; 840 #endif 841 struct amdgpu_hive_info *hive; 842 struct amdgpu_xcp_mgr *xcp_mgr; 843 /* ASIC */ 844 enum amd_asic_type asic_type; 845 uint32_t family; 846 uint32_t rev_id; 847 uint32_t external_rev_id; 848 unsigned long flags; 849 unsigned long apu_flags; 850 int usec_timeout; 851 const struct amdgpu_asic_funcs *asic_funcs; 852 bool shutdown; 853 bool need_swiotlb; 854 bool accel_working; 855 struct notifier_block acpi_nb; 856 struct notifier_block pm_nb; 857 struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; 858 struct debugfs_blob_wrapper debugfs_vbios_blob; 859 struct mutex srbm_mutex; 860 /* GRBM index mutex. Protects concurrent access to GRBM index */ 861 struct mutex grbm_idx_mutex; 862 struct dev_pm_domain vga_pm_domain; 863 bool have_disp_power_ref; 864 bool have_atomics_support; 865 866 /* BIOS */ 867 bool is_atom_fw; 868 uint8_t *bios; 869 uint32_t bios_size; 870 uint32_t bios_scratch_reg_offset; 871 uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; 872 873 /* Register/doorbell mmio */ 874 resource_size_t rmmio_base; 875 resource_size_t rmmio_size; 876 void __iomem *rmmio; 877 /* protects concurrent MM_INDEX/DATA based register access */ 878 spinlock_t mmio_idx_lock; 879 struct amdgpu_mmio_remap rmmio_remap; 880 /* protects concurrent SMC based register access */ 881 spinlock_t smc_idx_lock; 882 amdgpu_rreg_t smc_rreg; 883 amdgpu_wreg_t smc_wreg; 884 /* protects concurrent PCIE register access */ 885 spinlock_t pcie_idx_lock; 886 amdgpu_rreg_t pcie_rreg; 887 amdgpu_wreg_t pcie_wreg; 888 amdgpu_rreg_t pciep_rreg; 889 amdgpu_wreg_t pciep_wreg; 890 amdgpu_rreg_ext_t pcie_rreg_ext; 891 amdgpu_wreg_ext_t pcie_wreg_ext; 892 amdgpu_rreg64_t pcie_rreg64; 893 amdgpu_wreg64_t pcie_wreg64; 894 amdgpu_rreg64_ext_t pcie_rreg64_ext; 895 amdgpu_wreg64_ext_t pcie_wreg64_ext; 896 /* protects concurrent UVD register access */ 897 spinlock_t uvd_ctx_idx_lock; 898 amdgpu_rreg_t uvd_ctx_rreg; 899 amdgpu_wreg_t uvd_ctx_wreg; 900 /* protects concurrent DIDT register access */ 901 spinlock_t didt_idx_lock; 902 amdgpu_rreg_t didt_rreg; 903 amdgpu_wreg_t didt_wreg; 904 /* protects concurrent gc_cac register access */ 905 spinlock_t gc_cac_idx_lock; 906 amdgpu_rreg_t gc_cac_rreg; 907 amdgpu_wreg_t gc_cac_wreg; 908 /* protects concurrent se_cac register access */ 909 spinlock_t se_cac_idx_lock; 910 amdgpu_rreg_t se_cac_rreg; 911 amdgpu_wreg_t se_cac_wreg; 912 /* protects concurrent ENDPOINT (audio) register access */ 913 spinlock_t audio_endpt_idx_lock; 914 amdgpu_block_rreg_t audio_endpt_rreg; 915 amdgpu_block_wreg_t audio_endpt_wreg; 916 struct amdgpu_doorbell doorbell; 917 918 /* clock/pll info */ 919 struct amdgpu_clock clock; 920 921 /* MC */ 922 struct amdgpu_gmc gmc; 923 struct amdgpu_gart gart; 924 dma_addr_t dummy_page_addr; 925 struct amdgpu_vm_manager vm_manager; 926 struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS]; 927 DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS); 928 929 /* memory management */ 930 struct amdgpu_mman mman; 931 struct amdgpu_mem_scratch mem_scratch; 932 struct amdgpu_wb wb; 933 atomic64_t num_bytes_moved; 934 atomic64_t num_evictions; 935 atomic64_t num_vram_cpu_page_faults; 936 atomic_t gpu_reset_counter; 937 atomic_t vram_lost_counter; 938 939 /* data for buffer migration throttling */ 940 struct { 941 spinlock_t lock; 942 s64 last_update_us; 943 s64 accum_us; /* accumulated microseconds */ 944 s64 accum_us_vis; /* for visible VRAM */ 945 u32 log2_max_MBps; 946 } mm_stats; 947 948 /* discovery*/ 949 struct amdgpu_discovery_info discovery; 950 951 /* display */ 952 bool enable_virtual_display; 953 struct amdgpu_vkms_output *amdgpu_vkms_output; 954 struct amdgpu_mode_info mode_info; 955 /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */ 956 struct delayed_work hotplug_work; 957 struct amdgpu_irq_src crtc_irq; 958 struct amdgpu_irq_src vline0_irq; 959 struct amdgpu_irq_src vupdate_irq; 960 struct amdgpu_irq_src pageflip_irq; 961 struct amdgpu_irq_src hpd_irq; 962 struct amdgpu_irq_src dmub_trace_irq; 963 struct amdgpu_irq_src dmub_outbox_irq; 964 965 /* rings */ 966 u64 fence_context; 967 unsigned num_rings; 968 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; 969 struct dma_fence __rcu *gang_submit; 970 bool ib_pool_ready; 971 struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX]; 972 struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; 973 974 /* interrupts */ 975 struct amdgpu_irq irq; 976 977 /* powerplay */ 978 struct amd_powerplay powerplay; 979 struct amdgpu_pm pm; 980 u64 cg_flags; 981 u32 pg_flags; 982 983 /* nbio */ 984 struct amdgpu_nbio nbio; 985 986 /* hdp */ 987 struct amdgpu_hdp hdp; 988 989 /* smuio */ 990 struct amdgpu_smuio smuio; 991 992 /* mmhub */ 993 struct amdgpu_mmhub mmhub; 994 995 /* gfxhub */ 996 struct amdgpu_gfxhub gfxhub; 997 998 /* gfx */ 999 struct amdgpu_gfx gfx; 1000 1001 /* sdma */ 1002 struct amdgpu_sdma sdma; 1003 1004 /* lsdma */ 1005 struct amdgpu_lsdma lsdma; 1006 1007 /* uvd */ 1008 struct amdgpu_uvd uvd; 1009 1010 /* vce */ 1011 struct amdgpu_vce vce; 1012 1013 /* vcn */ 1014 struct amdgpu_vcn vcn; 1015 1016 /* jpeg */ 1017 struct amdgpu_jpeg jpeg; 1018 1019 /* vpe */ 1020 struct amdgpu_vpe vpe; 1021 1022 /* umsch */ 1023 struct amdgpu_umsch_mm umsch_mm; 1024 bool enable_umsch_mm; 1025 1026 /* firmwares */ 1027 struct amdgpu_firmware firmware; 1028 1029 /* PSP */ 1030 struct psp_context psp; 1031 1032 /* GDS */ 1033 struct amdgpu_gds gds; 1034 1035 /* for userq and VM fences */ 1036 struct amdgpu_seq64 seq64; 1037 1038 /* UMC */ 1039 struct amdgpu_umc umc; 1040 1041 /* display related functionality */ 1042 struct amdgpu_display_manager dm; 1043 1044 #if defined(CONFIG_DRM_AMD_ISP) 1045 /* isp */ 1046 struct amdgpu_isp isp; 1047 #endif 1048 1049 /* mes */ 1050 bool enable_mes; 1051 bool enable_mes_kiq; 1052 bool enable_uni_mes; 1053 struct amdgpu_mes mes; 1054 struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM]; 1055 const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM]; 1056 1057 /* xarray used to retrieve the user queue fence driver reference 1058 * in the EOP interrupt handler to signal the particular user 1059 * queue fence. 1060 */ 1061 struct xarray userq_xa; 1062 /** 1063 * @userq_doorbell_xa: Global user queue map (doorbell index → queue) 1064 * Key: doorbell_index (unique global identifier for the queue) 1065 * Value: struct amdgpu_usermode_queue 1066 */ 1067 struct xarray userq_doorbell_xa; 1068 1069 /* df */ 1070 struct amdgpu_df df; 1071 1072 /* MCA */ 1073 struct amdgpu_mca mca; 1074 1075 /* ACA */ 1076 struct amdgpu_aca aca; 1077 1078 /* CPER */ 1079 struct amdgpu_cper cper; 1080 1081 struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; 1082 uint32_t harvest_ip_mask; 1083 int num_ip_blocks; 1084 struct mutex mn_lock; 1085 DECLARE_HASHTABLE(mn_hash, 7); 1086 1087 /* tracking pinned memory */ 1088 atomic64_t vram_pin_size; 1089 atomic64_t visible_pin_size; 1090 atomic64_t gart_pin_size; 1091 1092 /* soc15 register offset based on ip, instance and segment */ 1093 uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; 1094 struct amdgpu_ip_map_info ip_map; 1095 1096 /* delayed work_func for deferring clockgating during resume */ 1097 struct delayed_work delayed_init_work; 1098 1099 struct amdgpu_virt virt; 1100 1101 /* record hw reset is performed */ 1102 bool has_hw_reset; 1103 u8 reset_magic[AMDGPU_RESET_MAGIC_NUM]; 1104 1105 /* s3/s4 mask */ 1106 bool in_suspend; 1107 bool in_s3; 1108 bool in_s4; 1109 bool in_s0ix; 1110 suspend_state_t last_suspend_state; 1111 1112 enum pp_mp1_state mp1_state; 1113 struct amdgpu_doorbell_index doorbell_index; 1114 1115 struct mutex notifier_lock; 1116 1117 int asic_reset_res; 1118 struct work_struct xgmi_reset_work; 1119 struct list_head reset_list; 1120 1121 long gfx_timeout; 1122 long sdma_timeout; 1123 long video_timeout; 1124 long compute_timeout; 1125 long psp_timeout; 1126 1127 uint64_t unique_id; 1128 uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; 1129 1130 /* enable runtime pm on the device */ 1131 bool in_runpm; 1132 bool has_pr3; 1133 1134 bool ucode_sysfs_en; 1135 1136 struct amdgpu_fru_info *fru_info; 1137 atomic_t throttling_logging_enabled; 1138 struct ratelimit_state throttling_logging_rs; 1139 uint32_t ras_hw_enabled; 1140 uint32_t ras_enabled; 1141 bool ras_default_ecc_enabled; 1142 1143 bool no_hw_access; 1144 struct pci_saved_state *pci_state; 1145 pci_channel_state_t pci_channel_state; 1146 1147 struct amdgpu_pcie_reset_ctx pcie_reset_ctx; 1148 1149 /* Track auto wait count on s_barrier settings */ 1150 bool barrier_has_auto_waitcnt; 1151 1152 struct amdgpu_reset_control *reset_cntl; 1153 uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE]; 1154 1155 bool ram_is_direct_mapped; 1156 1157 struct list_head ras_list; 1158 1159 struct amdgpu_reset_domain *reset_domain; 1160 1161 struct mutex benchmark_mutex; 1162 1163 bool scpm_enabled; 1164 uint32_t scpm_status; 1165 1166 struct work_struct reset_work; 1167 1168 bool dc_enabled; 1169 /* Mask of active clusters */ 1170 uint32_t aid_mask; 1171 1172 /* Debug */ 1173 bool debug_vm; 1174 bool debug_largebar; 1175 bool debug_disable_soft_recovery; 1176 bool debug_use_vram_fw_buf; 1177 bool debug_enable_ras_aca; 1178 bool debug_exp_resets; 1179 bool debug_disable_gpu_ring_reset; 1180 bool debug_vm_userptr; 1181 bool debug_disable_ce_logs; 1182 bool debug_enable_ce_cs; 1183 1184 /* Protection for the following isolation structure */ 1185 struct mutex enforce_isolation_mutex; 1186 enum amdgpu_enforce_isolation_mode enforce_isolation[MAX_XCP]; 1187 struct amdgpu_isolation { 1188 void *owner; 1189 struct dma_fence *spearhead; 1190 struct amdgpu_sync active; 1191 struct amdgpu_sync prev; 1192 } isolation[MAX_XCP]; 1193 1194 struct amdgpu_init_level *init_lvl; 1195 1196 /* This flag is used to determine how VRAM allocations are handled for APUs 1197 * in KFD: VRAM or GTT. 1198 */ 1199 bool apu_prefer_gtt; 1200 1201 bool userq_halt_for_enforce_isolation; 1202 struct work_struct userq_reset_work; 1203 struct amdgpu_uid *uid_info; 1204 1205 /* KFD 1206 * Must be last --ends in a flexible-array member. 1207 */ 1208 struct amdgpu_kfd_dev kfd; 1209 }; 1210 1211 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, 1212 uint8_t ip, uint8_t inst) 1213 { 1214 /* This considers only major/minor/rev and ignores 1215 * subrevision/variant fields. 1216 */ 1217 return adev->ip_versions[ip][inst] & ~0xFFU; 1218 } 1219 1220 static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev, 1221 uint8_t ip, uint8_t inst) 1222 { 1223 /* This returns full version - major/minor/rev/variant/subrevision */ 1224 return adev->ip_versions[ip][inst]; 1225 } 1226 1227 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) 1228 { 1229 return container_of(ddev, struct amdgpu_device, ddev); 1230 } 1231 1232 static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev) 1233 { 1234 return &adev->ddev; 1235 } 1236 1237 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev) 1238 { 1239 return container_of(bdev, struct amdgpu_device, mman.bdev); 1240 } 1241 1242 static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev) 1243 { 1244 return !!adev->aid_mask; 1245 } 1246 1247 int amdgpu_device_init(struct amdgpu_device *adev, 1248 uint32_t flags); 1249 void amdgpu_device_fini_hw(struct amdgpu_device *adev); 1250 void amdgpu_device_fini_sw(struct amdgpu_device *adev); 1251 1252 int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); 1253 1254 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos, 1255 void *buf, size_t size, bool write); 1256 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos, 1257 void *buf, size_t size, bool write); 1258 1259 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, 1260 void *buf, size_t size, bool write); 1261 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, 1262 uint32_t inst, uint32_t reg_addr, char reg_name[], 1263 uint32_t expected_value, uint32_t mask); 1264 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, 1265 uint32_t reg, uint32_t acc_flags); 1266 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev, 1267 u64 reg_addr); 1268 uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev, 1269 uint32_t reg, uint32_t acc_flags, 1270 uint32_t xcc_id); 1271 void amdgpu_device_wreg(struct amdgpu_device *adev, 1272 uint32_t reg, uint32_t v, 1273 uint32_t acc_flags); 1274 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, 1275 u64 reg_addr, u32 reg_data); 1276 void amdgpu_device_xcc_wreg(struct amdgpu_device *adev, 1277 uint32_t reg, uint32_t v, 1278 uint32_t acc_flags, 1279 uint32_t xcc_id); 1280 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, 1281 uint32_t reg, uint32_t v, uint32_t xcc_id); 1282 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); 1283 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); 1284 1285 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, 1286 u32 reg_addr); 1287 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, 1288 u32 reg_addr); 1289 u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev, 1290 u64 reg_addr); 1291 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, 1292 u32 reg_addr, u32 reg_data); 1293 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, 1294 u32 reg_addr, u64 reg_data); 1295 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev, 1296 u64 reg_addr, u64 reg_data); 1297 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev); 1298 bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev, 1299 enum amd_asic_type asic_type); 1300 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); 1301 1302 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev); 1303 1304 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 1305 struct amdgpu_reset_context *reset_context); 1306 1307 int amdgpu_do_asic_reset(struct list_head *device_list_handle, 1308 struct amdgpu_reset_context *reset_context); 1309 1310 int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context); 1311 1312 int emu_soc_asic_init(struct amdgpu_device *adev); 1313 1314 /* 1315 * Registers read & write functions. 1316 */ 1317 #define AMDGPU_REGS_NO_KIQ (1<<1) 1318 #define AMDGPU_REGS_RLC (1<<2) 1319 1320 #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) 1321 #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) 1322 1323 #define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0) 1324 #define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0) 1325 1326 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg)) 1327 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v)) 1328 1329 #define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0) 1330 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0)) 1331 #define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0) 1332 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1333 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1334 #define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst) 1335 #define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst) 1336 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) 1337 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) 1338 #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg)) 1339 #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v)) 1340 #define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg)) 1341 #define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v)) 1342 #define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg)) 1343 #define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v)) 1344 #define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg)) 1345 #define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v)) 1346 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg)) 1347 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v)) 1348 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg)) 1349 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v)) 1350 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg)) 1351 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) 1352 #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg)) 1353 #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v)) 1354 #define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg)) 1355 #define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v)) 1356 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) 1357 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) 1358 #define WREG32_P(reg, val, mask) \ 1359 do { \ 1360 uint32_t tmp_ = RREG32(reg); \ 1361 tmp_ &= (mask); \ 1362 tmp_ |= ((val) & ~(mask)); \ 1363 WREG32(reg, tmp_); \ 1364 } while (0) 1365 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) 1366 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) 1367 #define WREG32_PLL_P(reg, val, mask) \ 1368 do { \ 1369 uint32_t tmp_ = RREG32_PLL(reg); \ 1370 tmp_ &= (mask); \ 1371 tmp_ |= ((val) & ~(mask)); \ 1372 WREG32_PLL(reg, tmp_); \ 1373 } while (0) 1374 1375 #define WREG32_SMC_P(_Reg, _Val, _Mask) \ 1376 do { \ 1377 u32 tmp = RREG32_SMC(_Reg); \ 1378 tmp &= (_Mask); \ 1379 tmp |= ((_Val) & ~(_Mask)); \ 1380 WREG32_SMC(_Reg, tmp); \ 1381 } while (0) 1382 1383 #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false)) 1384 1385 #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT 1386 #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK 1387 1388 #define REG_SET_FIELD(orig_val, reg, field, field_val) \ 1389 (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \ 1390 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field)))) 1391 1392 #define REG_GET_FIELD(value, reg, field) \ 1393 (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field)) 1394 1395 #define WREG32_FIELD(reg, field, val) \ 1396 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) 1397 1398 #define WREG32_FIELD_OFFSET(reg, offset, field, val) \ 1399 WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) 1400 1401 #define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l)) 1402 /* 1403 * BIOS helpers. 1404 */ 1405 #define RBIOS8(i) (adev->bios[i]) 1406 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) 1407 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) 1408 1409 /* 1410 * ASICs macro. 1411 */ 1412 #define amdgpu_asic_set_vga_state(adev, state) \ 1413 ((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0) 1414 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) 1415 #define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev)) 1416 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) 1417 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 1418 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 1419 #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev)) 1420 #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l)) 1421 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 1422 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 1423 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) 1424 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) 1425 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev)) 1426 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) 1427 #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) 1428 #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) 1429 #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev)) 1430 #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev))) 1431 #define amdgpu_asic_supports_baco(adev) \ 1432 ((adev)->asic_funcs->supports_baco ? (adev)->asic_funcs->supports_baco((adev)) : 0) 1433 #define amdgpu_asic_pre_asic_init(adev) \ 1434 { \ 1435 if ((adev)->asic_funcs && (adev)->asic_funcs->pre_asic_init) \ 1436 (adev)->asic_funcs->pre_asic_init((adev)); \ 1437 } 1438 #define amdgpu_asic_update_umd_stable_pstate(adev, enter) \ 1439 ((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0) 1440 #define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c)) 1441 1442 #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)) 1443 1444 #define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i)) 1445 #define for_each_inst(i, inst_mask) \ 1446 for (i = ffs(inst_mask); i-- != 0; \ 1447 i = ffs(inst_mask & BIT_MASK_UPPER(i + 1))) 1448 1449 /* Common functions */ 1450 bool amdgpu_device_has_job_running(struct amdgpu_device *adev); 1451 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); 1452 int amdgpu_device_gpu_recover(struct amdgpu_device *adev, 1453 struct amdgpu_job *job, 1454 struct amdgpu_reset_context *reset_context); 1455 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); 1456 int amdgpu_device_pci_reset(struct amdgpu_device *adev); 1457 bool amdgpu_device_need_post(struct amdgpu_device *adev); 1458 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev); 1459 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev); 1460 1461 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, 1462 u64 num_vis_bytes); 1463 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev); 1464 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, 1465 const u32 *registers, 1466 const u32 array_size); 1467 1468 int amdgpu_device_mode1_reset(struct amdgpu_device *adev); 1469 int amdgpu_device_link_reset(struct amdgpu_device *adev); 1470 bool amdgpu_device_supports_atpx(struct amdgpu_device *adev); 1471 bool amdgpu_device_supports_px(struct amdgpu_device *adev); 1472 bool amdgpu_device_supports_boco(struct amdgpu_device *adev); 1473 bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev); 1474 int amdgpu_device_supports_baco(struct amdgpu_device *adev); 1475 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev); 1476 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, 1477 struct amdgpu_device *peer_adev); 1478 int amdgpu_device_baco_enter(struct amdgpu_device *adev); 1479 int amdgpu_device_baco_exit(struct amdgpu_device *adev); 1480 1481 void amdgpu_device_flush_hdp(struct amdgpu_device *adev, 1482 struct amdgpu_ring *ring); 1483 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, 1484 struct amdgpu_ring *ring); 1485 1486 void amdgpu_device_halt(struct amdgpu_device *adev); 1487 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev, 1488 u32 reg); 1489 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, 1490 u32 reg, u32 v); 1491 struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev); 1492 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, 1493 struct dma_fence *gang); 1494 struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev, 1495 struct amdgpu_ring *ring, 1496 struct amdgpu_job *job); 1497 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev); 1498 ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring); 1499 ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset); 1500 1501 /* atpx handler */ 1502 #if defined(CONFIG_VGA_SWITCHEROO) 1503 void amdgpu_register_atpx_handler(void); 1504 void amdgpu_unregister_atpx_handler(void); 1505 bool amdgpu_has_atpx_dgpu_power_cntl(void); 1506 bool amdgpu_is_atpx_hybrid(void); 1507 bool amdgpu_has_atpx(void); 1508 #else 1509 static inline void amdgpu_register_atpx_handler(void) {} 1510 static inline void amdgpu_unregister_atpx_handler(void) {} 1511 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } 1512 static inline bool amdgpu_is_atpx_hybrid(void) { return false; } 1513 static inline bool amdgpu_has_atpx(void) { return false; } 1514 #endif 1515 1516 /* 1517 * KMS 1518 */ 1519 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; 1520 extern const int amdgpu_max_kms_ioctl; 1521 1522 int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags); 1523 void amdgpu_driver_unload_kms(struct drm_device *dev); 1524 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); 1525 void amdgpu_driver_postclose_kms(struct drm_device *dev, 1526 struct drm_file *file_priv); 1527 void amdgpu_driver_release_kms(struct drm_device *dev); 1528 1529 int amdgpu_device_prepare(struct drm_device *dev); 1530 void amdgpu_device_complete(struct drm_device *dev); 1531 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); 1532 int amdgpu_device_resume(struct drm_device *dev, bool fbcon); 1533 u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc); 1534 int amdgpu_enable_vblank_kms(struct drm_crtc *crtc); 1535 void amdgpu_disable_vblank_kms(struct drm_crtc *crtc); 1536 int amdgpu_info_ioctl(struct drm_device *dev, void *data, 1537 struct drm_file *filp); 1538 1539 /* 1540 * functions used by amdgpu_encoder.c 1541 */ 1542 struct amdgpu_afmt_acr { 1543 u32 clock; 1544 1545 int n_32khz; 1546 int cts_32khz; 1547 1548 int n_44_1khz; 1549 int cts_44_1khz; 1550 1551 int n_48khz; 1552 int cts_48khz; 1553 1554 }; 1555 1556 struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); 1557 1558 /* amdgpu_acpi.c */ 1559 1560 struct amdgpu_numa_info { 1561 uint64_t size; 1562 int pxm; 1563 int nid; 1564 }; 1565 1566 /* ATCS Device/Driver State */ 1567 #define AMDGPU_ATCS_PSC_DEV_STATE_D0 0 1568 #define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3 1569 #define AMDGPU_ATCS_PSC_DRV_STATE_OPR 0 1570 #define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR 1 1571 1572 #if defined(CONFIG_ACPI) 1573 int amdgpu_acpi_init(struct amdgpu_device *adev); 1574 void amdgpu_acpi_fini(struct amdgpu_device *adev); 1575 bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev); 1576 bool amdgpu_acpi_is_power_shift_control_supported(void); 1577 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, 1578 u8 perf_req, bool advertise); 1579 int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, 1580 u8 dev_state, bool drv_state); 1581 int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev, 1582 enum amdgpu_ss ss_state); 1583 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); 1584 int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset, 1585 u64 *tmr_size); 1586 int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id, 1587 struct amdgpu_numa_info *numa_info); 1588 1589 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); 1590 bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev); 1591 void amdgpu_acpi_detect(void); 1592 void amdgpu_acpi_release(void); 1593 #else 1594 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } 1595 static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, 1596 u64 *tmr_offset, u64 *tmr_size) 1597 { 1598 return -EINVAL; 1599 } 1600 static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, 1601 int xcc_id, 1602 struct amdgpu_numa_info *numa_info) 1603 { 1604 return -EINVAL; 1605 } 1606 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } 1607 static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; } 1608 static inline void amdgpu_acpi_detect(void) { } 1609 static inline void amdgpu_acpi_release(void) { } 1610 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } 1611 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, 1612 u8 dev_state, bool drv_state) { return 0; } 1613 static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev, 1614 enum amdgpu_ss ss_state) 1615 { 1616 return 0; 1617 } 1618 static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { } 1619 #endif 1620 1621 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) 1622 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); 1623 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); 1624 #else 1625 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } 1626 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } 1627 #endif 1628 1629 #if defined(CONFIG_DRM_AMD_ISP) 1630 int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev); 1631 #endif 1632 1633 void amdgpu_register_gpu_instance(struct amdgpu_device *adev); 1634 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev); 1635 1636 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, 1637 pci_channel_state_t state); 1638 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev); 1639 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev); 1640 void amdgpu_pci_resume(struct pci_dev *pdev); 1641 1642 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev); 1643 bool amdgpu_device_load_pci_state(struct pci_dev *pdev); 1644 1645 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev); 1646 1647 int amdgpu_device_set_cg_state(struct amdgpu_device *adev, 1648 enum amd_clockgating_state state); 1649 int amdgpu_device_set_pg_state(struct amdgpu_device *adev, 1650 enum amd_powergating_state state); 1651 1652 static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev) 1653 { 1654 return amdgpu_gpu_recovery != 0 && 1655 adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT && 1656 adev->compute_timeout != MAX_SCHEDULE_TIMEOUT && 1657 adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT && 1658 adev->video_timeout != MAX_SCHEDULE_TIMEOUT; 1659 } 1660 1661 #include "amdgpu_object.h" 1662 1663 static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) 1664 { 1665 return adev->gmc.tmz_enabled; 1666 } 1667 1668 int amdgpu_in_reset(struct amdgpu_device *adev); 1669 1670 extern const struct attribute_group amdgpu_vram_mgr_attr_group; 1671 extern const struct attribute_group amdgpu_gtt_mgr_attr_group; 1672 extern const struct attribute_group amdgpu_flash_attr_group; 1673 1674 void amdgpu_set_init_level(struct amdgpu_device *adev, 1675 enum amdgpu_init_lvl_id lvl); 1676 1677 static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev) 1678 { 1679 u32 status; 1680 int r; 1681 1682 r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status); 1683 if (r || PCI_POSSIBLE_ERROR(status)) { 1684 dev_err(adev->dev, "device lost from bus!"); 1685 return -ENODEV; 1686 } 1687 1688 return 0; 1689 } 1690 1691 void amdgpu_device_set_uid(struct amdgpu_uid *uid_info, 1692 enum amdgpu_uid_type type, uint8_t inst, 1693 uint64_t uid); 1694 uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info, 1695 enum amdgpu_uid_type type, uint8_t inst); 1696 #endif 1697