1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #ifndef __AMDGPU_H__ 29 #define __AMDGPU_H__ 30 31 #ifdef pr_fmt 32 #undef pr_fmt 33 #endif 34 35 #define pr_fmt(fmt) "amdgpu: " fmt 36 37 #ifdef dev_fmt 38 #undef dev_fmt 39 #endif 40 41 #define dev_fmt(fmt) "amdgpu: " fmt 42 43 #include "amdgpu_ctx.h" 44 45 #include <linux/atomic.h> 46 #include <linux/wait.h> 47 #include <linux/list.h> 48 #include <linux/kref.h> 49 #include <linux/rbtree.h> 50 #include <linux/hashtable.h> 51 #include <linux/dma-fence.h> 52 #include <linux/pci.h> 53 54 #include <drm/ttm/ttm_bo.h> 55 #include <drm/ttm/ttm_placement.h> 56 57 #include <drm/amdgpu_drm.h> 58 #include <drm/drm_gem.h> 59 #include <drm/drm_ioctl.h> 60 61 #include <kgd_kfd_interface.h> 62 #include "dm_pp_interface.h" 63 #include "kgd_pp_interface.h" 64 65 #include "amd_shared.h" 66 #include "amdgpu_utils.h" 67 #include "amdgpu_mode.h" 68 #include "amdgpu_ih.h" 69 #include "amdgpu_irq.h" 70 #include "amdgpu_ucode.h" 71 #include "amdgpu_ttm.h" 72 #include "amdgpu_psp.h" 73 #include "amdgpu_gds.h" 74 #include "amdgpu_sync.h" 75 #include "amdgpu_ring.h" 76 #include "amdgpu_vm.h" 77 #include "amdgpu_dpm.h" 78 #include "amdgpu_acp.h" 79 #include "amdgpu_uvd.h" 80 #include "amdgpu_vce.h" 81 #include "amdgpu_vcn.h" 82 #include "amdgpu_jpeg.h" 83 #include "amdgpu_vpe.h" 84 #include "amdgpu_umsch_mm.h" 85 #include "amdgpu_gmc.h" 86 #include "amdgpu_gfx.h" 87 #include "amdgpu_sdma.h" 88 #include "amdgpu_lsdma.h" 89 #include "amdgpu_nbio.h" 90 #include "amdgpu_hdp.h" 91 #include "amdgpu_dm.h" 92 #include "amdgpu_virt.h" 93 #include "amdgpu_csa.h" 94 #include "amdgpu_mes_ctx.h" 95 #include "amdgpu_gart.h" 96 #include "amdgpu_debugfs.h" 97 #include "amdgpu_job.h" 98 #include "amdgpu_bo_list.h" 99 #include "amdgpu_gem.h" 100 #include "amdgpu_doorbell.h" 101 #include "amdgpu_amdkfd.h" 102 #include "amdgpu_discovery.h" 103 #include "amdgpu_mes.h" 104 #include "amdgpu_umc.h" 105 #include "amdgpu_mmhub.h" 106 #include "amdgpu_gfxhub.h" 107 #include "amdgpu_df.h" 108 #include "amdgpu_smuio.h" 109 #include "amdgpu_fdinfo.h" 110 #include "amdgpu_mca.h" 111 #include "amdgpu_aca.h" 112 #include "amdgpu_ras.h" 113 #include "amdgpu_cper.h" 114 #include "amdgpu_xcp.h" 115 #include "amdgpu_seq64.h" 116 #include "amdgpu_reg_state.h" 117 #include "amdgpu_userq.h" 118 #include "amdgpu_eviction_fence.h" 119 #include "amdgpu_ip.h" 120 #if defined(CONFIG_DRM_AMD_ISP) 121 #include "amdgpu_isp.h" 122 #endif 123 124 #define MAX_GPU_INSTANCE 64 125 126 #define GFX_SLICE_PERIOD_MS 250 127 128 struct amdgpu_gpu_instance { 129 struct amdgpu_device *adev; 130 int mgpu_fan_enabled; 131 }; 132 133 struct amdgpu_mgpu_info { 134 struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE]; 135 struct mutex mutex; 136 uint32_t num_gpu; 137 uint32_t num_dgpu; 138 uint32_t num_apu; 139 }; 140 141 enum amdgpu_ss { 142 AMDGPU_SS_DRV_LOAD, 143 AMDGPU_SS_DEV_D0, 144 AMDGPU_SS_DEV_D3, 145 AMDGPU_SS_DRV_UNLOAD 146 }; 147 148 struct amdgpu_hwip_reg_entry { 149 u32 hwip; 150 u32 inst; 151 u32 seg; 152 u32 reg_offset; 153 const char *reg_name; 154 }; 155 156 struct amdgpu_watchdog_timer { 157 bool timeout_fatal_disable; 158 uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */ 159 }; 160 161 #define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256 162 163 /* 164 * Modules parameters. 165 */ 166 extern int amdgpu_modeset; 167 extern unsigned int amdgpu_vram_limit; 168 extern int amdgpu_vis_vram_limit; 169 extern int amdgpu_gart_size; 170 extern int amdgpu_gtt_size; 171 extern int amdgpu_moverate; 172 extern int amdgpu_audio; 173 extern int amdgpu_disp_priority; 174 extern int amdgpu_hw_i2c; 175 extern int amdgpu_pcie_gen2; 176 extern int amdgpu_msi; 177 extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; 178 extern int amdgpu_dpm; 179 extern int amdgpu_fw_load_type; 180 extern int amdgpu_aspm; 181 extern int amdgpu_runtime_pm; 182 extern uint amdgpu_ip_block_mask; 183 extern int amdgpu_bapm; 184 extern int amdgpu_deep_color; 185 extern int amdgpu_vm_size; 186 extern int amdgpu_vm_block_size; 187 extern int amdgpu_vm_fragment_size; 188 extern int amdgpu_vm_fault_stop; 189 extern int amdgpu_vm_debug; 190 extern int amdgpu_vm_update_mode; 191 extern int amdgpu_exp_hw_support; 192 extern int amdgpu_dc; 193 extern int amdgpu_sched_jobs; 194 extern int amdgpu_sched_hw_submission; 195 extern uint amdgpu_pcie_gen_cap; 196 extern uint amdgpu_pcie_lane_cap; 197 extern u64 amdgpu_cg_mask; 198 extern uint amdgpu_pg_mask; 199 extern uint amdgpu_sdma_phase_quantum; 200 extern char *amdgpu_disable_cu; 201 extern char *amdgpu_virtual_display; 202 extern uint amdgpu_pp_feature_mask; 203 extern uint amdgpu_force_long_training; 204 extern int amdgpu_lbpw; 205 extern int amdgpu_compute_multipipe; 206 extern int amdgpu_gpu_recovery; 207 extern int amdgpu_emu_mode; 208 extern uint amdgpu_smu_memory_pool_size; 209 extern int amdgpu_smu_pptable_id; 210 extern uint amdgpu_dc_feature_mask; 211 extern uint amdgpu_freesync_vid_mode; 212 extern uint amdgpu_dc_debug_mask; 213 extern uint amdgpu_dc_visual_confirm; 214 extern int amdgpu_dm_abm_level; 215 extern int amdgpu_backlight; 216 extern int amdgpu_damage_clips; 217 extern struct amdgpu_mgpu_info mgpu_info; 218 extern int amdgpu_ras_enable; 219 extern uint amdgpu_ras_mask; 220 extern int amdgpu_bad_page_threshold; 221 extern bool amdgpu_ignore_bad_page_threshold; 222 extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer; 223 extern int amdgpu_async_gfx_ring; 224 extern int amdgpu_mcbp; 225 extern int amdgpu_discovery; 226 extern int amdgpu_mes; 227 extern int amdgpu_mes_log_enable; 228 extern int amdgpu_mes_kiq; 229 extern int amdgpu_uni_mes; 230 extern int amdgpu_noretry; 231 extern int amdgpu_force_asic_type; 232 extern int amdgpu_smartshift_bias; 233 extern int amdgpu_use_xgmi_p2p; 234 extern int amdgpu_mtype_local; 235 extern int amdgpu_enforce_isolation; 236 #ifdef CONFIG_HSA_AMD 237 extern int sched_policy; 238 extern bool debug_evictions; 239 extern bool no_system_mem_limit; 240 extern int halt_if_hws_hang; 241 extern uint amdgpu_svm_default_granularity; 242 #else 243 static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS; 244 static const bool __maybe_unused debug_evictions; /* = false */ 245 static const bool __maybe_unused no_system_mem_limit; 246 static const int __maybe_unused halt_if_hws_hang; 247 #endif 248 #ifdef CONFIG_HSA_AMD_P2P 249 extern bool pcie_p2p; 250 #endif 251 252 extern int amdgpu_tmz; 253 extern int amdgpu_reset_method; 254 255 #ifdef CONFIG_DRM_AMDGPU_SI 256 extern int amdgpu_si_support; 257 #endif 258 #ifdef CONFIG_DRM_AMDGPU_CIK 259 extern int amdgpu_cik_support; 260 #endif 261 extern int amdgpu_num_kcq; 262 263 #define AMDGPU_VCNFW_LOG_SIZE (32 * 1024) 264 #define AMDGPU_UMSCHFW_LOG_SIZE (32 * 1024) 265 extern int amdgpu_vcnfw_log; 266 extern int amdgpu_sg_display; 267 extern int amdgpu_umsch_mm; 268 extern int amdgpu_seamless; 269 extern int amdgpu_umsch_mm_fwlog; 270 271 extern int amdgpu_user_partt_mode; 272 extern int amdgpu_agp; 273 extern int amdgpu_rebar; 274 275 extern int amdgpu_wbrf; 276 extern int amdgpu_user_queue; 277 278 #define AMDGPU_VM_MAX_NUM_CTX 4096 279 #define AMDGPU_SG_THRESHOLD (256*1024*1024) 280 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 281 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 282 #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) 283 #define AMDGPU_DEBUGFS_MAX_COMPONENTS 32 284 #define AMDGPUFB_CONN_LIMIT 4 285 #define AMDGPU_BIOS_NUM_SCRATCH 16 286 287 #define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */ 288 289 /* hard reset data */ 290 #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b 291 292 /* reset flags */ 293 #define AMDGPU_RESET_GFX (1 << 0) 294 #define AMDGPU_RESET_COMPUTE (1 << 1) 295 #define AMDGPU_RESET_DMA (1 << 2) 296 #define AMDGPU_RESET_CP (1 << 3) 297 #define AMDGPU_RESET_GRBM (1 << 4) 298 #define AMDGPU_RESET_DMA1 (1 << 5) 299 #define AMDGPU_RESET_RLC (1 << 6) 300 #define AMDGPU_RESET_SEM (1 << 7) 301 #define AMDGPU_RESET_IH (1 << 8) 302 #define AMDGPU_RESET_VMC (1 << 9) 303 #define AMDGPU_RESET_MC (1 << 10) 304 #define AMDGPU_RESET_DISPLAY (1 << 11) 305 #define AMDGPU_RESET_UVD (1 << 12) 306 #define AMDGPU_RESET_VCE (1 << 13) 307 #define AMDGPU_RESET_VCE1 (1 << 14) 308 309 /* reset mask */ 310 #define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */ 311 #define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */ 312 #define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */ 313 #define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */ 314 315 /* max cursor sizes (in pixels) */ 316 #define CIK_CURSOR_WIDTH 128 317 #define CIK_CURSOR_HEIGHT 128 318 319 /* smart shift bias level limits */ 320 #define AMDGPU_SMARTSHIFT_MAX_BIAS (100) 321 #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100) 322 323 /* Extra time delay(in ms) to eliminate the influence of temperature momentary fluctuation */ 324 #define AMDGPU_SWCTF_EXTRA_DELAY 50 325 326 struct amdgpu_xcp_mgr; 327 struct amdgpu_device; 328 struct amdgpu_irq_src; 329 struct amdgpu_fpriv; 330 struct amdgpu_bo_va_mapping; 331 struct kfd_vm_fault_info; 332 struct amdgpu_hive_info; 333 struct amdgpu_reset_context; 334 struct amdgpu_reset_control; 335 336 enum amdgpu_cp_irq { 337 AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0, 338 AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP, 339 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP, 340 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP, 341 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP, 342 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP, 343 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP, 344 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP, 345 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP, 346 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP, 347 348 AMDGPU_CP_IRQ_LAST 349 }; 350 351 enum amdgpu_thermal_irq { 352 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0, 353 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW, 354 355 AMDGPU_THERMAL_IRQ_LAST 356 }; 357 358 enum amdgpu_kiq_irq { 359 AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0, 360 AMDGPU_CP_KIQ_IRQ_LAST 361 }; 362 #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ 363 #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ 364 #define MAX_KIQ_REG_TRY 1000 365 366 /* 367 * BIOS. 368 */ 369 bool amdgpu_get_bios(struct amdgpu_device *adev); 370 bool amdgpu_read_bios(struct amdgpu_device *adev); 371 bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev, 372 u8 *bios, u32 length_bytes); 373 void amdgpu_bios_release(struct amdgpu_device *adev); 374 /* 375 * Clocks 376 */ 377 378 #define AMDGPU_MAX_PPLL 3 379 380 struct amdgpu_clock { 381 struct amdgpu_pll ppll[AMDGPU_MAX_PPLL]; 382 struct amdgpu_pll spll; 383 struct amdgpu_pll mpll; 384 /* 10 Khz units */ 385 uint32_t default_mclk; 386 uint32_t default_sclk; 387 uint32_t default_dispclk; 388 uint32_t dp_extclk; 389 uint32_t max_pixel_clock; 390 }; 391 392 /* sub-allocation manager, it has to be protected by another lock. 393 * By conception this is an helper for other part of the driver 394 * like the indirect buffer or semaphore, which both have their 395 * locking. 396 * 397 * Principe is simple, we keep a list of sub allocation in offset 398 * order (first entry has offset == 0, last entry has the highest 399 * offset). 400 * 401 * When allocating new object we first check if there is room at 402 * the end total_size - (last_object_offset + last_object_size) >= 403 * alloc_size. If so we allocate new object there. 404 * 405 * When there is not enough room at the end, we start waiting for 406 * each sub object until we reach object_offset+object_size >= 407 * alloc_size, this object then become the sub object we return. 408 * 409 * Alignment can't be bigger than page size. 410 * 411 * Hole are not considered for allocation to keep things simple. 412 * Assumption is that there won't be hole (all object on same 413 * alignment). 414 */ 415 416 struct amdgpu_sa_manager { 417 struct drm_suballoc_manager base; 418 struct amdgpu_bo *bo; 419 uint64_t gpu_addr; 420 void *cpu_ptr; 421 }; 422 423 /* 424 * IRQS. 425 */ 426 427 struct amdgpu_flip_work { 428 struct delayed_work flip_work; 429 struct work_struct unpin_work; 430 struct amdgpu_device *adev; 431 int crtc_id; 432 u32 target_vblank; 433 uint64_t base; 434 struct drm_pending_vblank_event *event; 435 struct amdgpu_bo *old_abo; 436 unsigned shared_count; 437 struct dma_fence **shared; 438 struct dma_fence_cb cb; 439 bool async; 440 }; 441 442 /* 443 * file private structure 444 */ 445 446 struct amdgpu_fpriv { 447 struct amdgpu_vm vm; 448 struct amdgpu_bo_va *prt_va; 449 struct amdgpu_bo_va *csa_va; 450 struct amdgpu_bo_va *seq64_va; 451 struct mutex bo_list_lock; 452 struct idr bo_list_handles; 453 struct amdgpu_ctx_mgr ctx_mgr; 454 struct amdgpu_userq_mgr userq_mgr; 455 456 /* Eviction fence infra */ 457 struct amdgpu_eviction_fence_mgr evf_mgr; 458 459 /** GPU partition selection */ 460 uint32_t xcp_id; 461 }; 462 463 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); 464 465 /* 466 * Writeback 467 */ 468 #define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */ 469 470 /** 471 * amdgpu_wb - This struct is used for small GPU memory allocation. 472 * 473 * This struct is used to allocate a small amount of GPU memory that can be 474 * used to shadow certain states into the memory. This is especially useful for 475 * providing easy CPU access to some states without requiring register access 476 * (e.g., if some block is power gated, reading register may be problematic). 477 * 478 * Note: the term writeback was initially used because many of the amdgpu 479 * components had some level of writeback memory, and this struct initially 480 * described those components. 481 */ 482 struct amdgpu_wb { 483 484 /** 485 * @wb_obj: 486 * 487 * Buffer Object used for the writeback memory. 488 */ 489 struct amdgpu_bo *wb_obj; 490 491 /** 492 * @wb: 493 * 494 * Pointer to the first writeback slot. In terms of CPU address 495 * this value can be accessed directly by using the offset as an index. 496 * For the GPU address, it is necessary to use gpu_addr and the offset. 497 */ 498 uint32_t *wb; 499 500 /** 501 * @gpu_addr: 502 * 503 * Writeback base address in the GPU. 504 */ 505 uint64_t gpu_addr; 506 507 /** 508 * @num_wb: 509 * 510 * Number of writeback slots reserved for amdgpu. 511 */ 512 u32 num_wb; 513 514 /** 515 * @used: 516 * 517 * Track the writeback slot already used. 518 */ 519 unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)]; 520 521 /** 522 * @lock: 523 * 524 * Protects read and write of the used field array. 525 */ 526 spinlock_t lock; 527 }; 528 529 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb); 530 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb); 531 532 /* 533 * Benchmarking 534 */ 535 int amdgpu_benchmark(struct amdgpu_device *adev, int test_number); 536 537 /* 538 * ASIC specific register table accessible by UMD 539 */ 540 struct amdgpu_allowed_register_entry { 541 uint32_t reg_offset; 542 bool grbm_indexed; 543 }; 544 545 /** 546 * enum amd_reset_method - Methods for resetting AMD GPU devices 547 * 548 * @AMD_RESET_METHOD_NONE: The device will not be reset. 549 * @AMD_RESET_LEGACY: Method reserved for SI, CIK and VI ASICs. 550 * @AMD_RESET_MODE0: Reset the entire ASIC. Not currently available for the 551 * any device. 552 * @AMD_RESET_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN, etc.) 553 * individually. Suitable only for some discrete GPU, not 554 * available for all ASICs. 555 * @AMD_RESET_MODE2: Resets a lesser level of IPs compared to MODE1. Which IPs 556 * are reset depends on the ASIC. Notably doesn't reset IPs 557 * shared with the CPU on APUs or the memory controllers (so 558 * VRAM is not lost). Not available on all ASICs. 559 * @AMD_RESET_LINK: Triggers SW-UP link reset on other GPUs 560 * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card 561 * but without powering off the PCI bus. Suitable only for 562 * discrete GPUs. 563 * @AMD_RESET_PCI: Does a full bus reset using core Linux subsystem PCI reset 564 * and does a secondary bus reset or FLR, depending on what the 565 * underlying hardware supports. 566 * 567 * Methods available for AMD GPU driver for resetting the device. Not all 568 * methods are suitable for every device. User can override the method using 569 * module parameter `reset_method`. 570 */ 571 enum amd_reset_method { 572 AMD_RESET_METHOD_NONE = -1, 573 AMD_RESET_METHOD_LEGACY = 0, 574 AMD_RESET_METHOD_MODE0, 575 AMD_RESET_METHOD_MODE1, 576 AMD_RESET_METHOD_MODE2, 577 AMD_RESET_METHOD_LINK, 578 AMD_RESET_METHOD_BACO, 579 AMD_RESET_METHOD_PCI, 580 AMD_RESET_METHOD_ON_INIT, 581 }; 582 583 struct amdgpu_video_codec_info { 584 u32 codec_type; 585 u32 max_width; 586 u32 max_height; 587 u32 max_pixels_per_frame; 588 u32 max_level; 589 }; 590 591 #define codec_info_build(type, width, height, level) \ 592 .codec_type = type,\ 593 .max_width = width,\ 594 .max_height = height,\ 595 .max_pixels_per_frame = height * width,\ 596 .max_level = level, 597 598 struct amdgpu_video_codecs { 599 const u32 codec_count; 600 const struct amdgpu_video_codec_info *codec_array; 601 }; 602 603 /* 604 * ASIC specific functions. 605 */ 606 struct amdgpu_asic_funcs { 607 bool (*read_disabled_bios)(struct amdgpu_device *adev); 608 bool (*read_bios_from_rom)(struct amdgpu_device *adev, 609 u8 *bios, u32 length_bytes); 610 int (*read_register)(struct amdgpu_device *adev, u32 se_num, 611 u32 sh_num, u32 reg_offset, u32 *value); 612 void (*set_vga_state)(struct amdgpu_device *adev, bool state); 613 int (*reset)(struct amdgpu_device *adev); 614 enum amd_reset_method (*reset_method)(struct amdgpu_device *adev); 615 /* get the reference clock */ 616 u32 (*get_xclk)(struct amdgpu_device *adev); 617 /* MM block clocks */ 618 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); 619 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 620 /* static power management */ 621 int (*get_pcie_lanes)(struct amdgpu_device *adev); 622 void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes); 623 /* get config memsize register */ 624 u32 (*get_config_memsize)(struct amdgpu_device *adev); 625 /* flush hdp write queue */ 626 void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring); 627 /* invalidate hdp read cache */ 628 void (*invalidate_hdp)(struct amdgpu_device *adev, 629 struct amdgpu_ring *ring); 630 /* check if the asic needs a full reset of if soft reset will work */ 631 bool (*need_full_reset)(struct amdgpu_device *adev); 632 /* initialize doorbell layout for specific asic*/ 633 void (*init_doorbell_index)(struct amdgpu_device *adev); 634 /* PCIe bandwidth usage */ 635 void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0, 636 uint64_t *count1); 637 /* do we need to reset the asic at init time (e.g., kexec) */ 638 bool (*need_reset_on_init)(struct amdgpu_device *adev); 639 /* PCIe replay counter */ 640 uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev); 641 /* device supports BACO */ 642 int (*supports_baco)(struct amdgpu_device *adev); 643 /* pre asic_init quirks */ 644 void (*pre_asic_init)(struct amdgpu_device *adev); 645 /* enter/exit umd stable pstate */ 646 int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter); 647 /* query video codecs */ 648 int (*query_video_codecs)(struct amdgpu_device *adev, bool encode, 649 const struct amdgpu_video_codecs **codecs); 650 /* encode "> 32bits" smn addressing */ 651 u64 (*encode_ext_smn_addressing)(int ext_id); 652 653 ssize_t (*get_reg_state)(struct amdgpu_device *adev, 654 enum amdgpu_reg_state reg_state, void *buf, 655 size_t max_size); 656 }; 657 658 /* 659 * IOCTL. 660 */ 661 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, 662 struct drm_file *filp); 663 664 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 665 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, 666 struct drm_file *filp); 667 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 668 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, 669 struct drm_file *filp); 670 671 /* VRAM scratch page for HDP bug, default vram page */ 672 struct amdgpu_mem_scratch { 673 struct amdgpu_bo *robj; 674 uint32_t *ptr; 675 u64 gpu_addr; 676 }; 677 678 /* 679 * CGS 680 */ 681 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); 682 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); 683 684 /* 685 * Core structure, functions and helpers. 686 */ 687 typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); 688 typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 689 690 typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t); 691 typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t); 692 693 typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t); 694 typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t); 695 696 typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t); 697 typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t); 698 699 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 700 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); 701 702 struct amdgpu_mmio_remap { 703 u32 reg_offset; 704 resource_size_t bus_addr; 705 struct amdgpu_bo *bo; 706 }; 707 708 enum amdgpu_uid_type { 709 AMDGPU_UID_TYPE_XCD, 710 AMDGPU_UID_TYPE_AID, 711 AMDGPU_UID_TYPE_SOC, 712 AMDGPU_UID_TYPE_MAX 713 }; 714 715 #define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */ 716 717 struct amdgpu_uid { 718 uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX]; 719 struct amdgpu_device *adev; 720 }; 721 722 struct amd_powerplay { 723 void *pp_handle; 724 const struct amd_pm_funcs *pp_funcs; 725 }; 726 727 /* polaris10 kickers */ 728 #define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \ 729 ((rid == 0xE3) || \ 730 (rid == 0xE4) || \ 731 (rid == 0xE5) || \ 732 (rid == 0xE7) || \ 733 (rid == 0xEF))) || \ 734 ((did == 0x6FDF) && \ 735 ((rid == 0xE7) || \ 736 (rid == 0xEF) || \ 737 (rid == 0xFF)))) 738 739 #define ASICID_IS_P30(did, rid) ((did == 0x67DF) && \ 740 ((rid == 0xE1) || \ 741 (rid == 0xF7))) 742 743 /* polaris11 kickers */ 744 #define ASICID_IS_P21(did, rid) (((did == 0x67EF) && \ 745 ((rid == 0xE0) || \ 746 (rid == 0xE5))) || \ 747 ((did == 0x67FF) && \ 748 ((rid == 0xCF) || \ 749 (rid == 0xEF) || \ 750 (rid == 0xFF)))) 751 752 #define ASICID_IS_P31(did, rid) ((did == 0x67EF) && \ 753 ((rid == 0xE2))) 754 755 /* polaris12 kickers */ 756 #define ASICID_IS_P23(did, rid) (((did == 0x6987) && \ 757 ((rid == 0xC0) || \ 758 (rid == 0xC1) || \ 759 (rid == 0xC3) || \ 760 (rid == 0xC7))) || \ 761 ((did == 0x6981) && \ 762 ((rid == 0x00) || \ 763 (rid == 0x01) || \ 764 (rid == 0x10)))) 765 766 struct amdgpu_mqd_prop { 767 uint64_t mqd_gpu_addr; 768 uint64_t hqd_base_gpu_addr; 769 uint64_t rptr_gpu_addr; 770 uint64_t wptr_gpu_addr; 771 uint32_t queue_size; 772 bool use_doorbell; 773 uint32_t doorbell_index; 774 uint64_t eop_gpu_addr; 775 uint32_t hqd_pipe_priority; 776 uint32_t hqd_queue_priority; 777 uint32_t mqd_stride_size; 778 bool allow_tunneling; 779 bool hqd_active; 780 uint64_t shadow_addr; 781 uint64_t gds_bkup_addr; 782 uint64_t csa_addr; 783 uint64_t fence_address; 784 bool tmz_queue; 785 bool kernel_queue; 786 }; 787 788 struct amdgpu_mqd { 789 unsigned mqd_size; 790 int (*init_mqd)(struct amdgpu_device *adev, void *mqd, 791 struct amdgpu_mqd_prop *p); 792 }; 793 794 struct amdgpu_pcie_reset_ctx { 795 bool in_link_reset; 796 bool occurs_dpc; 797 bool audio_suspended; 798 struct pci_dev *swus; 799 struct pci_saved_state *swus_pcistate; 800 struct pci_saved_state *swds_pcistate; 801 }; 802 803 /* 804 * Custom Init levels could be defined for different situations where a full 805 * initialization of all hardware blocks are not expected. Sample cases are 806 * custom init sequences after resume after S0i3/S3, reset on initialization, 807 * partial reset of blocks etc. Presently, this defines only two levels. Levels 808 * are described in corresponding struct definitions - amdgpu_init_default, 809 * amdgpu_init_minimal_xgmi. 810 */ 811 enum amdgpu_init_lvl_id { 812 AMDGPU_INIT_LEVEL_DEFAULT, 813 AMDGPU_INIT_LEVEL_MINIMAL_XGMI, 814 AMDGPU_INIT_LEVEL_RESET_RECOVERY, 815 }; 816 817 struct amdgpu_init_level { 818 enum amdgpu_init_lvl_id level; 819 uint32_t hwini_ip_block_mask; 820 }; 821 822 #define AMDGPU_RESET_MAGIC_NUM 64 823 #define AMDGPU_MAX_DF_PERFMONS 4 824 struct amdgpu_reset_domain; 825 struct amdgpu_fru_info; 826 827 enum amdgpu_enforce_isolation_mode { 828 AMDGPU_ENFORCE_ISOLATION_DISABLE = 0, 829 AMDGPU_ENFORCE_ISOLATION_ENABLE = 1, 830 AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2, 831 AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3, 832 }; 833 834 struct amdgpu_device { 835 struct device *dev; 836 struct pci_dev *pdev; 837 struct drm_device ddev; 838 839 #ifdef CONFIG_DRM_AMD_ACP 840 struct amdgpu_acp acp; 841 #endif 842 struct amdgpu_hive_info *hive; 843 struct amdgpu_xcp_mgr *xcp_mgr; 844 /* ASIC */ 845 enum amd_asic_type asic_type; 846 uint32_t family; 847 uint32_t rev_id; 848 uint32_t external_rev_id; 849 unsigned long flags; 850 unsigned long apu_flags; 851 int usec_timeout; 852 const struct amdgpu_asic_funcs *asic_funcs; 853 bool shutdown; 854 bool need_swiotlb; 855 bool accel_working; 856 struct notifier_block acpi_nb; 857 struct notifier_block pm_nb; 858 struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; 859 struct debugfs_blob_wrapper debugfs_vbios_blob; 860 struct mutex srbm_mutex; 861 /* GRBM index mutex. Protects concurrent access to GRBM index */ 862 struct mutex grbm_idx_mutex; 863 struct dev_pm_domain vga_pm_domain; 864 bool have_disp_power_ref; 865 bool have_atomics_support; 866 867 /* BIOS */ 868 bool is_atom_fw; 869 uint8_t *bios; 870 uint32_t bios_size; 871 uint32_t bios_scratch_reg_offset; 872 uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; 873 874 /* Register/doorbell mmio */ 875 resource_size_t rmmio_base; 876 resource_size_t rmmio_size; 877 void __iomem *rmmio; 878 /* protects concurrent MM_INDEX/DATA based register access */ 879 spinlock_t mmio_idx_lock; 880 struct amdgpu_mmio_remap rmmio_remap; 881 /* protects concurrent SMC based register access */ 882 spinlock_t smc_idx_lock; 883 amdgpu_rreg_t smc_rreg; 884 amdgpu_wreg_t smc_wreg; 885 /* protects concurrent PCIE register access */ 886 spinlock_t pcie_idx_lock; 887 amdgpu_rreg_t pcie_rreg; 888 amdgpu_wreg_t pcie_wreg; 889 amdgpu_rreg_t pciep_rreg; 890 amdgpu_wreg_t pciep_wreg; 891 amdgpu_rreg_ext_t pcie_rreg_ext; 892 amdgpu_wreg_ext_t pcie_wreg_ext; 893 amdgpu_rreg64_t pcie_rreg64; 894 amdgpu_wreg64_t pcie_wreg64; 895 amdgpu_rreg64_ext_t pcie_rreg64_ext; 896 amdgpu_wreg64_ext_t pcie_wreg64_ext; 897 /* protects concurrent UVD register access */ 898 spinlock_t uvd_ctx_idx_lock; 899 amdgpu_rreg_t uvd_ctx_rreg; 900 amdgpu_wreg_t uvd_ctx_wreg; 901 /* protects concurrent DIDT register access */ 902 spinlock_t didt_idx_lock; 903 amdgpu_rreg_t didt_rreg; 904 amdgpu_wreg_t didt_wreg; 905 /* protects concurrent gc_cac register access */ 906 spinlock_t gc_cac_idx_lock; 907 amdgpu_rreg_t gc_cac_rreg; 908 amdgpu_wreg_t gc_cac_wreg; 909 /* protects concurrent se_cac register access */ 910 spinlock_t se_cac_idx_lock; 911 amdgpu_rreg_t se_cac_rreg; 912 amdgpu_wreg_t se_cac_wreg; 913 /* protects concurrent ENDPOINT (audio) register access */ 914 spinlock_t audio_endpt_idx_lock; 915 amdgpu_block_rreg_t audio_endpt_rreg; 916 amdgpu_block_wreg_t audio_endpt_wreg; 917 struct amdgpu_doorbell doorbell; 918 919 /* clock/pll info */ 920 struct amdgpu_clock clock; 921 922 /* MC */ 923 struct amdgpu_gmc gmc; 924 struct amdgpu_gart gart; 925 dma_addr_t dummy_page_addr; 926 struct amdgpu_vm_manager vm_manager; 927 struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS]; 928 DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS); 929 930 /* memory management */ 931 struct amdgpu_mman mman; 932 struct amdgpu_mem_scratch mem_scratch; 933 struct amdgpu_wb wb; 934 atomic64_t num_bytes_moved; 935 atomic64_t num_evictions; 936 atomic64_t num_vram_cpu_page_faults; 937 atomic_t gpu_reset_counter; 938 atomic_t vram_lost_counter; 939 940 /* data for buffer migration throttling */ 941 struct { 942 spinlock_t lock; 943 s64 last_update_us; 944 s64 accum_us; /* accumulated microseconds */ 945 s64 accum_us_vis; /* for visible VRAM */ 946 u32 log2_max_MBps; 947 } mm_stats; 948 949 /* discovery*/ 950 struct amdgpu_discovery_info discovery; 951 952 /* display */ 953 bool enable_virtual_display; 954 struct amdgpu_vkms_output *amdgpu_vkms_output; 955 struct amdgpu_mode_info mode_info; 956 /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */ 957 struct delayed_work hotplug_work; 958 struct amdgpu_irq_src crtc_irq; 959 struct amdgpu_irq_src vline0_irq; 960 struct amdgpu_irq_src vupdate_irq; 961 struct amdgpu_irq_src pageflip_irq; 962 struct amdgpu_irq_src hpd_irq; 963 struct amdgpu_irq_src dmub_trace_irq; 964 struct amdgpu_irq_src dmub_outbox_irq; 965 966 /* rings */ 967 u64 fence_context; 968 unsigned num_rings; 969 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; 970 struct dma_fence __rcu *gang_submit; 971 bool ib_pool_ready; 972 struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX]; 973 struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; 974 975 /* interrupts */ 976 struct amdgpu_irq irq; 977 978 /* powerplay */ 979 struct amd_powerplay powerplay; 980 struct amdgpu_pm pm; 981 u64 cg_flags; 982 u32 pg_flags; 983 984 /* nbio */ 985 struct amdgpu_nbio nbio; 986 987 /* hdp */ 988 struct amdgpu_hdp hdp; 989 990 /* smuio */ 991 struct amdgpu_smuio smuio; 992 993 /* mmhub */ 994 struct amdgpu_mmhub mmhub; 995 996 /* gfxhub */ 997 struct amdgpu_gfxhub gfxhub; 998 999 /* gfx */ 1000 struct amdgpu_gfx gfx; 1001 1002 /* sdma */ 1003 struct amdgpu_sdma sdma; 1004 1005 /* lsdma */ 1006 struct amdgpu_lsdma lsdma; 1007 1008 /* uvd */ 1009 struct amdgpu_uvd uvd; 1010 1011 /* vce */ 1012 struct amdgpu_vce vce; 1013 1014 /* vcn */ 1015 struct amdgpu_vcn vcn; 1016 1017 /* jpeg */ 1018 struct amdgpu_jpeg jpeg; 1019 1020 /* vpe */ 1021 struct amdgpu_vpe vpe; 1022 1023 /* umsch */ 1024 struct amdgpu_umsch_mm umsch_mm; 1025 bool enable_umsch_mm; 1026 1027 /* firmwares */ 1028 struct amdgpu_firmware firmware; 1029 1030 /* PSP */ 1031 struct psp_context psp; 1032 1033 /* GDS */ 1034 struct amdgpu_gds gds; 1035 1036 /* for userq and VM fences */ 1037 struct amdgpu_seq64 seq64; 1038 1039 /* UMC */ 1040 struct amdgpu_umc umc; 1041 1042 /* display related functionality */ 1043 struct amdgpu_display_manager dm; 1044 1045 #if defined(CONFIG_DRM_AMD_ISP) 1046 /* isp */ 1047 struct amdgpu_isp isp; 1048 #endif 1049 1050 /* mes */ 1051 bool enable_mes; 1052 bool enable_mes_kiq; 1053 bool enable_uni_mes; 1054 struct amdgpu_mes mes; 1055 struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM]; 1056 const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM]; 1057 1058 /* xarray used to retrieve the user queue fence driver reference 1059 * in the EOP interrupt handler to signal the particular user 1060 * queue fence. 1061 */ 1062 struct xarray userq_xa; 1063 /** 1064 * @userq_doorbell_xa: Global user queue map (doorbell index → queue) 1065 * Key: doorbell_index (unique global identifier for the queue) 1066 * Value: struct amdgpu_usermode_queue 1067 */ 1068 struct xarray userq_doorbell_xa; 1069 1070 /* df */ 1071 struct amdgpu_df df; 1072 1073 /* MCA */ 1074 struct amdgpu_mca mca; 1075 1076 /* ACA */ 1077 struct amdgpu_aca aca; 1078 1079 /* CPER */ 1080 struct amdgpu_cper cper; 1081 1082 struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; 1083 uint32_t harvest_ip_mask; 1084 int num_ip_blocks; 1085 struct mutex mn_lock; 1086 DECLARE_HASHTABLE(mn_hash, 7); 1087 1088 /* tracking pinned memory */ 1089 atomic64_t vram_pin_size; 1090 atomic64_t visible_pin_size; 1091 atomic64_t gart_pin_size; 1092 1093 /* soc15 register offset based on ip, instance and segment */ 1094 uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; 1095 struct amdgpu_ip_map_info ip_map; 1096 1097 /* delayed work_func for deferring clockgating during resume */ 1098 struct delayed_work delayed_init_work; 1099 1100 struct amdgpu_virt virt; 1101 1102 /* record hw reset is performed */ 1103 bool has_hw_reset; 1104 u8 reset_magic[AMDGPU_RESET_MAGIC_NUM]; 1105 1106 /* s3/s4 mask */ 1107 bool in_suspend; 1108 bool in_s3; 1109 bool in_s4; 1110 bool in_s0ix; 1111 suspend_state_t last_suspend_state; 1112 1113 enum pp_mp1_state mp1_state; 1114 struct amdgpu_doorbell_index doorbell_index; 1115 1116 struct mutex notifier_lock; 1117 1118 int asic_reset_res; 1119 struct work_struct xgmi_reset_work; 1120 struct list_head reset_list; 1121 1122 long gfx_timeout; 1123 long sdma_timeout; 1124 long video_timeout; 1125 long compute_timeout; 1126 long psp_timeout; 1127 1128 uint64_t unique_id; 1129 uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; 1130 1131 /* enable runtime pm on the device */ 1132 bool in_runpm; 1133 bool has_pr3; 1134 1135 bool ucode_sysfs_en; 1136 1137 struct amdgpu_fru_info *fru_info; 1138 atomic_t throttling_logging_enabled; 1139 struct ratelimit_state throttling_logging_rs; 1140 uint32_t ras_hw_enabled; 1141 uint32_t ras_enabled; 1142 bool ras_default_ecc_enabled; 1143 1144 bool no_hw_access; 1145 struct pci_saved_state *pci_state; 1146 pci_channel_state_t pci_channel_state; 1147 1148 struct amdgpu_pcie_reset_ctx pcie_reset_ctx; 1149 1150 /* Track auto wait count on s_barrier settings */ 1151 bool barrier_has_auto_waitcnt; 1152 1153 struct amdgpu_reset_control *reset_cntl; 1154 uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE]; 1155 1156 bool ram_is_direct_mapped; 1157 1158 struct list_head ras_list; 1159 1160 struct amdgpu_reset_domain *reset_domain; 1161 1162 struct mutex benchmark_mutex; 1163 1164 bool scpm_enabled; 1165 uint32_t scpm_status; 1166 1167 struct work_struct reset_work; 1168 1169 bool dc_enabled; 1170 /* Mask of active clusters */ 1171 uint32_t aid_mask; 1172 1173 /* Debug */ 1174 bool debug_vm; 1175 bool debug_largebar; 1176 bool debug_disable_soft_recovery; 1177 bool debug_use_vram_fw_buf; 1178 bool debug_enable_ras_aca; 1179 bool debug_exp_resets; 1180 bool debug_disable_gpu_ring_reset; 1181 bool debug_vm_userptr; 1182 bool debug_disable_ce_logs; 1183 bool debug_enable_ce_cs; 1184 1185 /* Protection for the following isolation structure */ 1186 struct mutex enforce_isolation_mutex; 1187 enum amdgpu_enforce_isolation_mode enforce_isolation[MAX_XCP]; 1188 struct amdgpu_isolation { 1189 void *owner; 1190 struct dma_fence *spearhead; 1191 struct amdgpu_sync active; 1192 struct amdgpu_sync prev; 1193 } isolation[MAX_XCP]; 1194 1195 struct amdgpu_init_level *init_lvl; 1196 1197 /* This flag is used to determine how VRAM allocations are handled for APUs 1198 * in KFD: VRAM or GTT. 1199 */ 1200 bool apu_prefer_gtt; 1201 1202 bool userq_halt_for_enforce_isolation; 1203 struct work_struct userq_reset_work; 1204 struct amdgpu_uid *uid_info; 1205 1206 /* KFD 1207 * Must be last --ends in a flexible-array member. 1208 */ 1209 struct amdgpu_kfd_dev kfd; 1210 }; 1211 1212 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, 1213 uint8_t ip, uint8_t inst) 1214 { 1215 /* This considers only major/minor/rev and ignores 1216 * subrevision/variant fields. 1217 */ 1218 return adev->ip_versions[ip][inst] & ~0xFFU; 1219 } 1220 1221 static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev, 1222 uint8_t ip, uint8_t inst) 1223 { 1224 /* This returns full version - major/minor/rev/variant/subrevision */ 1225 return adev->ip_versions[ip][inst]; 1226 } 1227 1228 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) 1229 { 1230 return container_of(ddev, struct amdgpu_device, ddev); 1231 } 1232 1233 static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev) 1234 { 1235 return &adev->ddev; 1236 } 1237 1238 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev) 1239 { 1240 return container_of(bdev, struct amdgpu_device, mman.bdev); 1241 } 1242 1243 static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev) 1244 { 1245 return !!adev->aid_mask; 1246 } 1247 1248 int amdgpu_device_init(struct amdgpu_device *adev, 1249 uint32_t flags); 1250 void amdgpu_device_fini_hw(struct amdgpu_device *adev); 1251 void amdgpu_device_fini_sw(struct amdgpu_device *adev); 1252 1253 int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); 1254 1255 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos, 1256 void *buf, size_t size, bool write); 1257 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos, 1258 void *buf, size_t size, bool write); 1259 1260 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, 1261 void *buf, size_t size, bool write); 1262 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, 1263 uint32_t inst, uint32_t reg_addr, char reg_name[], 1264 uint32_t expected_value, uint32_t mask); 1265 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, 1266 uint32_t reg, uint32_t acc_flags); 1267 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev, 1268 u64 reg_addr); 1269 uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev, 1270 uint32_t reg, uint32_t acc_flags, 1271 uint32_t xcc_id); 1272 void amdgpu_device_wreg(struct amdgpu_device *adev, 1273 uint32_t reg, uint32_t v, 1274 uint32_t acc_flags); 1275 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, 1276 u64 reg_addr, u32 reg_data); 1277 void amdgpu_device_xcc_wreg(struct amdgpu_device *adev, 1278 uint32_t reg, uint32_t v, 1279 uint32_t acc_flags, 1280 uint32_t xcc_id); 1281 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, 1282 uint32_t reg, uint32_t v, uint32_t xcc_id); 1283 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); 1284 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); 1285 1286 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, 1287 u32 reg_addr); 1288 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, 1289 u32 reg_addr); 1290 u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev, 1291 u64 reg_addr); 1292 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, 1293 u32 reg_addr, u32 reg_data); 1294 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, 1295 u32 reg_addr, u64 reg_data); 1296 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev, 1297 u64 reg_addr, u64 reg_data); 1298 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev); 1299 bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev, 1300 enum amd_asic_type asic_type); 1301 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); 1302 1303 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev); 1304 1305 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 1306 struct amdgpu_reset_context *reset_context); 1307 1308 int amdgpu_do_asic_reset(struct list_head *device_list_handle, 1309 struct amdgpu_reset_context *reset_context); 1310 1311 int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context); 1312 1313 int emu_soc_asic_init(struct amdgpu_device *adev); 1314 1315 /* 1316 * Registers read & write functions. 1317 */ 1318 #define AMDGPU_REGS_NO_KIQ (1<<1) 1319 #define AMDGPU_REGS_RLC (1<<2) 1320 1321 #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) 1322 #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) 1323 1324 #define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0) 1325 #define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0) 1326 1327 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg)) 1328 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v)) 1329 1330 #define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0) 1331 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0)) 1332 #define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0) 1333 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1334 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1335 #define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst) 1336 #define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst) 1337 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) 1338 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) 1339 #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg)) 1340 #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v)) 1341 #define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg)) 1342 #define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v)) 1343 #define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg)) 1344 #define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v)) 1345 #define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg)) 1346 #define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v)) 1347 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg)) 1348 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v)) 1349 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg)) 1350 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v)) 1351 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg)) 1352 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) 1353 #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg)) 1354 #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v)) 1355 #define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg)) 1356 #define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v)) 1357 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) 1358 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) 1359 #define WREG32_P(reg, val, mask) \ 1360 do { \ 1361 uint32_t tmp_ = RREG32(reg); \ 1362 tmp_ &= (mask); \ 1363 tmp_ |= ((val) & ~(mask)); \ 1364 WREG32(reg, tmp_); \ 1365 } while (0) 1366 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) 1367 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) 1368 #define WREG32_PLL_P(reg, val, mask) \ 1369 do { \ 1370 uint32_t tmp_ = RREG32_PLL(reg); \ 1371 tmp_ &= (mask); \ 1372 tmp_ |= ((val) & ~(mask)); \ 1373 WREG32_PLL(reg, tmp_); \ 1374 } while (0) 1375 1376 #define WREG32_SMC_P(_Reg, _Val, _Mask) \ 1377 do { \ 1378 u32 tmp = RREG32_SMC(_Reg); \ 1379 tmp &= (_Mask); \ 1380 tmp |= ((_Val) & ~(_Mask)); \ 1381 WREG32_SMC(_Reg, tmp); \ 1382 } while (0) 1383 1384 #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false)) 1385 1386 #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT 1387 #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK 1388 1389 #define REG_SET_FIELD(orig_val, reg, field, field_val) \ 1390 (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \ 1391 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field)))) 1392 1393 #define REG_GET_FIELD(value, reg, field) \ 1394 (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field)) 1395 1396 #define WREG32_FIELD(reg, field, val) \ 1397 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) 1398 1399 #define WREG32_FIELD_OFFSET(reg, offset, field, val) \ 1400 WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) 1401 1402 #define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l)) 1403 /* 1404 * BIOS helpers. 1405 */ 1406 #define RBIOS8(i) (adev->bios[i]) 1407 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) 1408 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) 1409 1410 /* 1411 * ASICs macro. 1412 */ 1413 #define amdgpu_asic_set_vga_state(adev, state) \ 1414 ((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0) 1415 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) 1416 #define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev)) 1417 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) 1418 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 1419 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 1420 #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev)) 1421 #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l)) 1422 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 1423 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 1424 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) 1425 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) 1426 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev)) 1427 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) 1428 #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) 1429 #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) 1430 #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev)) 1431 #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev))) 1432 #define amdgpu_asic_supports_baco(adev) \ 1433 ((adev)->asic_funcs->supports_baco ? (adev)->asic_funcs->supports_baco((adev)) : 0) 1434 #define amdgpu_asic_pre_asic_init(adev) \ 1435 { \ 1436 if ((adev)->asic_funcs && (adev)->asic_funcs->pre_asic_init) \ 1437 (adev)->asic_funcs->pre_asic_init((adev)); \ 1438 } 1439 #define amdgpu_asic_update_umd_stable_pstate(adev, enter) \ 1440 ((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0) 1441 #define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c)) 1442 1443 #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)) 1444 1445 #define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i)) 1446 #define for_each_inst(i, inst_mask) \ 1447 for (i = ffs(inst_mask); i-- != 0; \ 1448 i = ffs(inst_mask & BIT_MASK_UPPER(i + 1))) 1449 1450 /* Common functions */ 1451 bool amdgpu_device_has_job_running(struct amdgpu_device *adev); 1452 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); 1453 int amdgpu_device_gpu_recover(struct amdgpu_device *adev, 1454 struct amdgpu_job *job, 1455 struct amdgpu_reset_context *reset_context); 1456 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); 1457 int amdgpu_device_pci_reset(struct amdgpu_device *adev); 1458 bool amdgpu_device_need_post(struct amdgpu_device *adev); 1459 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev); 1460 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev); 1461 1462 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, 1463 u64 num_vis_bytes); 1464 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev); 1465 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, 1466 const u32 *registers, 1467 const u32 array_size); 1468 1469 int amdgpu_device_mode1_reset(struct amdgpu_device *adev); 1470 int amdgpu_device_link_reset(struct amdgpu_device *adev); 1471 bool amdgpu_device_supports_atpx(struct amdgpu_device *adev); 1472 bool amdgpu_device_supports_px(struct amdgpu_device *adev); 1473 bool amdgpu_device_supports_boco(struct amdgpu_device *adev); 1474 bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev); 1475 int amdgpu_device_supports_baco(struct amdgpu_device *adev); 1476 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev); 1477 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, 1478 struct amdgpu_device *peer_adev); 1479 int amdgpu_device_baco_enter(struct amdgpu_device *adev); 1480 int amdgpu_device_baco_exit(struct amdgpu_device *adev); 1481 1482 void amdgpu_device_flush_hdp(struct amdgpu_device *adev, 1483 struct amdgpu_ring *ring); 1484 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, 1485 struct amdgpu_ring *ring); 1486 1487 void amdgpu_device_halt(struct amdgpu_device *adev); 1488 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev, 1489 u32 reg); 1490 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, 1491 u32 reg, u32 v); 1492 struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev); 1493 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, 1494 struct dma_fence *gang); 1495 struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev, 1496 struct amdgpu_ring *ring, 1497 struct amdgpu_job *job); 1498 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev); 1499 ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring); 1500 ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset); 1501 1502 /* atpx handler */ 1503 #if defined(CONFIG_VGA_SWITCHEROO) 1504 void amdgpu_register_atpx_handler(void); 1505 void amdgpu_unregister_atpx_handler(void); 1506 bool amdgpu_has_atpx_dgpu_power_cntl(void); 1507 bool amdgpu_is_atpx_hybrid(void); 1508 bool amdgpu_has_atpx(void); 1509 #else 1510 static inline void amdgpu_register_atpx_handler(void) {} 1511 static inline void amdgpu_unregister_atpx_handler(void) {} 1512 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } 1513 static inline bool amdgpu_is_atpx_hybrid(void) { return false; } 1514 static inline bool amdgpu_has_atpx(void) { return false; } 1515 #endif 1516 1517 /* 1518 * KMS 1519 */ 1520 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; 1521 extern const int amdgpu_max_kms_ioctl; 1522 1523 int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags); 1524 void amdgpu_driver_unload_kms(struct drm_device *dev); 1525 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); 1526 void amdgpu_driver_postclose_kms(struct drm_device *dev, 1527 struct drm_file *file_priv); 1528 void amdgpu_driver_release_kms(struct drm_device *dev); 1529 1530 int amdgpu_device_prepare(struct drm_device *dev); 1531 void amdgpu_device_complete(struct drm_device *dev); 1532 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); 1533 int amdgpu_device_resume(struct drm_device *dev, bool fbcon); 1534 u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc); 1535 int amdgpu_enable_vblank_kms(struct drm_crtc *crtc); 1536 void amdgpu_disable_vblank_kms(struct drm_crtc *crtc); 1537 int amdgpu_info_ioctl(struct drm_device *dev, void *data, 1538 struct drm_file *filp); 1539 1540 /* 1541 * functions used by amdgpu_encoder.c 1542 */ 1543 struct amdgpu_afmt_acr { 1544 u32 clock; 1545 1546 int n_32khz; 1547 int cts_32khz; 1548 1549 int n_44_1khz; 1550 int cts_44_1khz; 1551 1552 int n_48khz; 1553 int cts_48khz; 1554 1555 }; 1556 1557 struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); 1558 1559 /* amdgpu_acpi.c */ 1560 1561 struct amdgpu_numa_info { 1562 uint64_t size; 1563 int pxm; 1564 int nid; 1565 }; 1566 1567 /* ATCS Device/Driver State */ 1568 #define AMDGPU_ATCS_PSC_DEV_STATE_D0 0 1569 #define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3 1570 #define AMDGPU_ATCS_PSC_DRV_STATE_OPR 0 1571 #define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR 1 1572 1573 #if defined(CONFIG_ACPI) 1574 int amdgpu_acpi_init(struct amdgpu_device *adev); 1575 void amdgpu_acpi_fini(struct amdgpu_device *adev); 1576 bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev); 1577 bool amdgpu_acpi_is_power_shift_control_supported(void); 1578 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, 1579 u8 perf_req, bool advertise); 1580 int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, 1581 u8 dev_state, bool drv_state); 1582 int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev, 1583 enum amdgpu_ss ss_state); 1584 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); 1585 int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset, 1586 u64 *tmr_size); 1587 int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id, 1588 struct amdgpu_numa_info *numa_info); 1589 1590 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); 1591 bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev); 1592 void amdgpu_acpi_detect(void); 1593 void amdgpu_acpi_release(void); 1594 #else 1595 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } 1596 static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, 1597 u64 *tmr_offset, u64 *tmr_size) 1598 { 1599 return -EINVAL; 1600 } 1601 static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, 1602 int xcc_id, 1603 struct amdgpu_numa_info *numa_info) 1604 { 1605 return -EINVAL; 1606 } 1607 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } 1608 static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; } 1609 static inline void amdgpu_acpi_detect(void) { } 1610 static inline void amdgpu_acpi_release(void) { } 1611 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } 1612 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, 1613 u8 dev_state, bool drv_state) { return 0; } 1614 static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev, 1615 enum amdgpu_ss ss_state) 1616 { 1617 return 0; 1618 } 1619 static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { } 1620 #endif 1621 1622 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) 1623 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); 1624 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); 1625 #else 1626 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } 1627 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } 1628 #endif 1629 1630 #if defined(CONFIG_DRM_AMD_ISP) 1631 int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev); 1632 #endif 1633 1634 void amdgpu_register_gpu_instance(struct amdgpu_device *adev); 1635 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev); 1636 1637 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, 1638 pci_channel_state_t state); 1639 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev); 1640 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev); 1641 void amdgpu_pci_resume(struct pci_dev *pdev); 1642 1643 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev); 1644 bool amdgpu_device_load_pci_state(struct pci_dev *pdev); 1645 1646 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev); 1647 1648 int amdgpu_device_set_cg_state(struct amdgpu_device *adev, 1649 enum amd_clockgating_state state); 1650 int amdgpu_device_set_pg_state(struct amdgpu_device *adev, 1651 enum amd_powergating_state state); 1652 1653 static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev) 1654 { 1655 return amdgpu_gpu_recovery != 0 && 1656 adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT && 1657 adev->compute_timeout != MAX_SCHEDULE_TIMEOUT && 1658 adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT && 1659 adev->video_timeout != MAX_SCHEDULE_TIMEOUT; 1660 } 1661 1662 #include "amdgpu_object.h" 1663 1664 static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) 1665 { 1666 return adev->gmc.tmz_enabled; 1667 } 1668 1669 int amdgpu_in_reset(struct amdgpu_device *adev); 1670 1671 extern const struct attribute_group amdgpu_vram_mgr_attr_group; 1672 extern const struct attribute_group amdgpu_gtt_mgr_attr_group; 1673 extern const struct attribute_group amdgpu_flash_attr_group; 1674 1675 void amdgpu_set_init_level(struct amdgpu_device *adev, 1676 enum amdgpu_init_lvl_id lvl); 1677 1678 static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev) 1679 { 1680 u32 status; 1681 int r; 1682 1683 r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status); 1684 if (r || PCI_POSSIBLE_ERROR(status)) { 1685 dev_err(adev->dev, "device lost from bus!"); 1686 return -ENODEV; 1687 } 1688 1689 return 0; 1690 } 1691 1692 void amdgpu_device_set_uid(struct amdgpu_uid *uid_info, 1693 enum amdgpu_uid_type type, uint8_t inst, 1694 uint64_t uid); 1695 uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info, 1696 enum amdgpu_uid_type type, uint8_t inst); 1697 #endif 1698