/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_GFX_H__
#define __AMDGPU_GFX_H__

/*
 * GFX stuff
 */
#include "clearstate_defs.h"
#include "amdgpu_ring.h"
#include "amdgpu_rlc.h"
#include "amdgpu_imu.h"
#include "soc15.h"
#include "amdgpu_ras.h"
#include "amdgpu_ring_mux.h"
#include "amdgpu_xcp.h"

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE		0x00000000L
#define AMDGPU_GFX_SAFE_MODE		0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE	0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE	0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE	0x00000008L

#define AMDGPU_MAX_GC_INSTANCES		8
#define AMDGPU_MAX_QUEUES		128

#define AMDGPU_MAX_GFX_QUEUES		AMDGPU_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES	AMDGPU_MAX_QUEUES

enum amdgpu_gfx_pipe_priority {
	AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1,
	AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2
};

#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM  0
#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM  15

/* 1 second timeout */
#define GFX_PROFILE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

enum amdgpu_gfx_partition {
	AMDGPU_SPX_PARTITION_MODE = 0,
	AMDGPU_DPX_PARTITION_MODE = 1,
	AMDGPU_TPX_PARTITION_MODE = 2,
	AMDGPU_QPX_PARTITION_MODE = 3,
	AMDGPU_CPX_PARTITION_MODE = 4,
	AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1,
	/* Automatically choose the right mode */
	AMDGPU_AUTO_COMPUTE_PARTITION_MODE = -2,
};

#define NUM_XCC(x) hweight16(x)
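/*
 * For example (usage sketch, assuming one bit is set per enabled XCC
 * instance), the number of enabled XCCs can be derived from the per-device
 * mask:
 *
 *	int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
 */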
enum amdgpu_gfx_ras_mem_id_type {
	AMDGPU_GFX_CP_MEM = 0,
	AMDGPU_GFX_GCEA_MEM,
	AMDGPU_GFX_GC_CANE_MEM,
	AMDGPU_GFX_GCUTCL2_MEM,
	AMDGPU_GFX_GDS_MEM,
	AMDGPU_GFX_LDS_MEM,
	AMDGPU_GFX_RLC_MEM,
	AMDGPU_GFX_SP_MEM,
	AMDGPU_GFX_SPI_MEM,
	AMDGPU_GFX_SQC_MEM,
	AMDGPU_GFX_SQ_MEM,
	AMDGPU_GFX_TA_MEM,
	AMDGPU_GFX_TCC_MEM,
	AMDGPU_GFX_TCA_MEM,
	AMDGPU_GFX_TCI_MEM,
	AMDGPU_GFX_TCP_MEM,
	AMDGPU_GFX_TD_MEM,
	AMDGPU_GFX_TCX_MEM,
	AMDGPU_GFX_ATC_L2_MEM,
	AMDGPU_GFX_UTCL2_MEM,
	AMDGPU_GFX_VML2_MEM,
	AMDGPU_GFX_VML2_WALKER_MEM,
	AMDGPU_GFX_MEM_TYPE_NUM
};

struct amdgpu_mec {
	struct amdgpu_bo *hpd_eop_obj;
	u64 hpd_eop_gpu_addr;
	struct amdgpu_bo *mec_fw_obj;
	u64 mec_fw_gpu_addr;
	struct amdgpu_bo *mec_fw_data_obj;
	u64 mec_fw_data_gpu_addr;

	u32 num_mec;
	u32 num_pipe_per_mec;
	u32 num_queue_per_pipe;

	void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
};

struct amdgpu_mec_bitmap {
	/* These are the resources for which amdgpu takes ownership */
	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};

enum amdgpu_unmap_queues_action {
	PREEMPT_QUEUES = 0,
	RESET_QUEUES,
	DISABLE_PROCESS_QUEUES,
	PREEMPT_QUEUES_NO_UNMAP,
};

struct kiq_pm4_funcs {
	/* Support ASIC-specific kiq pm4 packets */
	void (*kiq_set_resources)(struct amdgpu_ring *kiq_ring,
				  uint64_t queue_mask);
	void (*kiq_map_queues)(struct amdgpu_ring *kiq_ring,
			       struct amdgpu_ring *ring);
	void (*kiq_unmap_queues)(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring,
				 enum amdgpu_unmap_queues_action action,
				 u64 gpu_addr, u64 seq);
	void (*kiq_query_status)(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring,
				 u64 addr,
				 u64 seq);
	void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
				    uint16_t pasid, uint32_t flush_type,
				    bool all_hub);
	void (*kiq_reset_hw_queue)(struct amdgpu_ring *kiq_ring,
				   uint32_t queue_type, uint32_t me_id,
				   uint32_t pipe_id, uint32_t queue_id,
				   uint32_t xcc_id, uint32_t vmid);
	/* Packet sizes */
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int query_status_size;
	int invalidate_tlbs_size;
};

struct amdgpu_kiq {
	u64 eop_gpu_addr;
	struct amdgpu_bo *eop_obj;
	spinlock_t ring_lock;
	struct amdgpu_ring ring;
	struct amdgpu_irq_src irq;
	const struct kiq_pm4_funcs *pmf;
	void *mqd_backup;
};
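/*
 * Usage sketch (illustrative only; the real flow lives in the KCQ enable
 * path, e.g. amdgpu_gfx_enable_kcq()): the packet-size fields above are
 * typically used to reserve KIQ ring space before emitting the corresponding
 * packets for each compute ring, roughly:
 *
 *	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
 *
 *	r = amdgpu_ring_alloc(&kiq->ring, kiq->pmf->set_resources_size +
 *			      kiq->pmf->map_queues_size *
 *			      adev->gfx.num_compute_rings);
 *	kiq->pmf->kiq_set_resources(&kiq->ring, queue_mask);
 *	for (i = 0; i < adev->gfx.num_compute_rings; i++)
 *		kiq->pmf->kiq_map_queues(&kiq->ring, &adev->gfx.compute_ring[i]);
 *
 * (with per-XCC ring indexing on multi-XCC parts).
 */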
/*
 * GFX configurations
 */
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2

struct amdgpu_rb_config {
	uint32_t rb_backend_disable;
	uint32_t user_rb_backend_disable;
	uint32_t raster_config;
	uint32_t raster_config_1;
};

struct gb_addr_config {
	uint16_t pipe_interleave_size;
	uint8_t num_pipes;
	uint8_t max_compress_frags;
	uint8_t num_banks;
	uint8_t num_se;
	uint8_t num_rb_per_se;
	uint8_t num_pkrs;
};

struct amdgpu_gfx_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned num_banks;
	unsigned num_ranks;
	unsigned gb_addr_config;
	unsigned num_rbs;
	unsigned gs_vgt_table_depth;
	unsigned gs_prim_buffer_depth;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];

	struct gb_addr_config gb_addr_config_fields;
	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];

	/* gfx configure feature */
	uint32_t double_offchip_lds_buf;
	/* cached value of DB_DEBUG2 */
	uint32_t db_debug2;
	/* gfx10 specific config */
	uint32_t num_sc_per_sh;
	uint32_t num_packer_per_sc;
	uint32_t pa_sc_tile_steering_override;
	/* Whether texture coordinate truncation is conformant. */
	bool ta_cntl2_truncate_coord_mode;
	uint64_t tcc_disabled_mask;
	uint32_t gc_num_tcp_per_sa;
	uint32_t gc_num_sdp_interface;
	uint32_t gc_num_tcps;
	uint32_t gc_num_tcp_per_wpg;
	uint32_t gc_tcp_l1_size;
	uint32_t gc_num_sqc_per_wgp;
	uint32_t gc_l1_instruction_cache_size_per_sqc;
	uint32_t gc_l1_data_cache_size_per_sqc;
	uint32_t gc_gl1c_per_sa;
	uint32_t gc_gl1c_size_per_instance;
	uint32_t gc_gl2c_per_gpu;
	uint32_t gc_tcp_size_per_cu;
	uint32_t gc_num_cu_per_sqc;
	uint32_t gc_tcc_size;
	uint32_t gc_tcp_cache_line_size;
	uint32_t gc_instruction_cache_size_per_sqc;
	uint32_t gc_instruction_cache_line_size;
	uint32_t gc_scalar_data_cache_size_per_sqc;
	uint32_t gc_scalar_data_cache_line_size;
	uint32_t gc_tcc_cache_line_size;
};

struct amdgpu_cu_info {
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;

	/* total active CU number */
	uint32_t number;
	uint32_t ao_cu_mask;
	uint32_t ao_cu_bitmap[4][4];
	uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
};
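/*
 * For example (rough arithmetic only), the fields above give an upper bound
 * on the number of waves that can be in flight at once:
 *
 *	max_waves = cu_info->number * cu_info->simd_per_cu *
 *		    cu_info->max_waves_per_simd;
 *
 * where 'number' counts active CUs across all shader engines.
 */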
struct amdgpu_gfx_ras {
	struct amdgpu_ras_block_object ras_block;
	void (*enable_watchdog_timer)(struct amdgpu_device *adev);
	int (*rlc_gc_fed_irq)(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry);
	int (*poison_consumption_handler)(struct amdgpu_device *adev,
					  struct amdgpu_iv_entry *entry);
};

struct amdgpu_gfx_shadow_info {
	u32 shadow_size;
	u32 shadow_alignment;
	u32 csa_size;
	u32 csa_alignment;
};

struct amdgpu_gfx_funcs {
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 instance, int xcc_id);
	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			       uint32_t wave, uint32_t *dst, int *no_fields);
	void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				uint32_t wave, uint32_t thread, uint32_t start,
				uint32_t size, uint32_t *dst);
	void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				uint32_t wave, uint32_t start, uint32_t size,
				uint32_t *dst);
	void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
				 u32 queue, u32 vmid, u32 xcc_id);
	void (*init_spm_golden)(struct amdgpu_device *adev);
	void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
	int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
				   struct amdgpu_gfx_shadow_info *shadow_info);
	enum amdgpu_gfx_partition
		(*query_partition_mode)(struct amdgpu_device *adev);
	int (*switch_partition_mode)(struct amdgpu_device *adev,
				     int num_xccs_per_xcp);
	int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
	int (*get_xccs_per_xcp)(struct amdgpu_device *adev);
};

struct sq_work {
	struct work_struct work;
	unsigned ih_data;
};

struct amdgpu_pfp {
	struct amdgpu_bo *pfp_fw_obj;
	uint64_t pfp_fw_gpu_addr;
	uint32_t *pfp_fw_ptr;

	struct amdgpu_bo *pfp_fw_data_obj;
	uint64_t pfp_fw_data_gpu_addr;
	uint32_t *pfp_fw_data_ptr;
};

struct amdgpu_ce {
	struct amdgpu_bo *ce_fw_obj;
	uint64_t ce_fw_gpu_addr;
	uint32_t *ce_fw_ptr;
};

struct amdgpu_me {
	struct amdgpu_bo *me_fw_obj;
	uint64_t me_fw_gpu_addr;
	uint32_t *me_fw_ptr;

	struct amdgpu_bo *me_fw_data_obj;
	uint64_t me_fw_data_gpu_addr;
	uint32_t *me_fw_data_ptr;

	uint32_t num_me;
	uint32_t num_pipe_per_me;
	uint32_t num_queue_per_pipe;
	void *mqd_backup[AMDGPU_MAX_GFX_RINGS];

	/* These are the resources for which amdgpu takes ownership */
	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
};

struct amdgpu_isolation_work {
	struct amdgpu_device *adev;
	u32 xcp_id;
	struct delayed_work work;
};

struct amdgpu_gfx {
	struct mutex gpu_clock_mutex;
	struct amdgpu_gfx_config config;
	struct amdgpu_rlc rlc;
	struct amdgpu_pfp pfp;
	struct amdgpu_ce ce;
	struct amdgpu_me me;
	struct amdgpu_mec mec;
	struct amdgpu_mec_bitmap mec_bitmap[AMDGPU_MAX_GC_INSTANCES];
	struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES];
	struct amdgpu_imu imu;
	bool rs64_enable; /* firmware format */
	const struct firmware *me_fw; /* ME firmware */
	uint32_t me_fw_version;
	const struct firmware *pfp_fw; /* PFP firmware */
	uint32_t pfp_fw_version;
	const struct firmware *ce_fw; /* CE firmware */
	uint32_t ce_fw_version;
	const struct firmware *rlc_fw; /* RLC firmware */
	uint32_t rlc_fw_version;
	const struct firmware *mec_fw; /* MEC firmware */
	uint32_t mec_fw_version;
	const struct firmware *mec2_fw; /* MEC2 firmware */
	uint32_t mec2_fw_version;
	const struct firmware *imu_fw; /* IMU firmware */
	uint32_t imu_fw_version;
	uint32_t me_feature_version;
	uint32_t ce_feature_version;
	uint32_t pfp_feature_version;
	uint32_t rlc_feature_version;
	uint32_t rlc_srlc_fw_version;
	uint32_t rlc_srlc_feature_version;
	uint32_t rlc_srlg_fw_version;
	uint32_t rlc_srlg_feature_version;
	uint32_t rlc_srls_fw_version;
	uint32_t rlc_srls_feature_version;
	uint32_t rlcp_ucode_version;
	uint32_t rlcp_ucode_feature_version;
	uint32_t rlcv_ucode_version;
	uint32_t rlcv_ucode_feature_version;
	uint32_t mec_feature_version;
	uint32_t mec2_feature_version;
	bool mec_fw_write_wait;
	bool me_fw_write_wait;
	bool cp_fw_write_wait;
	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned num_gfx_rings;
	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
	unsigned num_compute_rings;
	struct amdgpu_irq_src eop_irq;
	struct amdgpu_irq_src priv_reg_irq;
	struct amdgpu_irq_src priv_inst_irq;
	struct amdgpu_irq_src bad_op_irq;
	struct amdgpu_irq_src cp_ecc_error_irq;
	struct amdgpu_irq_src sq_irq;
	struct amdgpu_irq_src rlc_gc_fed_irq;
	struct sq_work sq_work;

	/* gfx status */
	uint32_t gfx_current_status;
	/* ce ram size */
	unsigned ce_ram_size;
	struct amdgpu_cu_info cu_info;
	const struct amdgpu_gfx_funcs *funcs;

	/* reset mask */
	uint32_t grbm_soft_reset;
	uint32_t srbm_soft_reset;
	uint32_t gfx_supported_reset;
	uint32_t compute_supported_reset;

	/* gfx off */
	bool gfx_off_state; /* true: enabled, false: disabled */
	struct mutex gfx_off_mutex; /* mutex to change gfxoff state */
	uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
	struct delayed_work gfx_off_delay_work; /* async work to set gfx block off */
	uint32_t gfx_off_residency; /* last logged residency */
	uint64_t gfx_off_entrycount; /* count of times GPU has entered the GFXOFF state */
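	/*
	 * Usage sketch (illustrative): gfx_off_req_count is a reference count
	 * rather than a flag. Code that needs the GFX block powered up, e.g.
	 * for direct register access, typically brackets the access with
	 *
	 *	amdgpu_gfx_off_ctrl(adev, false);  (disallow GFXOFF, count + 1)
	 *	... register access ...
	 *	amdgpu_gfx_off_ctrl(adev, true);   (re-allow GFXOFF, count - 1)
	 *
	 * and GFXOFF is only armed again once the count drops back to zero.
	 */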
	/* pipe reservation */
	struct mutex pipe_reserve_mutex;
	DECLARE_BITMAP(pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* ras */
	struct ras_common_if *ras_if;
	struct amdgpu_gfx_ras *ras;

	bool is_poweron;

	struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS];
	struct amdgpu_ring_mux muxer;

	bool cp_gfx_shadow; /* for gfx11 */

	uint16_t xcc_mask;
	uint32_t num_xcc_per_xcp;
	struct mutex partition_mutex;
	bool mcbp; /* mid command buffer preemption */

	/* IP reg dump */
	uint32_t *ip_dump_core;
	uint32_t *ip_dump_compute_queues;
	uint32_t *ip_dump_gfx_queues;

	struct mutex reset_sem_mutex;

	/* cleaner shader */
	struct amdgpu_bo *cleaner_shader_obj;
	unsigned int cleaner_shader_size;
	u64 cleaner_shader_gpu_addr;
	void *cleaner_shader_cpu_ptr;
	const void *cleaner_shader_ptr;
	bool enable_cleaner_shader;
	struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
	/* Mutex for synchronizing KFD scheduler operations */
	struct mutex kfd_sch_mutex;
	u64 kfd_sch_req_count[MAX_XCP];
	bool kfd_sch_inactive[MAX_XCP];
	unsigned long enforce_isolation_jiffies[MAX_XCP];
	unsigned long enforce_isolation_time[MAX_XCP];

	atomic_t total_submission_cnt;
	struct delayed_work idle_work;
	bool workload_profile_active;
	struct mutex workload_profile_mutex;
};

struct amdgpu_gfx_ras_reg_entry {
	struct amdgpu_ras_err_status_reg_entry reg_entry;
	enum amdgpu_gfx_ras_mem_id_type mem_id_type;
	uint32_t se_num;
};

struct amdgpu_gfx_ras_mem_id_entry {
	const struct amdgpu_ras_memory_id_entry *mem_id_ent;
	uint32_t size;
};

#define AMDGPU_GFX_MEMID_ENT(x) {(x), ARRAY_SIZE(x)},

#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si)))

/**
 * amdgpu_gfx_create_bitmask - create a bitmask
 *
 * @bit_width: length of the mask
 *
 * Create a variable-length bit mask.
 * Returns the bitmask.
 */
static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width)
{
	return (u32)((1ULL << bit_width) - 1);
}
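/*
 * For example, amdgpu_gfx_create_bitmask(4) yields 0xf; the helper is
 * typically used to build CU/RB enable masks of a given width. The
 * intermediate 1ULL shift keeps bit_width == 32 well defined before the
 * result is truncated back to u32.
 */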
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
				 unsigned max_sh);

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id);

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size, int xcc_id);

int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size, int xcc_id);
void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id);

void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);

int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue);
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue);
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id,
				     int mec, int pipe, int queue);
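/*
 * Illustrative mapping (rough sketch of what the helpers above implement):
 * a MEC queue is flattened into a single bit index as
 *
 *	bit = mec * num_pipe_per_mec * num_queue_per_pipe +
 *	      pipe * num_queue_per_pipe + queue;
 *
 * and that bit indexes the per-XCC queue_bitmap in struct amdgpu_mec_bitmap.
 */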
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring);
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
						struct amdgpu_ring *ring);
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
			       int pipe, int queue);
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
				    int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable);
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value);
int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *residency);
int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value);
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
				   void *err_data,
				   struct amdgpu_iv_entry *entry);
int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry);
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);

int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
					  struct amdgpu_iv_entry *entry);

bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev);
void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
			       void *ras_error_status,
			       void (*func)(struct amdgpu_device *adev, void *ras_error_status,
					    int xcc_id));
int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
				      unsigned int cleaner_shader_size);
void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev);
void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
				    unsigned int cleaner_shader_size,
				    const void *cleaner_shader_ptr);
void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);

void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);

void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);

static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
{
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return "SPX";
	case AMDGPU_DPX_PARTITION_MODE:
		return "DPX";
	case AMDGPU_TPX_PARTITION_MODE:
		return "TPX";
	case AMDGPU_QPX_PARTITION_MODE:
		return "QPX";
	case AMDGPU_CPX_PARTITION_MODE:
		return "CPX";
	default:
		return "UNKNOWN";
	}
}

#endif