/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
#include <drm/drm_suballoc.h>

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_vm;

/* max number of rings */
#define AMDGPU_MAX_RINGS		124
#define AMDGPU_MAX_HWIP_RINGS		64
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_SW_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2
#define AMDGPU_MAX_VPE_RINGS		2

enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	AMDGPU_RING_PRIO_DEFAULT = 1,
	AMDGPU_RING_PRIO_2,
	AMDGPU_RING_PRIO_MAX
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY	(1 << 2)
#define AMDGPU_FENCE_FLAG_EXEC		(1 << 3)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
	AMDGPU_RING_TYPE_SDMA		= AMDGPU_HW_IP_DMA,
	AMDGPU_RING_TYPE_UVD		= AMDGPU_HW_IP_UVD,
	AMDGPU_RING_TYPE_VCE		= AMDGPU_HW_IP_VCE,
	AMDGPU_RING_TYPE_UVD_ENC	= AMDGPU_HW_IP_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC	= AMDGPU_HW_IP_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC	= AMDGPU_HW_IP_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG	= AMDGPU_HW_IP_VCN_JPEG,
	AMDGPU_RING_TYPE_VPE		= AMDGPU_HW_IP_VPE,
	AMDGPU_RING_TYPE_KIQ,
	AMDGPU_RING_TYPE_MES,
	AMDGPU_RING_TYPE_UMSCH_MM,
};

enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline. */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline. */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset. */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};

struct amdgpu_ib {
	struct drm_suballoc	*sa_bo;
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
	uint32_t		flags;
};
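/*
 * Illustrative sketch (not part of this header's API): a plausible
 * amdgpu_ib lifecycle built only from the helpers declared at the end of
 * this file.  The 256-byte size and single NOP dword are placeholders
 * invented for the example; real callers size and fill the IB with
 * engine-specific packets.
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
 *	if (r)
 *		return r;
 *
 *	ib.ptr[0] = ring->funcs->nop;	<- engine-specific packet stream
 *	ib.length_dw = 1;
 *
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	if (!r)
 *		dma_fence_wait(f, false);
 *
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */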
struct amdgpu_sched {
	u32				num_scheds;
	struct drm_gpu_scheduler	*sched[AMDGPU_MAX_HWIP_RINGS];
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct dma_fence		**fences;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
		      struct amdgpu_job *job, unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop);

u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring);
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
					 ktime_t timestamp);
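/*
 * Illustrative sketch (a pattern, not a contract): code that cannot sleep
 * can pair amdgpu_fence_emit_polling() with amdgpu_fence_wait_polling().
 * The caller is assumed to already hold the ring and have started a command
 * stream on it; the timeout is a caller-chosen poll budget (drivers commonly
 * pass adev->usec_timeout).
 *
 *	uint32_t seq;
 *	int r;
 *
 *	r = amdgpu_fence_emit_polling(ring, &seq, timeout);
 *	if (r)
 *		goto failed;	<- could not write the fence packet
 *	amdgpu_ring_commit(ring);
 *	if (amdgpu_fence_wait_polling(ring, seq, timeout) < 1)
 *		goto failed;	<- seq did not signal in time
 */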
/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	enum amdgpu_ring_type	type;
	uint32_t		align_mask;
	u32			nop;
	bool			support_64bit_ptrs;
	bool			no_user_fence;
	bool			secure_submission_supported;
	unsigned		extra_dw;

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p,
				 struct amdgpu_job *job,
				 struct amdgpu_ib *ib);
	/* constants to calculate how many DW are needed for an emit */
	unsigned		emit_frame_size;
	unsigned		emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring, uint64_t addr);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
				u64 gds_va, bool init_shadow, int vmid);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
	void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
};
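/*
 * Illustrative sketch: how a hw block might fill in its funcs table.  Every
 * identifier prefixed "foo_" is hypothetical; only amdgpu_ring_insert_nop
 * and amdgpu_ring_generic_pad_ib name real helpers declared later in this
 * header.
 *
 *	static const struct amdgpu_ring_funcs foo_ring_funcs = {
 *		.type = AMDGPU_RING_TYPE_SDMA,
 *		.align_mask = 0xf,
 *		.nop = FOO_PACKET_NOP,
 *		.support_64bit_ptrs = true,
 *		.get_rptr = foo_ring_get_rptr,
 *		.get_wptr = foo_ring_get_wptr,
 *		.set_wptr = foo_ring_set_wptr,
 *		.emit_frame_size = 32,	<- dw budget per frame emit
 *		.emit_ib_size = 7,	<- dw budget per emit_ib call
 *		.emit_ib = foo_ring_emit_ib,
 *		.emit_fence = foo_ring_emit_fence,
 *		.insert_nop = amdgpu_ring_insert_nop,
 *		.pad_ib = amdgpu_ring_generic_pad_ib,
 *	};
 */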
struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct drm_gpu_scheduler	sched;

	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			rptr_gpu_addr;
	volatile u32		*rptr_cpu_addr;
	u64			wptr;
	u64			wptr_old;
	unsigned		ring_size;
	unsigned		max_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint64_t		ptr_mask;
	uint32_t		buf_mask;
	u32			idx;
	u32			xcc_id;
	u32			xcp_id;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	uint64_t		mqd_gpu_addr;
	void			*mqd_ptr;
	unsigned		mqd_size;
	uint64_t		eop_gpu_addr;
	u32			doorbell_index;
	bool			use_doorbell;
	bool			use_pollmem;
	unsigned		wptr_offs;
	u64			wptr_gpu_addr;
	volatile u32		*wptr_cpu_addr;
	unsigned		fence_offs;
	u64			fence_gpu_addr;
	volatile u32		*fence_cpu_addr;
	uint64_t		current_ctx;
	char			name[16];
	u32			trail_seq;
	unsigned		trail_fence_offs;
	u64			trail_fence_gpu_addr;
	volatile u32		*trail_fence_cpu_addr;
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
	unsigned int		set_q_mode_offs;
	volatile u32		*set_q_mode_ptr;
	u64			set_q_mode_token;
	unsigned		vm_hub;
	unsigned		vm_inv_eng;
	struct dma_fence	*vmid_wait;
	bool			has_compute_vm_bug;
	bool			no_scheduler;
	int			hw_prio;
	unsigned		num_hw_submission;
	atomic_t		*sched_score;

	/* used for mes */
	bool			is_mes_queue;
	uint32_t		hw_queue_id;
	struct amdgpu_mes_ctx_data *mes_ctx;

	bool			is_sw_ring;
	unsigned int		entry_index;
};
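/*
 * How the sizes and masks above relate (a summary of amdgpu_ring_init() in
 * amdgpu_ring.c at the time of writing): ring_size is rounded up to a power
 * of two, buf_mask indexes dwords inside the buffer, and ptr_mask bounds
 * the monotonically increasing wptr.  For example:
 *
 *	ring_size = 4096 bytes = 1024 dwords
 *	buf_mask  = 1024 - 1 = 0x3ff
 *	ptr_mask  = buf_mask, or ~0ull when funcs->support_64bit_ptrs is set
 *
 * This is why amdgpu_ring_write() below stores through "wptr & buf_mask"
 * and then wraps wptr with ptr_mask.
 */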
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v)))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r, a) (r)->funcs->init_cond_exec((r), (a))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))

unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);

void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);
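/*
 * Illustrative sketch: direct writes to a ring follow an alloc/write/commit
 * pattern built from the declarations above and the inline helpers below.
 * The dword count is a placeholder; callers reserve what they are about to
 * emit.
 *
 *	r = amdgpu_ring_alloc(ring, 16);
 *	if (r)
 *		return r;
 *
 *	amdgpu_ring_write(ring, ...);	<- engine-specific packets
 *	amdgpu_ring_commit(ring);	<- pad to funcs->align_mask and
 *					   update the hw wptr
 *
 * On a mid-stream failure, amdgpu_ring_undo() rolls wptr back instead of
 * committing the partial command stream.
 */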
static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
						     bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}

static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;

	/* fill the whole ring buffer with the engine's NOP packet */
	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}

static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}

static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;
	void *dst;

	if (unlikely(ring->count_dw < count_dw))
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	/* split the copy in two chunks when it wraps past the buffer end */
	occupied = ring->wptr & ring->buf_mask;
	dst = (void *)&ring->ring[occupied];
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	chunk1 <<= 2; /* dwords to bytes */
	chunk2 <<= 2;

	if (chunk1)
		memcpy(dst, src, chunk1);

	if (chunk2) {
		src += chunk1;
		dst = (void *)ring->ring;
		memcpy(dst, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}

/**
 * amdgpu_ring_patch_cond_exec - patch dw count of conditional execute
 * @ring: amdgpu_ring structure
 * @offset: offset returned by amdgpu_ring_init_cond_exec
 *
 * Calculate the dw count and patch it into a cond_exec command.
 */
static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
					       unsigned int offset)
{
	unsigned cur;

	if (!ring->funcs->init_cond_exec)
		return;

	WARN_ON(offset > ring->buf_mask);
	WARN_ON(ring->ring[offset] != 0);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur < offset)
		cur += ring->ring_size >> 2;
	ring->ring[offset] = cur - offset;
}

#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)

#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
	 NULL)

int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring);

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);

static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
{
	return ib->ptr[idx];
}

static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
				       uint32_t value)
{
	ib->ptr[idx] = value;
}

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, enum amdgpu_ib_pool_type pool,
		  struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);

#endif