/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
#include <drm/drm_suballoc.h>

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_vm;

/* max number of rings */
#define AMDGPU_MAX_RINGS		149
#define AMDGPU_MAX_HWIP_RINGS		64
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_SW_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2
#define AMDGPU_MAX_VPE_RINGS		2

enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	AMDGPU_RING_PRIO_DEFAULT = 1,
	AMDGPU_RING_PRIO_2,
	AMDGPU_RING_PRIO_MAX
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY	(1 << 2)
#define AMDGPU_FENCE_FLAG_EXEC		(1 << 3)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
	AMDGPU_RING_TYPE_SDMA		= AMDGPU_HW_IP_DMA,
	AMDGPU_RING_TYPE_UVD		= AMDGPU_HW_IP_UVD,
	AMDGPU_RING_TYPE_VCE		= AMDGPU_HW_IP_VCE,
	AMDGPU_RING_TYPE_UVD_ENC	= AMDGPU_HW_IP_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC	= AMDGPU_HW_IP_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC	= AMDGPU_HW_IP_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG	= AMDGPU_HW_IP_VCN_JPEG,
	AMDGPU_RING_TYPE_VPE		= AMDGPU_HW_IP_VPE,
	AMDGPU_RING_TYPE_KIQ,
	AMDGPU_RING_TYPE_MES,
	AMDGPU_RING_TYPE_UMSCH_MM,
	AMDGPU_RING_TYPE_CPER,
};

enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline. */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline. */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset. */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};
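
/*
 * Illustrative sketch (not part of the API): how a caller might pick a pool
 * type when allocating an IB with amdgpu_ib_get(), declared later in this
 * file. The 1024-byte size and the surrounding error handling are
 * assumptions for the example, not a prescribed pattern.
 *
 *	struct amdgpu_ib ib;
 *	int r;
 *
 *	// Normal submissions use the delayed pool; init/reset paths that
 *	// bypass the scheduler would pass AMDGPU_IB_POOL_DIRECT instead.
 *	r = amdgpu_ib_get(adev, vm, 1024, AMDGPU_IB_POOL_DELAYED, &ib);
 *	if (r)
 *		return r;
 */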

struct amdgpu_ib {
	struct drm_suballoc *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	uint32_t flags;
};

struct amdgpu_sched {
	u32 num_scheds;
	struct drm_gpu_scheduler *sched[AMDGPU_MAX_HWIP_RINGS];
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t gpu_addr;
	volatile uint32_t *cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t sync_seq;
	atomic_t last_seq;
	bool initialized;
	struct amdgpu_irq_src *irq_src;
	unsigned irq_type;
	struct timer_list fallback_timer;
	unsigned num_fences_mask;
	spinlock_t lock;
	struct dma_fence **fences;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
		      struct amdgpu_job *job, unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop);

u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring);
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
					 ktime_t timestamp);
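
/*
 * Illustrative sketch (an assumption, modelled on polling-style callers
 * rather than any one function above): emit a fence sequence number and
 * busy-wait for it without relying on interrupt-driven fence processing.
 * The timeout value is hypothetical.
 *
 *	uint32_t seq;
 *	int r;
 *
 *	r = amdgpu_fence_emit_polling(ring, &seq, timeout);
 *	if (r)
 *		return r;
 *	amdgpu_ring_commit(ring);
 *	// amdgpu_fence_wait_polling() returns the remaining timeout,
 *	// so a result <= 0 means the fence never signaled in time.
 *	if (amdgpu_fence_wait_polling(ring, seq, timeout) <= 0)
 *		return -ETIMEDOUT;
 */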

/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	enum amdgpu_ring_type type;
	uint32_t align_mask;
	u32 nop;
	bool support_64bit_ptrs;
	bool no_user_fence;
	bool secure_submission_supported;
	unsigned extra_dw;

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p,
				 struct amdgpu_job *job,
				 struct amdgpu_ib *ib);
	/* constants to calculate how many DW are needed for an emit */
	unsigned emit_frame_size;
	unsigned emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring, uint64_t addr);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
				u64 gds_va, bool init_shadow, int vmid);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
	void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
	int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
	void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
	bool (*is_guilty)(struct amdgpu_ring *ring);
};
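
/*
 * Illustrative sketch (all names hypothetical): an IP block typically
 * defines a static const table of these callbacks and points ring->funcs
 * at it when the ring is created. Only a handful of members are shown;
 * real tables in the gfx/sdma/vcn code fill in many more.
 *
 *	static const struct amdgpu_ring_funcs foo_ring_funcs = {
 *		.type = AMDGPU_RING_TYPE_SDMA,
 *		.align_mask = 0xf,
 *		.nop = 0x0,	// hw-specific NOP packet, assumption
 *		.support_64bit_ptrs = true,
 *		.get_rptr = foo_ring_get_rptr,
 *		.get_wptr = foo_ring_get_wptr,
 *		.set_wptr = foo_ring_set_wptr,
 *	};
 */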

struct amdgpu_ring {
	struct amdgpu_device *adev;
	const struct amdgpu_ring_funcs *funcs;
	struct amdgpu_fence_driver fence_drv;
	struct drm_gpu_scheduler sched;

	struct amdgpu_bo *ring_obj;
	uint32_t *ring;
	unsigned rptr_offs;
	u64 rptr_gpu_addr;
	volatile u32 *rptr_cpu_addr;
	u64 wptr;
	u64 wptr_old;
	unsigned ring_size;
	unsigned max_dw;
	int count_dw;
	uint64_t gpu_addr;
	uint64_t ptr_mask;
	uint32_t buf_mask;
	u32 idx;
	u32 xcc_id;
	u32 xcp_id;
	u32 me;
	u32 pipe;
	u32 queue;
	struct amdgpu_bo *mqd_obj;
	uint64_t mqd_gpu_addr;
	void *mqd_ptr;
	unsigned mqd_size;
	uint64_t eop_gpu_addr;
	u32 doorbell_index;
	bool use_doorbell;
	bool use_pollmem;
	unsigned wptr_offs;
	u64 wptr_gpu_addr;
	volatile u32 *wptr_cpu_addr;
	unsigned fence_offs;
	u64 fence_gpu_addr;
	volatile u32 *fence_cpu_addr;
	uint64_t current_ctx;
	char name[16];
	u32 trail_seq;
	unsigned trail_fence_offs;
	u64 trail_fence_gpu_addr;
	volatile u32 *trail_fence_cpu_addr;
	unsigned cond_exe_offs;
	u64 cond_exe_gpu_addr;
	volatile u32 *cond_exe_cpu_addr;
	unsigned int set_q_mode_offs;
	u32 *set_q_mode_ptr;
	u64 set_q_mode_token;
	unsigned vm_hub;
	unsigned vm_inv_eng;
	struct dma_fence *vmid_wait;
	bool has_compute_vm_bug;
	bool no_scheduler;
	int hw_prio;
	unsigned num_hw_submission;
	atomic_t *sched_score;

	/* used for mes */
	bool is_mes_queue;
	uint32_t hw_queue_id;
	struct amdgpu_mes_ctx_data *mes_ctx;

	bool is_sw_ring;
	unsigned int entry_index;
	/* store the cached rptr to restore after reset */
	uint64_t cached_rptr;
};
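
/*
 * Note on the pointer masks above: the ring buffer size is a power of two,
 * so buf_mask is the dword-index mask ((ring_size / 4) - 1) while ptr_mask
 * covers the full hardware wrap range of the write pointer. Worked example
 * (hypothetical sizes): a 4 KiB ring holds 1024 dwords, so buf_mask is
 * 0x3ff and a wptr of 1025 indexes slot 1025 & 0x3ff == 1, i.e. the buffer
 * wraps transparently while wptr itself keeps counting up to ptr_mask.
 */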

#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v)))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r, a) (r)->funcs->init_cond_exec((r), (a))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))

unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);

void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);

static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
						     bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}
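
/*
 * Illustrative sketch (an assumption, modelled on typical direct-write
 * callers): ring writes are bracketed by amdgpu_ring_alloc() and
 * amdgpu_ring_commit(), with amdgpu_ring_undo() restoring the old wptr on
 * the error path. The 8-dword reservation and the packet value are
 * hypothetical.
 *
 *	int r = amdgpu_ring_alloc(ring, 8);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, 0x0);	// hw-specific packet dword
 *	...
 *	if (something_failed) {
 *		amdgpu_ring_undo(ring);
 *		return -EINVAL;
 *	}
 *	amdgpu_ring_commit(ring);
 */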

/* Fill the entire ring buffer with the ring's NOP packet. */
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;

	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}

/* Write a single dword at the current write pointer and advance it. */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}

/*
 * Copy count_dw dwords from src into the ring, splitting the copy into two
 * chunks when it would wrap past the end of the buffer.
 */
static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;

	occupied = ring->wptr & ring->buf_mask;
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	chunk1 <<= 2;
	chunk2 <<= 2;

	if (chunk1)
		memcpy(&ring->ring[occupied], src, chunk1);

	if (chunk2) {
		src += chunk1;
		memcpy(ring->ring, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}

/**
 * amdgpu_ring_patch_cond_exec - patch dw count of conditional execute
 * @ring: amdgpu_ring structure
 * @offset: offset returned by amdgpu_ring_init_cond_exec
 *
 * Calculate the dw count and patch it into a cond_exec command.
 */
static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
					       unsigned int offset)
{
	unsigned cur;

	if (!ring->funcs->init_cond_exec)
		return;

	WARN_ON(offset > ring->buf_mask);
	WARN_ON(ring->ring[offset] != 0);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur < offset)
		cur += ring->ring_size >> 2;
	ring->ring[offset] = cur - offset;
}

#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)

#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
	 NULL)

int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring);

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);

/* Read a dword from an IB by dword index. */
static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
{
	return ib->ptr[idx];
}

/* Write a dword into an IB by dword index. */
static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
				       uint32_t value)
{
	ib->ptr[idx] = value;
}

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size,
		  enum amdgpu_ib_pool_type pool,
		  struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);

#endif