/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
#include <drm/drm_suballoc.h>

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_vm;

/* max number of rings */
#define AMDGPU_MAX_RINGS		124
#define AMDGPU_MAX_HWIP_RINGS		64
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_SW_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2
#define AMDGPU_MAX_VPE_RINGS		2

enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	AMDGPU_RING_PRIO_DEFAULT = 1,
	AMDGPU_RING_PRIO_2,
	AMDGPU_RING_PRIO_MAX
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY	(1 << 2)
#define AMDGPU_FENCE_FLAG_EXEC		(1 << 3)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
	AMDGPU_RING_TYPE_SDMA		= AMDGPU_HW_IP_DMA,
	AMDGPU_RING_TYPE_UVD		= AMDGPU_HW_IP_UVD,
	AMDGPU_RING_TYPE_VCE		= AMDGPU_HW_IP_VCE,
	AMDGPU_RING_TYPE_UVD_ENC	= AMDGPU_HW_IP_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC	= AMDGPU_HW_IP_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC	= AMDGPU_HW_IP_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG	= AMDGPU_HW_IP_VCN_JPEG,
	AMDGPU_RING_TYPE_VPE		= AMDGPU_HW_IP_VPE,
	AMDGPU_RING_TYPE_KIQ,
	AMDGPU_RING_TYPE_MES,
	AMDGPU_RING_TYPE_UMSCH_MM,
};

enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline. */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline. */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset. */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};

struct amdgpu_ib {
	struct drm_suballoc	*sa_bo;
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
	uint32_t		flags;
};
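
/*
 * Illustrative sketch (not part of this header): packets are written into
 * an IB through @ptr while @length_dw tracks the used size in dwords, e.g.
 * padding with the ring's NOP packet as amdgpu_ring_generic_pad_ib() does:
 *
 *	ib->ptr[ib->length_dw++] = ring->funcs->nop;
 */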

struct amdgpu_sched {
	u32				num_scheds;
	struct drm_gpu_scheduler	*sched[AMDGPU_MAX_HWIP_RINGS];
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct dma_fence		**fences;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
		      struct amdgpu_job *job, unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop);

u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring);
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
					 ktime_t timestamp);
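
/*
 * Illustrative sketch (an assumption, modelled on the KIQ register access
 * path): polled fences allow waiting for ring completion without
 * interrupts or the scheduler:
 *
 *	uint32_t seq;
 *
 *	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
 *	if (r)
 *		goto failed_undo;
 *	amdgpu_ring_commit(ring);
 *	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
 */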

/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	enum amdgpu_ring_type	type;
	uint32_t		align_mask;
	u32			nop;
	bool			support_64bit_ptrs;
	bool			no_user_fence;
	bool			secure_submission_supported;
	unsigned		extra_dw;

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p,
				 struct amdgpu_job *job,
				 struct amdgpu_ib *ib);
	/* constants to calculate how many DW are needed for an emit */
	unsigned	emit_frame_size;
	unsigned	emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring, uint64_t addr);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
				u64 gds_va, bool init_shadow, int vmid);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
	void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
	int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
	void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
};
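
/*
 * Illustrative sketch (hypothetical "foo" IP block, not part of the
 * driver): each hardware block provides a funcs table and fills in only
 * the hooks its ring supports:
 *
 *	static const struct amdgpu_ring_funcs foo_ring_funcs = {
 *		.type = AMDGPU_RING_TYPE_SDMA,
 *		.align_mask = 0xf,
 *		.support_64bit_ptrs = true,
 *		.get_rptr = foo_ring_get_rptr,
 *		.get_wptr = foo_ring_get_wptr,
 *		.set_wptr = foo_ring_set_wptr,
 *		.emit_ib = foo_ring_emit_ib,
 *		.emit_fence = foo_ring_emit_fence,
 *		.insert_nop = amdgpu_ring_insert_nop,
 *		.pad_ib = amdgpu_ring_generic_pad_ib,
 *	};
 */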

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct drm_gpu_scheduler	sched;

	struct amdgpu_bo	*ring_obj;
	uint32_t		*ring;
	unsigned		rptr_offs;
	u64			rptr_gpu_addr;
	volatile u32		*rptr_cpu_addr;
	u64			wptr;
	u64			wptr_old;
	unsigned		ring_size;
	unsigned		max_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint64_t		ptr_mask;
	uint32_t		buf_mask;
	u32			idx;
	u32			xcc_id;
	u32			xcp_id;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	uint64_t		mqd_gpu_addr;
	void			*mqd_ptr;
	unsigned		mqd_size;
	uint64_t		eop_gpu_addr;
	u32			doorbell_index;
	bool			use_doorbell;
	bool			use_pollmem;
	unsigned		wptr_offs;
	u64			wptr_gpu_addr;
	volatile u32		*wptr_cpu_addr;
	unsigned		fence_offs;
	u64			fence_gpu_addr;
	volatile u32		*fence_cpu_addr;
	uint64_t		current_ctx;
	char			name[16];
	u32			trail_seq;
	unsigned		trail_fence_offs;
	u64			trail_fence_gpu_addr;
	volatile u32		*trail_fence_cpu_addr;
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
	unsigned int		set_q_mode_offs;
	u32			*set_q_mode_ptr;
	u64			set_q_mode_token;
	unsigned		vm_hub;
	unsigned		vm_inv_eng;
	struct dma_fence	*vmid_wait;
	bool			has_compute_vm_bug;
	bool			no_scheduler;
	int			hw_prio;
	unsigned		num_hw_submission;
	atomic_t		*sched_score;

	/* used for mes */
	bool			is_mes_queue;
	uint32_t		hw_queue_id;
	struct amdgpu_mes_ctx_data *mes_ctx;

	bool			is_sw_ring;
	unsigned int		entry_index;
};

#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v)))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r, a) (r)->funcs->init_cond_exec((r), (a))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))

unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);

void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);
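
/*
 * Illustrative sketch (not part of this header): direct writes to a ring
 * are bracketed by amdgpu_ring_alloc() and amdgpu_ring_commit(), with
 * amdgpu_ring_undo() rolling the write pointer back on failure:
 *
 *	r = amdgpu_ring_alloc(ring, 32);
 *	if (r)
 *		return r;
 *	amdgpu_ring_emit_wreg(ring, reg, val);
 *	amdgpu_ring_commit(ring);
 */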

static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
						     bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}

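/* Overwrite the whole ring buffer with the IP block's NOP packet. */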
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;

	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}

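/* Write one dword to the ring and advance the masked write pointer. */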
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}

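/*
 * Copy count_dw dwords from src into the ring, splitting the copy in two
 * when it wraps past the end of the ring buffer.
 */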
static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;

	occupied = ring->wptr & ring->buf_mask;
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	chunk1 <<= 2;
	chunk2 <<= 2;

	if (chunk1)
		memcpy(&ring->ring[occupied], src, chunk1);

	if (chunk2) {
		src += chunk1;
		memcpy(ring->ring, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}

/**
 * amdgpu_ring_patch_cond_exec - patch dw count of conditional execute
 * @ring: amdgpu_ring structure
 * @offset: offset returned by amdgpu_ring_init_cond_exec
 *
 * Calculate the dw count and patch it into a cond_exec command.
 */
static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
					       unsigned int offset)
{
	unsigned cur;

	if (!ring->funcs->init_cond_exec)
		return;

	WARN_ON(offset > ring->buf_mask);
	WARN_ON(ring->ring[offset] != 0);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur < offset)
		cur += ring->ring_size >> 2;
	ring->ring[offset] = cur - offset;
}
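
/*
 * Illustrative sketch (not part of this header): init_cond_exec() emits a
 * conditional-execute packet with a zero dw count, and once the guarded
 * packets have been written the real count is patched in:
 *
 *	offset = amdgpu_ring_init_cond_exec(ring, addr);
 *	... emit the packets the CP may skip ...
 *	amdgpu_ring_patch_cond_exec(ring, offset);
 */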

#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)

#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) :	\
	 NULL)

int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring);

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);

static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
{
	return ib->ptr[idx];
}

static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
				       uint32_t value)
{
	ib->ptr[idx] = value;
}

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size,
		  enum amdgpu_ib_pool_type pool,
		  struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
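
/*
 * Illustrative sketch (an assumption, modelled on the driver's IB
 * self-test paths): a directly submitted IB is allocated from a pool,
 * filled, scheduled, and freed once its fence has signaled:
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *
 *	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
 *	if (r)
 *		return r;
 *	ib.ptr[ib.length_dw++] = ring->funcs->nop;
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	...
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */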
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
#endif