/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
#include <drm/drm_suballoc.h>

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_vm;

/* max number of rings */
#define AMDGPU_MAX_RINGS		149
#define AMDGPU_MAX_HWIP_RINGS		64
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_SW_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2
#define AMDGPU_MAX_VPE_RINGS		2

enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	/* the default priority deliberately aliases AMDGPU_RING_PRIO_1 */
	AMDGPU_RING_PRIO_DEFAULT = 1,
	AMDGPU_RING_PRIO_2,
	AMDGPU_RING_PRIO_MAX
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY	(1 << 2)
#define AMDGPU_FENCE_FLAG_EXEC		(1 << 3)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
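
/*
 * Illustrative sketch (not from this header): scheduler callbacks get a
 * &struct drm_gpu_scheduler and can recover the ring that embeds it;
 * sched_job here stands for any caller-provided &struct drm_sched_job:
 *
 *	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 */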

#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
	AMDGPU_RING_TYPE_SDMA		= AMDGPU_HW_IP_DMA,
	AMDGPU_RING_TYPE_UVD		= AMDGPU_HW_IP_UVD,
	AMDGPU_RING_TYPE_VCE		= AMDGPU_HW_IP_VCE,
	AMDGPU_RING_TYPE_UVD_ENC	= AMDGPU_HW_IP_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC	= AMDGPU_HW_IP_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC	= AMDGPU_HW_IP_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG	= AMDGPU_HW_IP_VCN_JPEG,
	AMDGPU_RING_TYPE_VPE		= AMDGPU_HW_IP_VPE,
	AMDGPU_RING_TYPE_KIQ,
	AMDGPU_RING_TYPE_MES,
	AMDGPU_RING_TYPE_UMSCH_MM,
	AMDGPU_RING_TYPE_CPER,
};

enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline. */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline. */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset. */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};

struct amdgpu_ib {
	struct drm_suballoc		*sa_bo;
	uint32_t			length_dw;
	uint64_t			gpu_addr;
	uint32_t			*ptr;
	uint32_t			flags;
};
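
/*
 * Illustrative sketch of the usual IB lifecycle, using only APIs declared
 * in this header (adev, vm and ring stand for caller-provided objects):
 *
 *	struct amdgpu_ib ib;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, vm, 256, AMDGPU_IB_POOL_DELAYED, &ib);
 *	if (r)
 *		return r;
 *	ib.ptr[ib.length_dw++] = ring->funcs->nop;
 *
 * ib.ptr is the CPU mapping of the buffer the GPU reads at ib.gpu_addr,
 * and length_dw counts the dwords written so far.
 */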

struct amdgpu_sched {
	u32				num_scheds;
	struct drm_gpu_scheduler	*sched[AMDGPU_MAX_HWIP_RINGS];
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct dma_fence		**fences;
};

/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
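
/*
 * Illustrative sketch of a fence round trip with the APIs declared below
 * (the ring's emission lock is assumed to be held while emitting):
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &fence, NULL, AMDGPU_FENCE_FLAG_INT);
 *	if (r)
 *		return r;
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 *
 * amdgpu_fence_process() later picks up the sequence number written by
 * the GPU and signals the matching &struct dma_fence.
 */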

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
	ktime_t				start_timestamp;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
		      struct amdgpu_job *job, unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop);

u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring);
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
					 ktime_t timestamp);
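
/*
 * Illustrative sketch of the polling variants, which avoid interrupts
 * (useful e.g. before the IRQ path is up; timeout is caller supplied):
 *
 *	uint32_t seq;
 *	int r;
 *
 *	r = amdgpu_fence_emit_polling(ring, &seq, timeout);
 *	if (!r)
 *		amdgpu_fence_wait_polling(ring, seq, timeout);
 */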

/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/**
	 * @type:
	 *
	 * GFX, Compute, SDMA, UVD, VCE, VCN, VPE, KIQ, MES, UMSCH, and CPER
	 * use ring buffers. The type field just identifies which component the
	 * ring buffer is associated with.
	 */
	enum amdgpu_ring_type	type;
	uint32_t		align_mask;

	/**
	 * @nop:
	 *
	 * Every block in amdgpu has its own no-op instruction (e.g., GFX 10
	 * uses PACKET3(PACKET3_NOP, 0x3FFF), VCN 5 uses VCN_ENC_CMD_NO_OP,
	 * etc). This field receives the specific no-op of the component
	 * that initializes the ring.
	 */
	u32			nop;
	bool			support_64bit_ptrs;
	bool			no_user_fence;
	bool			secure_submission_supported;
	unsigned		extra_dw;

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p,
				 struct amdgpu_job *job,
				 struct amdgpu_ib *ib);
	/* constants to calculate how many DW are needed for an emit */
	unsigned emit_frame_size;
	unsigned emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring, uint64_t addr);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
				u64 gds_va, bool init_shadow, int vmid);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
	void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
	int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
	void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
	bool (*is_guilty)(struct amdgpu_ring *ring);
};
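
/*
 * Illustrative sketch of how an IP block fills this table; the table,
 * mask value and callback names are hypothetical, and the nop value is
 * the GFX 10 example from the @nop documentation above:
 *
 *	static const struct amdgpu_ring_funcs gfx_vX_ring_funcs = {
 *		.type = AMDGPU_RING_TYPE_GFX,
 *		.align_mask = 0xff,
 *		.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 *		.support_64bit_ptrs = true,
 *		.get_rptr = gfx_vX_ring_get_rptr,
 *		.get_wptr = gfx_vX_ring_get_wptr,
 *		.set_wptr = gfx_vX_ring_set_wptr,
 *		.emit_ib = gfx_vX_ring_emit_ib,
 *	};
 */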

/**
 * struct amdgpu_ring - Holds ring information
 */
struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct drm_gpu_scheduler	sched;

	struct amdgpu_bo	*ring_obj;
	uint32_t		*ring;
	unsigned		rptr_offs;
	u64			rptr_gpu_addr;
	volatile u32		*rptr_cpu_addr;

	/**
	 * @wptr:
	 *
	 * This is part of the ring buffer implementation and represents the
	 * write pointer. The wptr marks how far the host has written into
	 * the ring.
	 */
	u64			wptr;

	/**
	 * @wptr_old:
	 *
	 * Before wptr is updated with a new value, the old value is usually
	 * stored in wptr_old.
	 */
	u64			wptr_old;
	unsigned		ring_size;

	/**
	 * @max_dw:
	 *
	 * Maximum number of DWords for ring allocation. This information is
	 * provided at ring initialization time, and each IP block can
	 * specify a specific value. Check places that invoke
	 * amdgpu_ring_init() to see the maximum size per block.
	 */
	unsigned		max_dw;

	/**
	 * @count_dw:
	 *
	 * Number of DWords available for writing: initialized when ring
	 * space is reserved (see amdgpu_ring_alloc()) and decremented by
	 * each amdgpu_ring_write().
	 */
	int			count_dw;
	uint64_t		gpu_addr;

	/**
	 * @ptr_mask:
	 *
	 * Some IPs support 64-bit pointers and others only 32-bit ones;
	 * this behavior is component-specific and selected by the
	 * support_64bit_ptrs field. If the IP block supports 64-bit
	 * pointers, the mask is 0xffffffffffffffff; otherwise this value
	 * equals buf_mask. Note that this field is used to keep wptr
	 * within a valid range.
	 */
	uint64_t		ptr_mask;

	/**
	 * @buf_mask:
	 *
	 * The buffer mask keeps the wptr index within the bounds of the
	 * ring buffer. It is set at ring initialization time to
	 * (ring_size / 4) - 1; e.g., a 4 KiB ring gives a buf_mask of
	 * 0x3ff.
	 */
	uint32_t		buf_mask;
	u32			idx;
	u32			xcc_id;
	u32			xcp_id;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	uint64_t		mqd_gpu_addr;
	void			*mqd_ptr;
	unsigned		mqd_size;
	uint64_t		eop_gpu_addr;
	u32			doorbell_index;
	bool			use_doorbell;
	bool			use_pollmem;
	unsigned		wptr_offs;
	u64			wptr_gpu_addr;

	/**
	 * @wptr_cpu_addr:
	 *
	 * CPU address of the wptr writeback slot. Writing the new wptr
	 * here is how changes are committed to the GPU.
	 */
	volatile u32		*wptr_cpu_addr;
	unsigned		fence_offs;
	u64			fence_gpu_addr;
	volatile u32		*fence_cpu_addr;
	uint64_t		current_ctx;
	char			name[16];
	u32			trail_seq;
	unsigned		trail_fence_offs;
	u64			trail_fence_gpu_addr;
	volatile u32		*trail_fence_cpu_addr;
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
	unsigned int		set_q_mode_offs;
	u32			*set_q_mode_ptr;
	u64			set_q_mode_token;
	unsigned		vm_hub;
	unsigned		vm_inv_eng;
	struct dma_fence	*vmid_wait;
	bool			has_compute_vm_bug;
	bool			no_scheduler;
	bool			no_user_submission;
	int			hw_prio;
	unsigned		num_hw_submission;
	atomic_t		*sched_score;

	bool			is_sw_ring;
	unsigned int		entry_index;
	/* store the cached rptr to restore after reset */
	uint64_t		cached_rptr;
};

#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v)))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r, a) (r)->funcs->init_cond_exec((r), (a))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))

unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);

void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);
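
/*
 * Illustrative sketch of the direct-write bracket these helpers form:
 * amdgpu_ring_alloc() reserves space (setting count_dw), the inline
 * amdgpu_ring_write() below fills it, amdgpu_ring_commit() makes it
 * visible to the GPU, and amdgpu_ring_undo() rolls back on error:
 *
 *	r = amdgpu_ring_alloc(ring, 16);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ring->funcs->nop);
 *	amdgpu_ring_commit(ring);
 */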

static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
						     bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}

/* Fill the whole ring buffer with the ring's no-op packet. */
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;

	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}

static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	/* store the dword and advance wptr, wrapping at the buffer size */
	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}

static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;

	/* split the copy at the end of the ring buffer if it would wrap */
	occupied = ring->wptr & ring->buf_mask;
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	/* convert dword counts to bytes for memcpy() */
	chunk1 <<= 2;
	chunk2 <<= 2;

	if (chunk1)
		memcpy(&ring->ring[occupied], src, chunk1);

	if (chunk2) {
		src += chunk1;
		memcpy(ring->ring, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}
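
/*
 * Worked example for the wrap-around above: with buf_mask = 0x3ff (a
 * 4 KiB ring) and wptr at 0x3fe, writing count_dw = 4 gives
 * occupied = 0x3fe, so chunk1 = 2 dwords go to the end of the buffer
 * and chunk2 = 2 dwords continue at its start.
 */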

/**
 * amdgpu_ring_patch_cond_exec - patch dw count of conditional execute
 * @ring: amdgpu_ring structure
 * @offset: offset returned by amdgpu_ring_init_cond_exec
 *
 * Calculate the dw count and patch it into a cond_exec command.
 */
static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
					       unsigned int offset)
{
	unsigned cur;

	if (!ring->funcs->init_cond_exec)
		return;

	WARN_ON(offset > ring->buf_mask);
	WARN_ON(ring->ring[offset] != 0);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur < offset)
		cur += ring->ring_size >> 2;
	ring->ring[offset] = cur - offset;
}
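
/*
 * Worked example for the patch above: with a 4 KiB ring (buf_mask =
 * 0x3ff), offset = 0x3f0 and a wrapped wptr of 0x10, cur = 0xf < offset,
 * so cur += 0x400 and the patched dw count is 0x40f - 0x3f0 = 0x1f.
 */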

int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring);

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);

static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
{
	return ib->ptr[idx];
}

static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
				       uint32_t value)
{
	ib->ptr[idx] = value;
}

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size,
		  enum amdgpu_ib_pool_type pool,
		  struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
#endif