/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>

/* max number of rings */
#define AMDGPU_MAX_RINGS		28
#define AMDGPU_MAX_HWIP_RINGS		8
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2

enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	AMDGPU_RING_PRIO_DEFAULT = 1,
	AMDGPU_RING_PRIO_2,
	AMDGPU_RING_PRIO_MAX
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
#define AMDGPU_FENCE_FLAG_INT           (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
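
/*
 * Illustrative sketch (not part of this header): to_amdgpu_ring() is the
 * usual way to get back to the ring from drm_gpu_scheduler callbacks,
 * which only see the embedded &struct drm_gpu_scheduler.  The callback
 * name below is hypothetical:
 *
 *	static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 *
 *		...
 *	}
 */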

#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
	AMDGPU_RING_TYPE_SDMA		= AMDGPU_HW_IP_DMA,
	AMDGPU_RING_TYPE_UVD		= AMDGPU_HW_IP_UVD,
	AMDGPU_RING_TYPE_VCE		= AMDGPU_HW_IP_VCE,
	AMDGPU_RING_TYPE_UVD_ENC	= AMDGPU_HW_IP_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC	= AMDGPU_HW_IP_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC	= AMDGPU_HW_IP_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG	= AMDGPU_HW_IP_VCN_JPEG,
	AMDGPU_RING_TYPE_KIQ,
	AMDGPU_RING_TYPE_MES
};

enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline. */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline. */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset. */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};
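
/*
 * Illustrative sketch, assuming the amdgpu_ib_get() signature from
 * amdgpu_ib.c: the pool type selects which suballocator pool backs the
 * IB.  Most userspace command submissions use the delayed pool:
 *
 *	struct amdgpu_ib ib;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, vm, 256, AMDGPU_IB_POOL_DELAYED, &ib);
 *	if (r)
 *		return r;
 */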

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;

struct amdgpu_sched {
	u32				num_scheds;
	struct drm_gpu_scheduler	*sched[AMDGPU_MAX_HWIP_RINGS];
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct dma_fence		**fences;
};

void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission,
				  atomic_t *sched_score);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
		      unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
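
/*
 * Illustrative sketch (assumed flow, not part of this header): a fence
 * is emitted behind the ring work it tracks and behaves as a regular
 * &struct dma_fence afterwards.  Passing a NULL job and requesting an
 * interrupt on completion:
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &fence, NULL, AMDGPU_FENCE_FLAG_INT);
 *	if (!r) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */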

/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	enum amdgpu_ring_type	type;
	uint32_t		align_mask;
	u32			nop;
	bool			support_64bit_ptrs;
	bool			no_user_fence;
	unsigned		vmhub;
	unsigned		extra_dw;

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* constants to calculate how many DW are needed for an emit */
	unsigned emit_frame_size;
	unsigned emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
};
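
/*
 * Illustrative sketch: every IP block exports one static table of these
 * callbacks per ring type (see e.g. gfx_v10_0.c or sdma_v5_0.c for real
 * instances).  All values and function names below are made up:
 *
 *	static const struct amdgpu_ring_funcs example_ring_funcs = {
 *		.type = AMDGPU_RING_TYPE_GFX,
 *		.align_mask = 0xff,
 *		.support_64bit_ptrs = true,
 *		.get_rptr = example_ring_get_rptr,
 *		.get_wptr = example_ring_get_wptr,
 *		.set_wptr = example_ring_set_wptr,
 *		.emit_ib = example_ring_emit_ib,
 *		.emit_fence = example_ring_emit_fence,
 *	};
 */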

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct drm_gpu_scheduler	sched;

	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			wptr;
	u64			wptr_old;
	unsigned		ring_size;
	unsigned		max_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint64_t		ptr_mask;
	uint32_t		buf_mask;
	u32			idx;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	uint64_t		mqd_gpu_addr;
	void			*mqd_ptr;
	uint64_t		eop_gpu_addr;
	u32			doorbell_index;
	bool			use_doorbell;
	bool			use_pollmem;
	unsigned		wptr_offs;
	unsigned		fence_offs;
	uint64_t		current_ctx;
	char			name[16];
	u32			trail_seq;
	unsigned		trail_fence_offs;
	u64			trail_fence_gpu_addr;
	volatile u32		*trail_fence_cpu_addr;
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
	unsigned		vm_inv_eng;
	struct dma_fence	*vmid_wait;
	bool			has_compute_vm_bug;
	bool			no_scheduler;
	int			hw_prio;
};

#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)

int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int ring_size, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int prio,
		     atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);
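
/*
 * Illustrative sketch of the usual direct submission pattern built from
 * the helpers above (the packet payload is made up; real code emits
 * ASIC specific packets):
 *
 *	int r;
 *
 *	r = amdgpu_ring_alloc(ring, 16);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ring->funcs->nop);
 *	...
 *	amdgpu_ring_commit(ring);
 *
 * On a mid-stream failure amdgpu_ring_undo() restores the old write
 * pointer instead of committing.
 */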

static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
						     bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}

static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;
	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}

static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	/* Warn on overflow; the ring contents are already corrupt here */
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	/* buf_mask wraps the store inside the buffer, ptr_mask keeps the
	 * write pointer in the range the hardware expects.
	 */
	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}

static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;
	void *dst;

	if (unlikely(ring->count_dw < count_dw))
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	/* Split the copy where it wraps around the end of the buffer */
	occupied = ring->wptr & ring->buf_mask;
	dst = (void *)&ring->ring[occupied];
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	/* Convert dwords to bytes for memcpy() */
	chunk1 <<= 2;
	chunk2 <<= 2;

	if (chunk1)
		memcpy(dst, src, chunk1);

	if (chunk2) {
		src += chunk1;
		dst = (void *)ring->ring;
		memcpy(dst, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}
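
/*
 * Illustrative sketch: amdgpu_ring_write_multiple() copies a prebuilt
 * packet in one go instead of per-dword amdgpu_ring_write() calls (the
 * packet array below is made up):
 *
 *	uint32_t pkt[4] = { ... };
 *
 *	amdgpu_ring_write_multiple(ring, pkt, ARRAY_SIZE(pkt));
 */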

int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring);
#endif