/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
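
/*
 * Illustrative sketch (not part of the original driver): the typical
 * submission flow built on the helpers below.  The function name
 * example_submit_nops() is hypothetical; amdgpu_ring_alloc(),
 * amdgpu_ring_write() and amdgpu_ring_commit() are the real helpers.
 */
static int __maybe_unused example_submit_nops(struct amdgpu_ring *ring,
					      unsigned int ndw)
{
	unsigned int i;
	int r;

	/* Reserve ndw dwords on the ring; fails if ndw exceeds max_dw. */
	r = amdgpu_ring_alloc(ring, ndw);
	if (r)
		return r;

	/* Write the reserved dwords; plain NOP packets for demonstration. */
	for (i = 0; i < ndw; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);

	/* Pad to the fetch alignment and publish the new wptr to the GPU. */
	amdgpu_ring_commit(ring);

	return 0;
}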

/**
 * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
 *
 * @type: ring type for which to return the limit.
 */
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type)
{
	switch (type) {
	case AMDGPU_RING_TYPE_GFX:
		/* Need to keep at least 192 on GFX7+ for old radv. */
		return 192;
	case AMDGPU_RING_TYPE_COMPUTE:
		return 125;
	case AMDGPU_RING_TYPE_VCN_JPEG:
		return 16;
	default:
		return 49;
	}
}

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit()
	 * can pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}

/**
 * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Doesn't check the max_dw limit as we may be reemitting
 * several submissions.
 */
static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit()
	 * can pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	uint32_t occupied, chunk1, chunk2;

	occupied = ring->wptr & ring->buf_mask;
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count) ? count : chunk1;
	chunk2 = count - chunk1;
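	/*
	 * Worked example (illustrative): with buf_mask == 0xff, wptr at
	 * 0xfa and count == 10, chunk1 == 6 dwords fill the tail of the
	 * buffer and chunk2 == 4 dwords wrap around to the start.
	 */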

	if (chunk1)
		memset32(&ring->ring[occupied], ring->funcs->nop, chunk1);

	if (chunk2)
		memset32(ring->ring, ring->funcs->nop, chunk2);

	ring->wptr += count;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count;
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	if (ring->count_dw < 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count &= ring->funcs->align_mask;
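	/*
	 * Worked example (illustrative): with align_mask == 0x3f and
	 * (wptr & 0x3f) == 0x3d, count == 0x40 - 0x3d == 3 NOPs bring the
	 * wptr to the next 64-dword fetch boundary; if the wptr is already
	 * aligned, the final mask leaves count at 0.
	 */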

	if (count != 0)
		ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

#define amdgpu_ring_get_gpu_addr(ring, offset)				\
	 (ring->adev->wb.gpu_addr + offset * 4)

#define amdgpu_ring_get_cpu_addr(ring, offset)				\
	 (&ring->adev->wb.wb[offset])
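
/*
 * Note: the writeback slot offsets handed out by amdgpu_device_wb_get()
 * are dword indices into adev->wb.wb[], hence the "* 4" above when
 * forming the GPU byte address.
 */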

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int r;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;
	unsigned int max_ibs_dw;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	if (ring->funcs->type == AMDGPU_RING_TYPE_MES)
		sched_hw_submission = 8;
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->num_hw_submission = sched_hw_submission;
		ring->sched_score = sched_score;
		ring->vmid_wait = dma_fence_get_stub();

		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;

		r = amdgpu_fence_driver_init_ring(ring);
		if (r)
			return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}

	ring->fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
	ring->fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);

	ring->rptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
	ring->rptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);

	ring->wptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
	ring->wptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);

	ring->trail_fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
	ring->trail_fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);

	ring->cond_exe_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
	ring->cond_exe_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
		r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
		if (r) {
			dev_err(adev->dev, "failed initializing fences (%d).\n", r);
			return r;
		}

		max_ibs_dw = ring->funcs->emit_frame_size +
			     amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
		max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

		if (WARN_ON(max_ibs_dw > max_dw))
			max_dw = max_ibs_dw;

		ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
	} else {
		ring->ring_size = roundup_pow_of_two(max_dw * 4);
		ring->count_dw = (ring->ring_size - 4) >> 2;
		/* ring buffer is empty now */
		ring->wptr = *ring->rptr_cpu_addr = 0;
	}

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;
	/* Initialize cached_rptr to 0 */
	ring->cached_rptr = 0;

	if (!ring->ring_backup) {
		ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
		if (!ring->ring_backup)
			return -ENOMEM;
	}

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			kvfree(ring->ring_backup);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{

	/* Don't tear down a ring that was never initialized */
	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
		return;

	ring->sched.ready = false;

	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_device_wb_free(ring->adev, ring->fence_offs);

	amdgpu_bo_free_kernel(&ring->ring_obj,
			      &ring->gpu_addr,
			      (void **)&ring->ring);
	kvfree(ring->ring_backup);
	ring->ring_backup = NULL;

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	ring->adev->rings[ring->idx] = NULL;
}

/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timed out fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	unsigned long flags;
	ktime_t deadline;
	bool ret;

	if (unlikely(ring->adev->debug_disable_soft_recovery))
		return false;

	deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	spin_lock_irqsave(fence->lock, flags);
	if (!dma_fence_is_signaled_locked(fence))
		dma_fence_set_error(fence, -ENODATA);
	spin_unlock_irqrestore(fence->lock, flags);

	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	ret = dma_fence_is_signaled(fence);
	/* increment the counter only if soft recovery worked */
	if (ret)
		atomic_inc(&ring->adev->gpu_reset_counter);

	return ret;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
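/*
 * For example (illustrative): with a 4096-byte ring, the file created
 * below is 4096 + 12 bytes long; a 12-byte read at offset 0 returns the
 * masked rptr, wptr and the driver's wptr copy, and the dword at offset
 * 12 + 4 * i corresponds to ring->ring[i].
 */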
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	uint32_t value, result, early[3];
	uint64_t p;
	loff_t i;
	int r;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
			mutex_lock(&ring->adev->cper.ring_lock);

		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r) {
				result = r;
				goto out;
			}
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
		while (size) {
			if (*pos >= (ring->ring_size + 12))
				return result;

			value = ring->ring[(*pos - 12)/4];
			r = put_user(value, (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	} else {
		p = early[0];
		if (early[0] <= early[1])
			size = (early[1] - early[0]);
		else
			size = ring->ring_size - (early[0] - early[1]);

		while (size) {
			if (p == early[1])
				goto out;

			value = ring->ring[p];
			r = put_user(value, (uint32_t *)buf);
			if (r) {
				result = r;
				goto out;
			}

			buf += 4;
			result += 4;
			size--;
			p++;
			p &= ring->ptr_mask;
		}
	}

out:
	if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
		mutex_unlock(&ring->adev->cper.ring_lock);

	return result;
}

static ssize_t amdgpu_debugfs_virt_ring_read(struct file *f, char __user *buf,
	size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
		amdgpu_virt_req_ras_cper_dump(ring->adev, false);

	return amdgpu_debugfs_ring_read(f, buf, size, pos);
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_virt_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_virt_ring_read,
	.llseek = default_llseek
};

static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size);
	void *from = ((u8 *)ring->mqd_ptr) + *pos;

	if (*pos > ring->mqd_size)
		return 0;

	if (copy_to_user(buf, from, bytes))
		return -EFAULT;

	*pos += bytes;
	return bytes;
}

static const struct file_operations amdgpu_debugfs_mqd_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_mqd_read,
	.llseek = default_llseek
};

static int amdgpu_debugfs_ring_error(void *data, u64 val)
{
	struct amdgpu_ring *ring = data;

	amdgpu_fence_driver_set_error(ring, val);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(amdgpu_debugfs_error_fops, NULL,
				amdgpu_debugfs_ring_error, "%lld\n");

#endif

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);
	if (amdgpu_sriov_vf(adev))
		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
					 &amdgpu_debugfs_virt_ring_fops,
					 ring->ring_size + 12);
	else
		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
					 &amdgpu_debugfs_ring_fops,
					 ring->ring_size + 12);

	if (ring->mqd_obj) {
		sprintf(name, "amdgpu_mqd_%s", ring->name);
		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
					 &amdgpu_debugfs_mqd_fops,
					 ring->mqd_size);
	}

	sprintf(name, "amdgpu_error_%s", ring->name);
	debugfs_create_file(name, 0200, root, ring,
			    &amdgpu_debugfs_error_fops);

#endif
}

/**
 * amdgpu_ring_test_helper - test the ring and set the scheduler readiness status
 *
 * @ring: ring to test
 *
 * Tests the ring and sets the scheduler readiness status accordingly.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;

	return r;
}

static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
				    struct amdgpu_mqd_prop *prop)
{
	struct amdgpu_device *adev = ring->adev;
	bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
				    amdgpu_gfx_is_high_priority_compute_queue(adev, ring);
	bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
				amdgpu_gfx_is_high_priority_graphics_queue(adev, ring);

	memset(prop, 0, sizeof(*prop));

	prop->mqd_gpu_addr = ring->mqd_gpu_addr;
	prop->hqd_base_gpu_addr = ring->gpu_addr;
	prop->rptr_gpu_addr = ring->rptr_gpu_addr;
	prop->wptr_gpu_addr = ring->wptr_gpu_addr;
	prop->queue_size = ring->ring_size;
	prop->eop_gpu_addr = ring->eop_gpu_addr;
	prop->use_doorbell = ring->use_doorbell;
	prop->doorbell_index = ring->doorbell_index;
	prop->kernel_queue = true;

	/* The map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs this field set.
	 */
	prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;

	prop->allow_tunneling = is_high_prio_compute;
	if (is_high_prio_compute || is_high_prio_gfx) {
		prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
		prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
	}
}

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_mqd *mqd_mgr;
	struct amdgpu_mqd_prop prop;

	amdgpu_ring_to_mqd_prop(ring, &prop);

	ring->wptr = 0;

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
	else
		mqd_mgr = &adev->mqds[ring->funcs->type];

	return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}

void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_begin(ring);
}

void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_end(ring);
}

void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
}

void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
}

void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}

bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
{
	if (!ring)
		return false;

	if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
		return false;

	return true;
}

void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
				    struct amdgpu_fence *guilty_fence)
{
	/* Stop the scheduler to prevent anybody else from touching the ring buffer. */
	drm_sched_wqueue_stop(&ring->sched);
	/* back up the non-guilty commands */
	amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
}

int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
				 struct amdgpu_fence *guilty_fence)
{
	unsigned int i;
	int r;

	/* verify that the ring is functional */
	r = amdgpu_ring_test_ring(ring);
	if (r)
		return r;

	/* signal the fence of the bad job */
	if (guilty_fence)
		amdgpu_fence_driver_guilty_force_completion(guilty_fence);
	/* Re-emit the non-guilty commands */
	if (ring->ring_backup_entries_to_copy) {
		amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
		for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
			amdgpu_ring_write(ring, ring->ring_backup[i]);
		amdgpu_ring_commit(ring);
	}
	/* Start the scheduler again */
	drm_sched_wqueue_start(&ring->sched);
	return 0;
}

bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
					 u32 reset_type)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		if (ring->adev->gfx.gfx_supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		if (ring->adev->gfx.compute_supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		if (ring->adev->sdma.supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_VCN_DEC:
	case AMDGPU_RING_TYPE_VCN_ENC:
		if (ring->adev->vcn.supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_VCN_JPEG:
		if (ring->adev->jpeg.supported_reset & reset_type)
			return true;
		break;
	default:
		break;
	}
	return false;
}