xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c (revision c7062be3380cb20c8b1c4a935a13f1848ead0719)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  *          Christian König
28  */
29 #include <linux/seq_file.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <linux/debugfs.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include "amdgpu.h"
36 #include "amdgpu_ras_mgr.h"
37 #include "atom.h"
38 
39 /*
40  * Rings
41  * Most engines on the GPU are fed via ring buffers.  Ring
42  * buffers are areas of GPU accessible memory that the host
43  * writes commands into and the GPU reads commands out of.
44  * There is a rptr (read pointer) that determines where the
45  * GPU is currently reading, and a wptr (write pointer)
46  * which determines where the host has written.  When the
47  * pointers are equal, the ring is idle.  When the host
48  * writes commands to the ring buffer, it increments the
49  * wptr.  The GPU then starts fetching commands and executes
50  * them until the pointers are equal again.
51  */
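
/*
 * Illustrative sketch (not lifted from a real caller) of how the helpers
 * below are typically combined to feed a ring.  amdgpu_ring_write() is the
 * dword write helper from amdgpu_ring.h; the dword count is arbitrary and
 * packet_header/packet_body stand in for whatever command dwords the engine
 * expects:
 *
 *	r = amdgpu_ring_alloc(ring, 16);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, packet_header);
 *	amdgpu_ring_write(ring, packet_body);
 *	amdgpu_ring_commit(ring);
 *
 * If something goes wrong after the allocation, amdgpu_ring_undo() rolls
 * the wptr back instead of committing.
 */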
52 
53 /**
54  * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
55  *
56  * @type: ring type for which to return the limit.
57  */
58 unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type)
59 {
60 	switch (type) {
61 	case AMDGPU_RING_TYPE_GFX:
62 		/* Need to keep at least 192 on GFX7+ for old radv. */
63 		return 192;
64 	case AMDGPU_RING_TYPE_COMPUTE:
65 		return 125;
66 	case AMDGPU_RING_TYPE_VCN_JPEG:
67 		return 16;
68 	default:
69 		return 49;
70 	}
71 }
72 
73 /**
74  * amdgpu_ring_alloc - allocate space on the ring buffer
75  *
76  * @ring: amdgpu_ring structure holding ring information
77  * @ndw: number of dwords to allocate in the ring buffer
78  *
79  * Allocate @ndw dwords in the ring buffer. The number of dwords should be the
80  * sum of all commands written to the ring.
81  *
82  * Returns:
83  * 0 on success, or -ENOMEM if the request exceeds the maximum number of
84  * dwords allowed for a single submission.
85  */
86 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
87 {
88 	/* Align the requested size so that amdgpu_ring_commit() can
89 	 * pad safely */
90 	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
91 
92 	/* Make sure we aren't trying to allocate more space
93 	 * than the maximum for one submission
94 	 */
95 	if (WARN_ON_ONCE(ndw > ring->max_dw))
96 		return -ENOMEM;
97 
98 	ring->count_dw = ndw;
99 	ring->wptr_old = ring->wptr;
100 
101 	if (ring->funcs->begin_use)
102 		ring->funcs->begin_use(ring);
103 
104 	return 0;
105 }
106 
107 /**
108  * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
109  *
110  * @ring: amdgpu_ring structure holding ring information
111  * @ndw: number of dwords to allocate in the ring buffer
112  *
113  * Allocate @ndw dwords in the ring buffer (all asics).
114  * Unlike amdgpu_ring_alloc(), this doesn't check the max_dw limit, as we
115  * may be re-emitting several submissions at once.
116  */
117 static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
118 {
119 	/* Align the requested size so that amdgpu_ring_commit() can
120 	 * pad safely */
121 	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
122 
123 	ring->count_dw = ndw;
124 	ring->wptr_old = ring->wptr;
125 
126 	if (ring->funcs->begin_use)
127 		ring->funcs->begin_use(ring);
128 }
129 
130 /**
131  * amdgpu_ring_insert_nop - insert NOP packets
132  *
133  * @ring: amdgpu_ring structure holding ring information
134  * @count: the number of NOP packets to insert
135  *
136  * This is the generic insert_nop function for rings except SDMA
137  */
138 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
139 {
140 	uint32_t occupied, chunk1, chunk2;
141 
142 	occupied = ring->wptr & ring->buf_mask;
143 	chunk1 = ring->buf_mask + 1 - occupied;
144 	chunk1 = (chunk1 >= count) ? count : chunk1;
145 	chunk2 = count - chunk1;
146 
147 	if (chunk1)
148 		memset32(&ring->ring[occupied], ring->funcs->nop, chunk1);
149 
150 	if (chunk2)
151 		memset32(ring->ring, ring->funcs->nop, chunk2);
152 
153 	ring->wptr += count;
154 	ring->wptr &= ring->ptr_mask;
155 	ring->count_dw -= count;
156 }
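
/*
 * Worked example of the wrap-around split above, using made-up numbers:
 * on a 1024-dword ring (buf_mask = 0x3ff) with wptr & buf_mask = 0x3fd and
 * count = 5, chunk1 = 3 NOPs fill the end of the buffer and chunk2 = 2 NOPs
 * land at the start, after which wptr advances by 5 (masked by ptr_mask).
 */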
157 
158 /**
159  * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
160  *
161  * @ring: amdgpu_ring structure holding ring information
162  * @ib: IB to add NOP packets to
163  *
164  * This is the generic pad_ib function for rings except SDMA
165  */
166 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
167 {
168 	u32 align_mask = ring->funcs->align_mask;
169 	u32 count = ib->length_dw & align_mask;
170 
171 	if (count) {
172 		count = align_mask + 1 - count;
173 
174 		memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count);
175 
176 		ib->length_dw += count;
177 	}
178 }
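
/*
 * Worked example of the padding above, using made-up numbers: with
 * align_mask = 7 (8-dword alignment) and ib->length_dw = 10, count is
 * 10 & 7 = 2, so 8 - 2 = 6 NOP dwords are appended and length_dw becomes 16.
 */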
179 
180 /**
181  * amdgpu_ring_commit - tell the GPU to execute the new
182  * commands on the ring buffer
183  *
184  * @ring: amdgpu_ring structure holding ring information
185  *
186  * Update the wptr (write pointer) to tell the GPU to
187  * execute new commands on the ring buffer (all asics).
188  */
189 void amdgpu_ring_commit(struct amdgpu_ring *ring)
190 {
191 	uint32_t count;
192 
193 	if (ring->count_dw < 0)
194 		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
195 
196 	/* We pad to match fetch size */
197 	count = ring->funcs->align_mask + 1 -
198 		(ring->wptr & ring->funcs->align_mask);
199 	count &= ring->funcs->align_mask;
200 
201 	if (count != 0)
202 		ring->funcs->insert_nop(ring, count);
203 
204 	mb();
205 	amdgpu_ring_set_wptr(ring);
206 
207 	if (ring->funcs->end_use)
208 		ring->funcs->end_use(ring);
209 }
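
/*
 * Worked example of the padding above, using made-up numbers: with
 * align_mask = 0xff and wptr & align_mask = 0xfa, count = (0x100 - 0xfa) &
 * 0xff = 6, so six NOPs are emitted before the wptr is published to the GPU.
 */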
210 
211 /**
212  * amdgpu_ring_undo - reset the wptr
213  *
214  * @ring: amdgpu_ring structure holding ring information
215  *
216  * Reset the driver's copy of the wptr (all asics).
217  */
218 void amdgpu_ring_undo(struct amdgpu_ring *ring)
219 {
220 	ring->wptr = ring->wptr_old;
221 
222 	if (ring->funcs->end_use)
223 		ring->funcs->end_use(ring);
224 }
225 
226 #define amdgpu_ring_get_gpu_addr(ring, offset)				\
227 	 (ring->adev->wb.gpu_addr + offset * 4)
228 
229 #define amdgpu_ring_get_cpu_addr(ring, offset)				\
230 	 (&ring->adev->wb.wb[offset])
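
/*
 * Both macros refer to the same 32-bit writeback slot: @offset indexes the
 * u32 writeback array, so the GPU address is the writeback buffer base plus
 * offset * 4 bytes, while the CPU address is simply the matching array
 * element.
 */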
231 
232 /**
233  * amdgpu_ring_init - init driver ring struct.
234  *
235  * @adev: amdgpu_device pointer
236  * @ring: amdgpu_ring structure holding ring information
237  * @max_dw: maximum number of dw for ring alloc
238  * @irq_src: interrupt source to use for this ring
239  * @irq_type: interrupt type to use for this ring
240  * @hw_prio: ring priority (NORMAL/HIGH)
241  * @sched_score: optional score atomic shared with other schedulers
242  *
243  * Initialize the driver information for the selected ring (all asics).
244  * Returns 0 on success, error on failure.
245  */
246 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
247 		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
248 		     unsigned int irq_type, unsigned int hw_prio,
249 		     atomic_t *sched_score)
250 {
251 	int r;
252 	int sched_hw_submission = amdgpu_sched_hw_submission;
253 	u32 *num_sched;
254 	u32 hw_ip;
255 	unsigned int max_ibs_dw;
256 
257 	/* Set the hw submission limit higher for KIQ because
258 	 * it's used for a number of gfx/compute tasks by both
259 	 * KFD and KGD which may have outstanding fences and
260 	 * it doesn't really use the gpu scheduler anyway;
261 	 * KIQ tasks get submitted directly to the ring.
262 	 */
263 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
264 		sched_hw_submission = max(sched_hw_submission, 256);
265 	if (ring->funcs->type == AMDGPU_RING_TYPE_MES)
266 		sched_hw_submission = 8;
267 	else if (ring == &adev->sdma.instance[0].page)
268 		sched_hw_submission = 256;
269 
270 	if (ring->adev == NULL) {
271 		if (adev->num_rings >= AMDGPU_MAX_RINGS)
272 			return -EINVAL;
273 
274 		ring->adev = adev;
275 		ring->num_hw_submission = sched_hw_submission;
276 		ring->sched_score = sched_score;
277 		ring->vmid_wait = dma_fence_get_stub();
278 
279 		ring->idx = adev->num_rings++;
280 		adev->rings[ring->idx] = ring;
281 
282 		r = amdgpu_fence_driver_init_ring(ring);
283 		if (r)
284 			return r;
285 	}
286 
287 	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
288 	if (r) {
289 		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
290 		return r;
291 	}
292 
293 	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
294 	if (r) {
295 		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
296 		return r;
297 	}
298 
299 	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
300 	if (r) {
301 		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
302 		return r;
303 	}
304 
305 	r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
306 	if (r) {
307 		dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
308 		return r;
309 	}
310 
311 	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
312 	if (r) {
313 		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
314 		return r;
315 	}
316 
317 	ring->fence_gpu_addr =
318 		amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
319 	ring->fence_cpu_addr =
320 		amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);
321 
322 	ring->rptr_gpu_addr =
323 		amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
324 	ring->rptr_cpu_addr =
325 		amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);
326 
327 	ring->wptr_gpu_addr =
328 		amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
329 	ring->wptr_cpu_addr =
330 		amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);
331 
332 	ring->trail_fence_gpu_addr =
333 		amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
334 	ring->trail_fence_cpu_addr =
335 		amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);
336 
337 	ring->cond_exe_gpu_addr =
338 		amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
339 	ring->cond_exe_cpu_addr =
340 		amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);
341 
342 	/* always set cond_exec_polling to CONTINUE */
343 	*ring->cond_exe_cpu_addr = 1;
344 
345 	if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
346 		r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
347 		if (r) {
348 			dev_err(adev->dev, "failed initializing fences (%d).\n", r);
349 			return r;
350 		}
351 
352 		max_ibs_dw = ring->funcs->emit_frame_size +
353 			     amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
354 		max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
355 
356 		if (WARN_ON(max_ibs_dw > max_dw))
357 			max_dw = max_ibs_dw;
358 
359 		ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
360 	} else {
361 		ring->ring_size = roundup_pow_of_two(max_dw * 4);
362 		ring->count_dw = (ring->ring_size - 4) >> 2;
363 		/* ring buffer is empty now */
364 		ring->wptr = *ring->rptr_cpu_addr = 0;
365 	}
366 
367 	ring->buf_mask = (ring->ring_size / 4) - 1;
368 	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
369 		0xffffffffffffffff : ring->buf_mask;
370 	/* Initialize cached_rptr to 0 */
371 	ring->cached_rptr = 0;
372 
373 	if (!ring->ring_backup) {
374 		ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
375 		if (!ring->ring_backup)
376 			return -ENOMEM;
377 	}
378 
379 	/* Allocate ring buffer */
380 	if (ring->ring_obj == NULL) {
381 		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
382 					    PAGE_SIZE,
383 					    AMDGPU_GEM_DOMAIN_GTT,
384 					    &ring->ring_obj,
385 					    &ring->gpu_addr,
386 					    (void **)&ring->ring);
387 		if (r) {
388 			dev_err(adev->dev, "(%d) ring create failed\n", r);
389 			kvfree(ring->ring_backup);
390 			return r;
391 		}
392 		amdgpu_ring_clear_ring(ring);
393 	}
394 
395 	ring->max_dw = max_dw;
396 	ring->hw_prio = hw_prio;
397 
398 	if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
399 		hw_ip = ring->funcs->type;
400 		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
401 		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
402 			&ring->sched;
403 	}
404 
405 	return 0;
406 }
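
/*
 * Sizing example with made-up numbers: a ring initialized with max_dw = 1024
 * and sched_hw_submission = 2 gets ring_size =
 * roundup_pow_of_two(1024 * 4 * 2) = 8192 bytes, and therefore
 * buf_mask = (8192 / 4) - 1 = 0x7ff dword positions.
 */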
407 
408 /**
409  * amdgpu_ring_fini - tear down the driver ring struct.
410  *
411  * @ring: amdgpu_ring structure holding ring information
412  *
413  * Tear down the driver information for the selected ring (all asics).
414  */
415 void amdgpu_ring_fini(struct amdgpu_ring *ring)
416 {
417 
418 	/* Don't tear down a ring that hasn't been initialized */
419 	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
420 		return;
421 
422 	ring->sched.ready = false;
423 
424 	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
425 	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
426 
427 	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
428 	amdgpu_device_wb_free(ring->adev, ring->fence_offs);
429 
430 	amdgpu_bo_free_kernel(&ring->ring_obj,
431 			      &ring->gpu_addr,
432 			      (void **)&ring->ring);
433 	kvfree(ring->ring_backup);
434 	ring->ring_backup = NULL;
435 
436 	dma_fence_put(ring->vmid_wait);
437 	ring->vmid_wait = NULL;
438 	ring->me = 0;
439 }
440 
441 /**
442  * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
443  *
444  * @ring: ring to write to
445  * @reg0: register to write
446  * @reg1: register to wait on
447  * @ref: reference value to write/wait on
448  * @mask: mask to wait on
449  *
450  * Helper for rings that don't support write and wait in a
451  * single oneshot packet.
452  */
453 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
454 						uint32_t reg0, uint32_t reg1,
455 						uint32_t ref, uint32_t mask)
456 {
457 	amdgpu_ring_emit_wreg(ring, reg0, ref);
458 	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
459 }
460 
461 /**
462  * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
463  *
464  * @ring: ring to try the recovery on
465  * @vmid: VMID we try to get going again
466  * @fence: timedout fence
467  *
468  * Tries to get a ring proceeding again when it is stuck.
469  */
470 bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
471 			       struct dma_fence *fence)
472 {
473 	unsigned long flags;
474 	ktime_t deadline;
475 	bool ret;
476 
477 	deadline = ktime_add_us(ktime_get(), 10000);
478 
479 	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
480 		return false;
481 
482 	spin_lock_irqsave(fence->lock, flags);
483 	if (!dma_fence_is_signaled_locked(fence))
484 		dma_fence_set_error(fence, -ENODATA);
485 	spin_unlock_irqrestore(fence->lock, flags);
486 
487 	while (!dma_fence_is_signaled(fence) &&
488 	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
489 		ring->funcs->soft_recovery(ring, vmid);
490 
491 	ret = dma_fence_is_signaled(fence);
492 	/* increment the counter only if soft recovery worked */
493 	if (ret)
494 		atomic_inc(&ring->adev->gpu_reset_counter);
495 
496 	return ret;
497 }
498 
499 /*
500  * Debugfs info
501  */
502 #if defined(CONFIG_DEBUG_FS)
503 
504 static ssize_t amdgpu_ras_cper_debugfs_read(struct file *f, char __user *buf,
505 					    size_t size, loff_t *offset)
506 {
507 	const uint8_t ring_header_size = 12;
508 	struct amdgpu_ring *ring = file_inode(f)->i_private;
509 	struct ras_cmd_cper_snapshot_req *snapshot_req __free(kfree) =
510 		kzalloc(sizeof(struct ras_cmd_cper_snapshot_req), GFP_KERNEL);
511 	struct ras_cmd_cper_snapshot_rsp *snapshot_rsp __free(kfree) =
512 		kzalloc(sizeof(struct ras_cmd_cper_snapshot_rsp), GFP_KERNEL);
513 	struct ras_cmd_cper_record_req *record_req __free(kfree) =
514 		kzalloc(sizeof(struct ras_cmd_cper_record_req), GFP_KERNEL);
515 	struct ras_cmd_cper_record_rsp *record_rsp __free(kfree) =
516 		kzalloc(sizeof(struct ras_cmd_cper_record_rsp), GFP_KERNEL);
517 	uint8_t *ring_header __free(kfree) =
518 		kzalloc(ring_header_size, GFP_KERNEL);
519 	uint32_t total_cper_num;
520 	uint64_t start_cper_id;
521 	int r;
522 
523 	if (!snapshot_req || !snapshot_rsp || !record_req || !record_rsp ||
524 	    !ring_header)
525 		return -ENOMEM;
526 
527 	if (!(*offset)) {
528 		/* Need at least 12 bytes for the header on the first read */
529 		if (size < ring_header_size)
530 			return -EINVAL;
531 
532 		if (copy_to_user(buf, ring_header, ring_header_size))
533 			return -EFAULT;
534 		buf += ring_header_size;
535 		size -= ring_header_size;
536 	}
537 
538 	r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev,
539 					  RAS_CMD__GET_CPER_SNAPSHOT,
540 					  snapshot_req, sizeof(struct ras_cmd_cper_snapshot_req),
541 					  snapshot_rsp, sizeof(struct ras_cmd_cper_snapshot_rsp));
542 	if (r || !snapshot_rsp->total_cper_num)
543 		return r;
544 
545 	start_cper_id = snapshot_rsp->start_cper_id;
546 	total_cper_num = snapshot_rsp->total_cper_num;
547 
548 	record_req->buf_ptr = (uint64_t)(uintptr_t)buf;
549 	record_req->buf_size = size;
550 	record_req->cper_start_id = start_cper_id + *offset;
551 	record_req->cper_num = total_cper_num;
552 	r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, RAS_CMD__GET_CPER_RECORD,
553 					  record_req, sizeof(struct ras_cmd_cper_record_req),
554 					  record_rsp, sizeof(struct ras_cmd_cper_record_rsp));
555 	if (r)
556 		return r;
557 
558 	r = *offset ? record_rsp->real_data_size : record_rsp->real_data_size + ring_header_size;
559 	(*offset) += record_rsp->real_cper_num;
560 
561 	return r;
562 }
563 
564 /* The file layout is a 12-byte header consisting of:
565  * - rptr
566  * - wptr
567  * - driver's copy of the wptr
568  *
569  * followed by n words of ring data
570  */
571 static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
572 					size_t size, loff_t *pos)
573 {
574 	struct amdgpu_ring *ring = file_inode(f)->i_private;
575 	uint32_t value, result, early[3];
576 	uint64_t p;
577 	loff_t i;
578 	int r;
579 
580 	if (ring->funcs->type == AMDGPU_RING_TYPE_CPER && amdgpu_uniras_enabled(ring->adev))
581 		return amdgpu_ras_cper_debugfs_read(f, buf, size, pos);
582 
583 	if (*pos & 3 || size & 3)
584 		return -EINVAL;
585 
586 	result = 0;
587 
588 	if (*pos < 12) {
589 		if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
590 			mutex_lock(&ring->adev->cper.ring_lock);
591 
592 		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
593 		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
594 		early[2] = ring->wptr & ring->buf_mask;
595 		for (i = *pos / 4; i < 3 && size; i++) {
596 			r = put_user(early[i], (uint32_t *)buf);
597 			if (r) {
598 				result = r;
599 				goto out;
600 			}
601 			buf += 4;
602 			result += 4;
603 			size -= 4;
604 			*pos += 4;
605 		}
606 	}
607 
608 	if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
609 		while (size) {
610 			if (*pos >= (ring->ring_size + 12))
611 				return result;
612 
613 			value = ring->ring[(*pos - 12)/4];
614 			r = put_user(value, (uint32_t *)buf);
615 			if (r)
616 				return r;
617 			buf += 4;
618 			result += 4;
619 			size -= 4;
620 			*pos += 4;
621 		}
622 	} else {
623 		p = early[0];
624 		if (early[0] <= early[1])
625 			size = (early[1] - early[0]);
626 		else
627 			size = ring->ring_size - (early[0] - early[1]);
628 
629 		while (size) {
630 			if (p == early[1])
631 				goto out;
632 
633 			value = ring->ring[p];
634 			r = put_user(value, (uint32_t *)buf);
635 			if (r) {
636 				result = r;
637 				goto out;
638 			}
639 
640 			buf += 4;
641 			result += 4;
642 			size--;
643 			p++;
644 			p &= ring->ptr_mask;
645 		}
646 	}
647 
648 out:
649 	if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
650 		mutex_unlock(&ring->adev->cper.ring_lock);
651 
652 	return result;
653 }
654 
655 static ssize_t amdgpu_debugfs_virt_ring_read(struct file *f, char __user *buf,
656 	size_t size, loff_t *pos)
657 {
658 	struct amdgpu_ring *ring = file_inode(f)->i_private;
659 
660 	if (*pos & 3 || size & 3)
661 		return -EINVAL;
662 
663 	if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
664 		amdgpu_virt_req_ras_cper_dump(ring->adev, false);
665 
666 	return amdgpu_debugfs_ring_read(f, buf, size, pos);
667 }
668 
669 static const struct file_operations amdgpu_debugfs_ring_fops = {
670 	.owner = THIS_MODULE,
671 	.read = amdgpu_debugfs_ring_read,
672 	.llseek = default_llseek
673 };
674 
675 static const struct file_operations amdgpu_debugfs_virt_ring_fops = {
676 	.owner = THIS_MODULE,
677 	.read = amdgpu_debugfs_virt_ring_read,
678 	.llseek = default_llseek
679 };
680 
681 static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
682 				       size_t size, loff_t *pos)
683 {
684 	struct amdgpu_ring *ring = file_inode(f)->i_private;
685 	ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size);
686 	void *from = ((u8 *)ring->mqd_ptr) + *pos;
687 
688 	if (*pos > ring->mqd_size)
689 		return 0;
690 
691 	if (copy_to_user(buf, from, bytes))
692 		return -EFAULT;
693 
694 	*pos += bytes;
695 	return bytes;
696 }
697 
698 static const struct file_operations amdgpu_debugfs_mqd_fops = {
699 	.owner = THIS_MODULE,
700 	.read = amdgpu_debugfs_mqd_read,
701 	.llseek = default_llseek
702 };
703 
704 static int amdgpu_debugfs_ring_error(void *data, u64 val)
705 {
706 	struct amdgpu_ring *ring = data;
707 
708 	amdgpu_fence_driver_set_error(ring, val);
709 	return 0;
710 }
711 
712 DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(amdgpu_debugfs_error_fops, NULL,
713 				amdgpu_debugfs_ring_error, "%lld\n");
714 
715 #endif
716 
717 void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
718 			      struct amdgpu_ring *ring)
719 {
720 #if defined(CONFIG_DEBUG_FS)
721 	struct drm_minor *minor = adev_to_drm(adev)->primary;
722 	struct dentry *root = minor->debugfs_root;
723 	char name[32];
724 
725 	sprintf(name, "amdgpu_ring_%s", ring->name);
726 	if (amdgpu_sriov_vf(adev))
727 		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
728 					 &amdgpu_debugfs_virt_ring_fops,
729 					 ring->ring_size + 12);
730 	else
731 		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
732 					 &amdgpu_debugfs_ring_fops,
733 					 ring->ring_size + 12);
734 
735 	if (ring->mqd_obj) {
736 		sprintf(name, "amdgpu_mqd_%s", ring->name);
737 		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
738 					 &amdgpu_debugfs_mqd_fops,
739 					 ring->mqd_size);
740 	}
741 
742 	sprintf(name, "amdgpu_error_%s", ring->name);
743 	debugfs_create_file(name, 0200, root, ring,
744 			    &amdgpu_debugfs_error_fops);
745 
746 #endif
747 }
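
/*
 * Illustrative only: a minimal user-space sketch (not part of the driver)
 * for reading one of the ring files created above.  The debugfs path and
 * ring name are examples and may differ per system; the first 12 bytes are
 * the rptr/wptr/driver-wptr header described earlier.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint32_t hdr[3];
 *		int fd = open("/sys/kernel/debug/dri/0/amdgpu_ring_gfx",
 *			      O_RDONLY);
 *
 *		if (fd < 0 || read(fd, hdr, sizeof(hdr)) != sizeof(hdr))
 *			return 1;
 *		printf("rptr %u wptr %u driver wptr %u\n",
 *		       hdr[0], hdr[1], hdr[2]);
 *		close(fd);
 *		return 0;
 *	}
 */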
748 
749 /**
750  * amdgpu_ring_test_helper - test the ring and set scheduler readiness
751  *
752  * @ring: ring to test
753  *
754  * Tests the ring and sets the scheduler's readiness status accordingly.
755  *
756  * Returns 0 on success, error on failure.
757  */
758 int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
759 {
760 	struct amdgpu_device *adev = ring->adev;
761 	int r;
762 
763 	r = amdgpu_ring_test_ring(ring);
764 	if (r)
765 		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
766 			      ring->name, r);
767 	else
768 		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
769 			      ring->name);
770 
771 	ring->sched.ready = !r;
772 
773 	return r;
774 }
775 
776 static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
777 				    struct amdgpu_mqd_prop *prop)
778 {
779 	struct amdgpu_device *adev = ring->adev;
780 	bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
781 				    amdgpu_gfx_is_high_priority_compute_queue(adev, ring);
782 	bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
783 				amdgpu_gfx_is_high_priority_graphics_queue(adev, ring);
784 
785 	memset(prop, 0, sizeof(*prop));
786 
787 	prop->mqd_gpu_addr = ring->mqd_gpu_addr;
788 	prop->hqd_base_gpu_addr = ring->gpu_addr;
789 	prop->rptr_gpu_addr = ring->rptr_gpu_addr;
790 	prop->wptr_gpu_addr = ring->wptr_gpu_addr;
791 	prop->queue_size = ring->ring_size;
792 	prop->eop_gpu_addr = ring->eop_gpu_addr;
793 	prop->use_doorbell = ring->use_doorbell;
794 	prop->doorbell_index = ring->doorbell_index;
795 	prop->kernel_queue = true;
796 
797 	/* The map_queues packet doesn't need to activate the queue,
798 	 * so only the KIQ needs to set this field.
799 	 */
800 	prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;
801 
802 	prop->allow_tunneling = is_high_prio_compute;
803 	if (is_high_prio_compute || is_high_prio_gfx) {
804 		prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
805 		prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
806 	}
807 }
808 
809 int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
810 {
811 	struct amdgpu_device *adev = ring->adev;
812 	struct amdgpu_mqd *mqd_mgr;
813 	struct amdgpu_mqd_prop prop;
814 
815 	amdgpu_ring_to_mqd_prop(ring, &prop);
816 
817 	ring->wptr = 0;
818 
819 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
820 		mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
821 	else
822 		mqd_mgr = &adev->mqds[ring->funcs->type];
823 
824 	return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
825 }
826 
827 void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
828 {
829 	if (ring->is_sw_ring)
830 		amdgpu_sw_ring_ib_begin(ring);
831 }
832 
833 void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
834 {
835 	if (ring->is_sw_ring)
836 		amdgpu_sw_ring_ib_end(ring);
837 }
838 
839 void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
840 {
841 	if (ring->is_sw_ring)
842 		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
843 }
844 
845 void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
846 {
847 	if (ring->is_sw_ring)
848 		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
849 }
850 
851 void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
852 {
853 	if (ring->is_sw_ring)
854 		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
855 }
856 
857 bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
858 {
859 	if (!ring)
860 		return false;
861 
862 	if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
863 		return false;
864 
865 	return true;
866 }
867 
868 void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
869 				    struct amdgpu_fence *guilty_fence)
870 {
871 	/* Stop the scheduler to prevent anybody else from touching the ring buffer. */
872 	drm_sched_wqueue_stop(&ring->sched);
873 	/* back up the non-guilty commands */
874 	amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
875 }
876 
877 int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
878 				 struct amdgpu_fence *guilty_fence)
879 {
880 	unsigned int i;
881 	int r;
882 
883 	/* verify that the ring is functional */
884 	r = amdgpu_ring_test_ring(ring);
885 	if (r)
886 		return r;
887 
888 	/* signal the guilty fence and set an error on all fences from the context */
889 	if (guilty_fence)
890 		amdgpu_fence_driver_guilty_force_completion(guilty_fence);
891 	/* Re-emit the non-guilty commands */
892 	if (ring->ring_backup_entries_to_copy) {
893 		amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
894 		for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
895 			amdgpu_ring_write(ring, ring->ring_backup[i]);
896 		amdgpu_ring_commit(ring);
897 	}
898 	/* Start the scheduler again */
899 	drm_sched_wqueue_start(&ring->sched);
900 	return 0;
901 }
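
/*
 * Illustrative only: a hypothetical IP-specific ring reset callback (the
 * name, signature and foo_hw_reset_queue() are made up) would typically
 * bracket its hardware-level reset with the two helpers above:
 *
 *	static int foo_ring_reset(struct amdgpu_ring *ring,
 *				  struct amdgpu_fence *guilty_fence)
 *	{
 *		int r;
 *
 *		amdgpu_ring_reset_helper_begin(ring, guilty_fence);
 *		r = foo_hw_reset_queue(ring);
 *		if (r)
 *			return r;
 *		return amdgpu_ring_reset_helper_end(ring, guilty_fence);
 *	}
 */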
902 
903 bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
904 					 u32 reset_type)
905 {
906 	switch (ring->funcs->type) {
907 	case AMDGPU_RING_TYPE_GFX:
908 		if (ring->adev->gfx.gfx_supported_reset & reset_type)
909 			return true;
910 		break;
911 	case AMDGPU_RING_TYPE_COMPUTE:
912 		if (ring->adev->gfx.compute_supported_reset & reset_type)
913 			return true;
914 		break;
915 	case AMDGPU_RING_TYPE_SDMA:
916 		if (ring->adev->sdma.supported_reset & reset_type)
917 			return true;
918 		break;
919 	case AMDGPU_RING_TYPE_VCN_DEC:
920 	case AMDGPU_RING_TYPE_VCN_ENC:
921 		if (ring->adev->vcn.supported_reset & reset_type)
922 			return true;
923 		break;
924 	case AMDGPU_RING_TYPE_VCN_JPEG:
925 		if (ring->adev->jpeg.supported_reset & reset_type)
926 			return true;
927 		break;
928 	default:
929 		break;
930 	}
931 	return false;
932 }
933