xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c (revision face6a3615a649456eb4549f6d474221d877d604)
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Dave Airlie
30  */
31 #include <linux/seq_file.h>
32 #include <linux/atomic.h>
33 #include <linux/wait.h>
34 #include <linux/kref.h>
35 #include <linux/slab.h>
36 #include <linux/firmware.h>
37 #include <linux/pm_runtime.h>
38 
39 #include <drm/drm_drv.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_reset.h"
43 
44 /*
45  * Cast helper
46  */
47 static const struct dma_fence_ops amdgpu_fence_ops;
48 static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
49 {
50 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
51 
52 	return __f;
53 }
54 
55 /**
56  * amdgpu_fence_write - write a fence value
57  *
58  * @ring: ring the fence is associated with
59  * @seq: sequence number to write
60  *
61  * Writes a fence value to memory (all asics).
62  */
63 static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
64 {
65 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
66 
67 	if (drv->cpu_addr)
68 		*drv->cpu_addr = cpu_to_le32(seq);
69 }
70 
71 /**
72  * amdgpu_fence_read - read a fence value
73  *
74  * @ring: ring the fence is associated with
75  *
76  * Reads a fence value from memory (all asics).
77  * Returns the value of the fence read from memory.
78  */
79 static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
80 {
81 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
82 	u32 seq = 0;
83 
84 	if (drv->cpu_addr)
85 		seq = le32_to_cpu(*drv->cpu_addr);
86 	else
87 		seq = atomic_read(&drv->last_seq);
88 
89 	return seq;
90 }
91 
92 /**
93  * amdgpu_fence_emit - emit a fence on the requested ring
94  *
95  * @ring: ring the fence is associated with
96  * @af: amdgpu fence to initialize and emit
97  * @flags: flags to pass into the subordinate .emit_fence() call
98  *
99  * Emits a fence command on the requested ring (all asics).
100  * Returns 0 on success, negative error code on failure.
101  */
102 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
103 		      unsigned int flags)
104 {
105 	struct amdgpu_device *adev = ring->adev;
106 	struct dma_fence *fence;
107 	struct dma_fence __rcu **ptr;
108 	uint32_t seq;
109 	int r;
110 
111 	fence = &af->base;
112 	af->ring = ring;
113 
114 	seq = ++ring->fence_drv.sync_seq;
115 	dma_fence_init(fence, &amdgpu_fence_ops,
116 		       &ring->fence_drv.lock,
117 		       adev->fence_context + ring->idx, seq);
118 
119 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
120 			       seq, flags | AMDGPU_FENCE_FLAG_INT);
121 	amdgpu_fence_save_wptr(af);
122 	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
123 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
124 	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
125 		struct dma_fence *old;
126 
127 		rcu_read_lock();
128 		old = dma_fence_get_rcu_safe(ptr);
129 		rcu_read_unlock();
130 
131 		if (old) {
132 			r = dma_fence_wait(old, false);
133 			dma_fence_put(old);
134 			if (r)
135 				return r;
136 		}
137 	}
138 
139 	to_amdgpu_fence(fence)->start_timestamp = ktime_get();
140 
141 	/* This function can't be called concurrently anyway, otherwise
142 	 * emitting the fence would mess up the hardware ring buffer.
143 	 */
144 	rcu_assign_pointer(*ptr, dma_fence_get(fence));
145 
146 	return 0;
147 }
148 
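/*
 * Editor's illustrative sketch (not part of the driver): roughly how a
 * submission path pairs amdgpu_fence_emit() with the ring helpers.  In the
 * real driver this happens inside amdgpu_ib_schedule(); the function name
 * below and the fixed dword reservation are hypothetical.
 */
static int example_emit_and_wait(struct amdgpu_ring *ring)
{
	struct amdgpu_fence *af;
	int r;

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af)
		return -ENOMEM;

	/* reserve ring space for the fence packet */
	r = amdgpu_ring_alloc(ring, 32);
	if (r) {
		kfree(af);
		return r;
	}

	r = amdgpu_fence_emit(ring, af, 0);
	if (r) {
		amdgpu_ring_undo(ring);
		dma_fence_put(&af->base);	/* initialized but never published */
		return r;
	}
	amdgpu_ring_commit(ring);

	/* the embedded dma_fence tracks completion of this submission */
	r = dma_fence_wait(&af->base, false);
	dma_fence_put(&af->base);
	return r;
}
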
149 /**
150  * amdgpu_fence_emit_polling - emit a fence on the requested ring
151  *
152  * @ring: ring the fence is associated with
153  * @s: resulting sequence number
154  * @timeout: the timeout for waiting in usecs
155  *
156  * Emits a fence command on the requested ring (all asics).
157  * Used for fence polling.
158  * Returns 0 on success, negative error code on failure.
159  */
160 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
161 			      uint32_t timeout)
162 {
163 	uint32_t seq;
164 	signed long r;
165 
166 	if (!s)
167 		return -EINVAL;
168 
169 	seq = ++ring->fence_drv.sync_seq;
170 	r = amdgpu_fence_wait_polling(ring,
171 				      seq - ring->fence_drv.num_fences_mask,
172 				      timeout);
173 	if (r < 1)
174 		return -ETIMEDOUT;
175 
176 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
177 			       seq, 0);
178 
179 	*s = seq;
180 
181 	return 0;
182 }
183 
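/*
 * Editor's illustrative sketch (not part of the driver): polling fences are
 * meant for contexts that cannot sleep (the KIQ register access paths use
 * them this way).  The caller emits a polling fence and then busy-waits on
 * the returned sequence number.  The function name and the 5000 usec timeout
 * below are hypothetical.
 */
static int example_polling_submit(struct amdgpu_ring *ring)
{
	uint32_t seq;
	int r;

	r = amdgpu_ring_alloc(ring, 32);
	if (r)
		return r;

	r = amdgpu_fence_emit_polling(ring, &seq, 5000);
	if (r) {
		amdgpu_ring_undo(ring);
		return r;
	}
	amdgpu_ring_commit(ring);

	/* busy-wait; amdgpu_fence_wait_polling() returns 0 on timeout */
	if (!amdgpu_fence_wait_polling(ring, seq, 5000))
		return -ETIMEDOUT;

	return 0;
}
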
184 /**
185  * amdgpu_fence_schedule_fallback - schedule fallback check
186  *
187  * @ring: pointer to struct amdgpu_ring
188  *
189  * Start a timer as fallback to our interrupts.
190  */
191 static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
192 {
193 	mod_timer(&ring->fence_drv.fallback_timer,
194 		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
195 }
196 
197 /**
198  * amdgpu_fence_process - check for fence activity
199  *
200  * @ring: pointer to struct amdgpu_ring
201  *
202  * Checks the current fence value and calculates the last
203  * signalled fence value. Wakes the fence queue if the
204  * sequence number has increased.
205  *
206  * Returns true if fence was processed
207  */
208 bool amdgpu_fence_process(struct amdgpu_ring *ring)
209 {
210 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
211 	struct amdgpu_device *adev = ring->adev;
212 	uint32_t seq, last_seq;
213 
214 	do {
215 		last_seq = atomic_read(&ring->fence_drv.last_seq);
216 		seq = amdgpu_fence_read(ring);
217 
218 	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
219 
220 	if (timer_delete(&ring->fence_drv.fallback_timer) &&
221 	    seq != ring->fence_drv.sync_seq)
222 		amdgpu_fence_schedule_fallback(ring);
223 
224 	if (unlikely(seq == last_seq))
225 		return false;
226 
227 	last_seq &= drv->num_fences_mask;
228 	seq &= drv->num_fences_mask;
229 
230 	do {
231 		struct dma_fence *fence, **ptr;
232 		struct amdgpu_fence *am_fence;
233 
234 		++last_seq;
235 		last_seq &= drv->num_fences_mask;
236 		ptr = &drv->fences[last_seq];
237 
238 		/* There is always exactly one thread signaling this fence slot */
239 		fence = rcu_dereference_protected(*ptr, 1);
240 		RCU_INIT_POINTER(*ptr, NULL);
241 
242 		if (!fence)
243 			continue;
244 
245 		/* Save the wptr in the fence driver so we know what the last processed
246 		 * wptr was.  This is required for re-emitting the ring state for
247 		 * queues that are reset but are not guilty and thus have no guilty fence.
248 		 */
249 		am_fence = container_of(fence, struct amdgpu_fence, base);
250 		drv->signalled_wptr = am_fence->wptr;
251 		dma_fence_signal(fence);
252 		dma_fence_put(fence);
253 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
254 	} while (last_seq != seq);
255 
256 	return true;
257 }
258 
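/*
 * Editor's illustrative sketch (not part of the driver): amdgpu_fence_process()
 * is normally driven from each IP block's end-of-pipe interrupt handler, with
 * the fallback timer below covering missed interrupts.  The handler and the
 * choice of adev->gfx.gfx_ring[0] are hypothetical; real examples live in the
 * per-IP files (e.g. gfx_v*_0.c).
 */
static int example_eop_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	/* look up the ring that raised the interrupt, then walk its fences */
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];

	amdgpu_fence_process(ring);
	return 0;
}
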
259 /**
260  * amdgpu_fence_fallback - fallback for hardware interrupts
261  *
262  * @t: timer context used to obtain the pointer to ring structure
263  *
264  * Checks for fence activity.
265  */
266 static void amdgpu_fence_fallback(struct timer_list *t)
267 {
268 	struct amdgpu_ring *ring = timer_container_of(ring, t,
269 						      fence_drv.fallback_timer);
270 
271 	if (amdgpu_fence_process(ring))
272 		dev_warn(ring->adev->dev,
273 			 "Fence fallback timer expired on ring %s\n",
274 			 ring->name);
275 }
276 
277 /**
278  * amdgpu_fence_wait_empty - wait for all fences to signal
279  *
280  * @ring: ring the fences are associated with
281  *
282  * Wait for all fences on the requested ring to signal (all asics).
283  * Returns 0 if the fences have passed, error for all other cases.
284  */
285 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
286 {
287 	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
288 	struct dma_fence *fence, **ptr;
289 	int r;
290 
291 	if (!seq)
292 		return 0;
293 
294 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
295 	rcu_read_lock();
296 	fence = rcu_dereference(*ptr);
297 	if (!fence || !dma_fence_get_rcu(fence)) {
298 		rcu_read_unlock();
299 		return 0;
300 	}
301 	rcu_read_unlock();
302 
303 	r = dma_fence_wait(fence, false);
304 	dma_fence_put(fence);
305 	return r;
306 }
307 
308 /**
309  * amdgpu_fence_wait_polling - busy wait for given sequence number
310  *
311  * @ring: ring the fence is associated with
312  * @wait_seq: sequence number to wait for
313  * @timeout: the timeout for waiting in usecs
314  *
315  * Busy-wait for the given sequence number to signal on the requested ring.
316  * Returns the remaining timeout if the sequence signaled in time, 0 on timeout.
317  */
318 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
319 				      uint32_t wait_seq,
320 				      signed long timeout)
321 {
322 
323 	while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
324 		udelay(2);
325 		timeout -= 2;
326 	}
327 	return timeout > 0 ? timeout : 0;
328 }
329 /**
330  * amdgpu_fence_count_emitted - get the count of emitted fences
331  *
332  * @ring: ring the fence is associated with
333  *
334  * Get the number of fences emitted on the requested ring (all asics).
335  * Returns the number of emitted fences on the ring.  Used by the
336  * dynpm code to track ring activity.
337  */
338 unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
339 {
340 	uint64_t emitted;
341 
342 	/* We are not protected by ring lock when reading the last sequence
343 	 * but it's ok to report slightly wrong fence count here.
344 	 */
345 	emitted = 0x100000000ull;
346 	emitted -= atomic_read(&ring->fence_drv.last_seq);
347 	emitted += READ_ONCE(ring->fence_drv.sync_seq);
348 	return lower_32_bits(emitted);
349 }
350 
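/*
 * Editor's illustrative sketch (not part of the driver): power-management
 * code typically treats a ring with no outstanding fences as idle.  The
 * helper name is hypothetical.
 */
static bool example_ring_is_idle(struct amdgpu_ring *ring)
{
	/* zero means every emitted fence has already signaled */
	return amdgpu_fence_count_emitted(ring) == 0;
}
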
351 /**
352  * amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence was emitted
353  * @ring: ring the fence is associated with
354  *
355  * Find the earliest fence that has not signaled yet and return the time
356  * delta between when it was emitted and now.
357  */
358 u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
359 {
360 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
361 	struct dma_fence *fence;
362 	uint32_t last_seq, sync_seq;
363 
364 	last_seq = atomic_read(&ring->fence_drv.last_seq);
365 	sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
366 	if (last_seq == sync_seq)
367 		return 0;
368 
369 	++last_seq;
370 	last_seq &= drv->num_fences_mask;
371 	fence = drv->fences[last_seq];
372 	if (!fence)
373 		return 0;
374 
375 	return ktime_us_delta(ktime_get(),
376 		to_amdgpu_fence(fence)->start_timestamp);
377 }
378 
379 /**
380  * amdgpu_fence_update_start_timestamp - update the timestamp of the fence
381  * @ring: ring the fence is associated with
382  * @seq: the fence seq number to update.
383  * @timestamp: the start timestamp to update.
384  *
385  * This function is called when the fence and its related IB are about to be
386  * resubmitted to the GPU in an MCBP (preemption) scenario, so we need not
387  * consider races with amdgpu_fence_process() modifying the same fence.
388  */
389 void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
390 {
391 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
392 	struct dma_fence *fence;
393 
394 	seq &= drv->num_fences_mask;
395 	fence = drv->fences[seq];
396 	if (!fence)
397 		return;
398 
399 	to_amdgpu_fence(fence)->start_timestamp = timestamp;
400 }
401 
402 /**
403  * amdgpu_fence_driver_start_ring - make the fence driver
404  * ready for use on the requested ring.
405  *
406  * @ring: ring to start the fence driver on
407  * @irq_src: interrupt source to use for this ring
408  * @irq_type: interrupt type to use for this ring
409  *
410  * Make the fence driver ready for processing (all asics).
411  * Not all asics have all rings, so each asic will only
412  * start the fence driver on the rings it has.
413  * Returns 0 for success, errors for failure.
414  */
415 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
416 				   struct amdgpu_irq_src *irq_src,
417 				   unsigned int irq_type)
418 {
419 	struct amdgpu_device *adev = ring->adev;
420 	uint64_t index;
421 
422 	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
423 		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
424 		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
425 	} else {
426 		/* put fence directly behind firmware */
427 		index = ALIGN(adev->uvd.fw->size, 8);
428 		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
429 		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
430 	}
431 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
432 
433 	ring->fence_drv.irq_src = irq_src;
434 	ring->fence_drv.irq_type = irq_type;
435 	ring->fence_drv.initialized = true;
436 
437 	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
438 		      ring->name, ring->fence_drv.gpu_addr);
439 	return 0;
440 }
441 
442 /**
443  * amdgpu_fence_driver_init_ring - init the fence driver
444  * for the requested ring.
445  *
446  * @ring: ring to init the fence driver on
447  *
448  * Init the fence driver for the requested ring (all asics).
449  * Helper function for amdgpu_fence_driver_init().
450  */
451 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
452 {
453 	struct amdgpu_device *adev = ring->adev;
454 
455 	if (!adev)
456 		return -EINVAL;
457 
458 	if (!is_power_of_2(ring->num_hw_submission))
459 		return -EINVAL;
460 
461 	ring->fence_drv.cpu_addr = NULL;
462 	ring->fence_drv.gpu_addr = 0;
463 	ring->fence_drv.sync_seq = 0;
464 	atomic_set(&ring->fence_drv.last_seq, 0);
465 	ring->fence_drv.initialized = false;
466 
467 	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
468 
469 	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
470 	spin_lock_init(&ring->fence_drv.lock);
471 	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
472 					 GFP_KERNEL);
473 
474 	if (!ring->fence_drv.fences)
475 		return -ENOMEM;
476 
477 	return 0;
478 }
479 
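/*
 * Editor's illustrative sketch (not part of the driver): the expected ordering
 * of the two per-ring setup helpers.  In practice amdgpu_fence_driver_init_ring()
 * is called from amdgpu_ring_init() and amdgpu_fence_driver_start_ring() from
 * each IP block's hw_init; the wrapper below is hypothetical.
 */
static int example_ring_fence_setup(struct amdgpu_ring *ring,
				    struct amdgpu_irq_src *irq_src,
				    unsigned int irq_type)
{
	int r;

	/* software state: fences array, fallback timer, sequence counters */
	r = amdgpu_fence_driver_init_ring(ring);
	if (r)
		return r;

	/* hardware state: fence GPU/CPU address and interrupt routing */
	return amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
}
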
480 /**
481  * amdgpu_fence_driver_sw_init - init the fence driver
482  * for all possible rings.
483  *
484  * @adev: amdgpu device pointer
485  *
486  * Init the fence driver for all possible rings (all asics).
487  * Not all asics have all rings, so each asic will only
488  * start the fence driver on the rings it has using
489  * amdgpu_fence_driver_start_ring().
490  * Returns 0 for success.
491  */
492 int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
493 {
494 	return 0;
495 }
496 
497 /**
498  * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
499  * fence driver interrupts need to be restored.
500  *
501  * @ring: ring to be checked
502  *
503  * Interrupts for rings that belong to GFX IP don't need to be restored
504  * when the target power state is s0ix.
505  *
506  * Return true if need to restore interrupts, false otherwise.
507  */
508 static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
509 {
510 	struct amdgpu_device *adev = ring->adev;
511 	bool is_gfx_power_domain = false;
512 
513 	switch (ring->funcs->type) {
514 	case AMDGPU_RING_TYPE_SDMA:
515 	/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
516 		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
517 		    IP_VERSION(5, 0, 0))
518 			is_gfx_power_domain = true;
519 		break;
520 	case AMDGPU_RING_TYPE_GFX:
521 	case AMDGPU_RING_TYPE_COMPUTE:
522 	case AMDGPU_RING_TYPE_KIQ:
523 	case AMDGPU_RING_TYPE_MES:
524 		is_gfx_power_domain = true;
525 		break;
526 	default:
527 		break;
528 	}
529 
530 	return !(adev->in_s0ix && is_gfx_power_domain);
531 }
532 
533 /**
534  * amdgpu_fence_driver_hw_fini - tear down the fence driver
535  * for all possible rings.
536  *
537  * @adev: amdgpu device pointer
538  *
539  * Tear down the fence driver for all possible rings (all asics).
540  */
541 void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
542 {
543 	int i, r;
544 
545 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
546 		struct amdgpu_ring *ring = adev->rings[i];
547 
548 		if (!ring || !ring->fence_drv.initialized)
549 			continue;
550 
551 		/* You can't wait for HW to signal if it's gone */
552 		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
553 			r = amdgpu_fence_wait_empty(ring);
554 		else
555 			r = -ENODEV;
556 		/* no need to trigger GPU reset as we are unloading */
557 		if (r)
558 			amdgpu_fence_driver_force_completion(ring);
559 
560 		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
561 		    ring->fence_drv.irq_src &&
562 		    amdgpu_fence_need_ring_interrupt_restore(ring))
563 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
564 				       ring->fence_drv.irq_type);
565 
566 		timer_delete_sync(&ring->fence_drv.fallback_timer);
567 	}
568 }
569 
570 /* Will either stop and flush handlers for the amdgpu interrupt or re-enable it */
571 void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
572 {
573 	int i;
574 
575 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
576 		struct amdgpu_ring *ring = adev->rings[i];
577 
578 		if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
579 			continue;
580 
581 		if (stop)
582 			disable_irq(adev->irq.irq);
583 		else
584 			enable_irq(adev->irq.irq);
585 	}
586 }
587 
588 void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
589 {
590 	unsigned int i, j;
591 
592 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
593 		struct amdgpu_ring *ring = adev->rings[i];
594 
595 		if (!ring || !ring->fence_drv.initialized)
596 			continue;
597 
598 		/*
599 		 * Notice we check for sched.ops since there's some
600 		 * override on the meaning of sched.ready by amdgpu.
601 		 * The natural check would be sched.ready, which is
602 		 * set as drm_sched_init() finishes...
603 		 */
604 		if (ring->sched.ops)
605 			drm_sched_fini(&ring->sched);
606 
607 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
608 			dma_fence_put(ring->fence_drv.fences[j]);
609 		kfree(ring->fence_drv.fences);
610 		ring->fence_drv.fences = NULL;
611 		ring->fence_drv.initialized = false;
612 	}
613 }
614 
615 /**
616  * amdgpu_fence_driver_hw_init - enable the fence driver
617  * for all possible rings.
618  *
619  * @adev: amdgpu device pointer
620  *
621  * Enable the fence driver for all possible rings (all asics).
622  * Not all asics have all rings, so each asic will only
623  * start the fence driver on the rings it has using
624  * amdgpu_fence_driver_start_ring().
625  * Returns 0 for success.
626  */
627 void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
628 {
629 	int i;
630 
631 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
632 		struct amdgpu_ring *ring = adev->rings[i];
633 
634 		if (!ring || !ring->fence_drv.initialized)
635 			continue;
636 
637 		/* enable the interrupt */
638 		if (ring->fence_drv.irq_src &&
639 		    amdgpu_fence_need_ring_interrupt_restore(ring))
640 			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
641 				       ring->fence_drv.irq_type);
642 	}
643 }
644 
645 /**
646  * amdgpu_fence_driver_set_error - set error code on fences
647  * @ring: the ring which contains the fences
648  * @error: the error code to set
649  *
650  * Set an error code to all the fences pending on the ring.
651  */
652 void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
653 {
654 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
655 	unsigned long flags;
656 
657 	spin_lock_irqsave(&drv->lock, flags);
658 	for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
659 		struct dma_fence *fence;
660 
661 		fence = rcu_dereference_protected(drv->fences[i],
662 						  lockdep_is_held(&drv->lock));
663 		if (fence && !dma_fence_is_signaled_locked(fence))
664 			dma_fence_set_error(fence, error);
665 	}
666 	spin_unlock_irqrestore(&drv->lock, flags);
667 }
668 
669 /**
670  * amdgpu_fence_driver_force_completion - force signal latest fence of ring
671  *
672  * @ring: ring whose fences should be force-completed
673  *
674  */
675 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
676 {
677 	amdgpu_fence_driver_set_error(ring, -ECANCELED);
678 	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
679 	amdgpu_fence_process(ring);
680 }
681 
682 
683 /*
684  * Kernel queue reset handling
685  *
686  * The driver can reset individual queues for most engines, but those queues
687  * may contain work from multiple contexts.  Resetting the queue will lose
688  * all of that state.  In order to minimize the collateral damage, the
689  * driver will save the ring contents which are not associated with the guilty
690  * context prior to resetting the queue.  After resetting the queue the queue
691  * contents from the other contexts are re-emitted to the ring so that they
692  * can be processed by the engine.  To handle this, we save the queue's write
693  * pointer (wptr) in the fences associated with each context.  If we get a
694  * queue timeout, we can then use the wptrs from the fences to determine
695  * which data needs to be saved out of the queue's ring buffer.
696  */
697 
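/*
 * Editor's illustrative sketch (not part of the driver): how a per-queue reset
 * path might combine the helpers defined below.  The real flow lives in each
 * IP block's ring reset callback; the function name is hypothetical and the
 * hardware-specific steps are only described in comments.
 */
static void example_queue_reset(struct amdgpu_ring *ring,
				struct amdgpu_fence *guilty_fence)
{
	/* 1) save still-unprocessed work from the innocent contexts */
	amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);

	/* 2) the hardware-specific queue reset itself happens here (not shown) */

	/* 3) complete the guilty context's fences with an error */
	amdgpu_fence_driver_guilty_force_completion(guilty_fence);

	/* 4) the saved dwords in ring->ring_backup (up to
	 *    ring->ring_backup_entries_to_copy of them) are then re-emitted so
	 *    the other contexts can make forward progress.
	 */
}
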
698 /**
699  * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
700  *
701  * @af: guilty fence whose sequence number should be force-signaled
702  *
703  */
704 void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
705 {
706 	struct dma_fence *unprocessed;
707 	struct dma_fence __rcu **ptr;
708 	struct amdgpu_fence *fence;
709 	struct amdgpu_ring *ring = af->ring;
710 	unsigned long flags;
711 	u32 seq, last_seq;
712 
713 	last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
714 	seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
715 
716 	/* mark all fences from the guilty context with an error */
717 	spin_lock_irqsave(&ring->fence_drv.lock, flags);
718 	do {
719 		last_seq++;
720 		last_seq &= ring->fence_drv.num_fences_mask;
721 
722 		ptr = &ring->fence_drv.fences[last_seq];
723 		rcu_read_lock();
724 		unprocessed = rcu_dereference(*ptr);
725 
726 		if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
727 			fence = container_of(unprocessed, struct amdgpu_fence, base);
728 
729 			if (fence == af)
730 				dma_fence_set_error(&fence->base, -ETIME);
731 			else if (fence->context == af->context)
732 				dma_fence_set_error(&fence->base, -ECANCELED);
733 		}
734 		rcu_read_unlock();
735 	} while (last_seq != seq);
736 	spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
737 	/* signal the guilty fence */
738 	amdgpu_fence_write(ring, (u32)af->base.seqno);
739 	amdgpu_fence_process(ring);
740 }
741 
742 void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
743 {
744 	af->wptr = af->ring->wptr;
745 }
746 
747 static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
748 						   u64 start_wptr, u32 end_wptr)
749 {
750 	unsigned int first_idx = start_wptr & ring->buf_mask;
751 	unsigned int last_idx = end_wptr & ring->buf_mask;
752 	unsigned int i;
753 
754 	/* Backup the contents of the ring buffer. */
755 	for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
756 		ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
757 }
758 
759 void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
760 					     struct amdgpu_fence *guilty_fence)
761 {
762 	struct dma_fence *unprocessed;
763 	struct dma_fence __rcu **ptr;
764 	struct amdgpu_fence *fence;
765 	u64 wptr;
766 	u32 seq, last_seq;
767 
768 	last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
769 	seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
770 	wptr = ring->fence_drv.signalled_wptr;
771 	ring->ring_backup_entries_to_copy = 0;
772 
773 	do {
774 		last_seq++;
775 		last_seq &= ring->fence_drv.num_fences_mask;
776 
777 		ptr = &ring->fence_drv.fences[last_seq];
778 		rcu_read_lock();
779 		unprocessed = rcu_dereference(*ptr);
780 
781 		if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
782 			fence = container_of(unprocessed, struct amdgpu_fence, base);
783 
784 			/* save everything if the ring is not guilty, otherwise
785 			 * just save the content from other contexts.
786 			 */
787 			if (!guilty_fence || (fence->context != guilty_fence->context))
788 				amdgpu_ring_backup_unprocessed_command(ring, wptr,
789 								       fence->wptr);
790 			wptr = fence->wptr;
791 		}
792 		rcu_read_unlock();
793 	} while (last_seq != seq);
794 }
795 
796 /*
797  * Common fence implementation
798  */
799 
800 static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
801 {
802 	return "amdgpu";
803 }
804 
805 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
806 {
807 	return (const char *)to_amdgpu_fence(f)->ring->name;
808 }
809 
810 /**
811  * amdgpu_fence_enable_signaling - enable signalling on fence
812  * @f: fence
813  *
814  * Called by the dma_fence core when a waiter needs signaling.  Arms the
815  * ring's fallback timer if it is not already pending so the fence is
816  * processed even if the fence interrupt is missed.
817  */
818 static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
819 {
820 	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
821 		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
822 
823 	return true;
824 }
825 
826 /**
827  * amdgpu_fence_free - free up the fence memory
828  *
829  * @rcu: RCU callback head
830  *
831  * Free up the fence memory after the RCU grace period.
832  */
833 static void amdgpu_fence_free(struct rcu_head *rcu)
834 {
835 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
836 
837 	/* the fence is embedded in struct amdgpu_fence; free that allocation */
838 	kfree(to_amdgpu_fence(f));
839 }
840 
841 /**
842  * amdgpu_fence_release - callback that fence can be freed
843  *
844  * @f: fence
845  *
846  * This function is called when the reference count becomes zero.
847  * It just RCU schedules freeing up the fence.
848  */
849 static void amdgpu_fence_release(struct dma_fence *f)
850 {
851 	call_rcu(&f->rcu, amdgpu_fence_free);
852 }
853 
854 static const struct dma_fence_ops amdgpu_fence_ops = {
855 	.get_driver_name = amdgpu_fence_get_driver_name,
856 	.get_timeline_name = amdgpu_fence_get_timeline_name,
857 	.enable_signaling = amdgpu_fence_enable_signaling,
858 	.release = amdgpu_fence_release,
859 };
860 
861 /*
862  * Fence debugfs
863  */
864 #if defined(CONFIG_DEBUG_FS)
865 static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
866 {
867 	struct amdgpu_device *adev = m->private;
868 	int i;
869 
870 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
871 		struct amdgpu_ring *ring = adev->rings[i];
872 
873 		if (!ring || !ring->fence_drv.initialized)
874 			continue;
875 
876 		amdgpu_fence_process(ring);
877 
878 		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
879 		seq_printf(m, "Last signaled fence          0x%08x\n",
880 			   atomic_read(&ring->fence_drv.last_seq));
881 		seq_printf(m, "Last emitted                 0x%08x\n",
882 			   ring->fence_drv.sync_seq);
883 
884 		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
885 		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
886 			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
887 				   le32_to_cpu(*ring->trail_fence_cpu_addr));
888 			seq_printf(m, "Last emitted                 0x%08x\n",
889 				   ring->trail_seq);
890 		}
891 
892 		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
893 			continue;
894 
895 		/* set in CP_VMID_PREEMPT and preemption occurred */
896 		seq_printf(m, "Last preempted               0x%08x\n",
897 			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
898 		/* set in CP_VMID_RESET and reset occurred */
899 		seq_printf(m, "Last reset                   0x%08x\n",
900 			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
901 		/* Both preemption and reset occurred */
902 		seq_printf(m, "Last both                    0x%08x\n",
903 			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
904 	}
905 	return 0;
906 }
907 
908 /*
909  * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
910  *
911  * Manually trigger a gpu reset and recovery.
912  */
913 static int gpu_recover_get(void *data, u64 *val)
914 {
915 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
916 	struct drm_device *dev = adev_to_drm(adev);
917 	int r;
918 
919 	r = pm_runtime_get_sync(dev->dev);
920 	if (r < 0) {
921 		pm_runtime_put_autosuspend(dev->dev);
922 		return 0;
923 	}
924 
925 	if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
926 		flush_work(&adev->reset_work);
927 
928 	*val = atomic_read(&adev->reset_domain->reset_res);
929 
930 	pm_runtime_put_autosuspend(dev->dev);
931 
932 	return 0;
933 }
934 
935 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
936 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
937 			 "%lld\n");
938 
939 static void amdgpu_debugfs_reset_work(struct work_struct *work)
940 {
941 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
942 						  reset_work);
943 
944 	struct amdgpu_reset_context reset_context;
945 
946 	memset(&reset_context, 0, sizeof(reset_context));
947 
948 	reset_context.method = AMD_RESET_METHOD_NONE;
949 	reset_context.reset_req_dev = adev;
950 	reset_context.src = AMDGPU_RESET_SRC_USER;
951 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
952 	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
953 
954 	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
955 }
956 
957 #endif
958 
959 void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
960 {
961 #if defined(CONFIG_DEBUG_FS)
962 	struct drm_minor *minor = adev_to_drm(adev)->primary;
963 	struct dentry *root = minor->debugfs_root;
964 
965 	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
966 			    &amdgpu_debugfs_fence_info_fops);
967 
968 	if (!amdgpu_sriov_vf(adev)) {
969 
970 		INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
971 		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
972 				    &amdgpu_debugfs_gpu_recover_fops);
973 	}
974 #endif
975 }
976 
977