xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Dave Airlie
30  */
31 #include <linux/seq_file.h>
32 #include <linux/atomic.h>
33 #include <linux/wait.h>
34 #include <linux/kref.h>
35 #include <linux/slab.h>
36 #include <linux/firmware.h>
37 #include <linux/pm_runtime.h>
38 
39 #include <drm/drm_drv.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_reset.h"
43 
44 /*
45  * Cast helper
46  */
47 static const struct dma_fence_ops amdgpu_fence_ops;
48 static const struct dma_fence_ops amdgpu_job_fence_ops;
49 static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
50 {
51 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
52 
53 	if (__f->base.ops == &amdgpu_fence_ops ||
54 	    __f->base.ops == &amdgpu_job_fence_ops)
55 		return __f;
56 
57 	return NULL;
58 }
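
/*
 * Descriptive note: both the standalone fence and the job-embedded hardware
 * fence wrap a struct amdgpu_fence whose dma_fence is the "base" member, so
 * the container_of() above is valid for either ops table; fences owned by
 * other drivers resolve to NULL here.
 */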
59 
60 /**
61  * amdgpu_fence_write - write a fence value
62  *
63  * @ring: ring the fence is associated with
64  * @seq: sequence number to write
65  *
66  * Writes a fence value to memory (all asics).
67  */
68 static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
69 {
70 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
71 
72 	if (drv->cpu_addr)
73 		*drv->cpu_addr = cpu_to_le32(seq);
74 }
75 
76 /**
77  * amdgpu_fence_read - read a fence value
78  *
79  * @ring: ring the fence is associated with
80  *
81  * Reads a fence value from memory (all asics).
82  * Returns the value of the fence read from memory.
83  */
84 static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
85 {
86 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
87 	u32 seq = 0;
88 
89 	if (drv->cpu_addr)
90 		seq = le32_to_cpu(*drv->cpu_addr);
91 	else
92 		seq = atomic_read(&drv->last_seq);
93 
94 	return seq;
95 }
96 
97 /**
98  * amdgpu_fence_emit - emit a fence on the requested ring
99  *
100  * @ring: ring the fence is associated with
101  * @f: resulting fence object
102  * @af: optional preallocated amdgpu fence to use; if NULL a standalone fence is allocated
103  * @flags: flags to pass into the subordinate .emit_fence() call
104  *
105  * Emits a fence command on the requested ring (all asics).
106  * Returns 0 on success, negative error code on failure.
107  */
108 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
109 		      struct amdgpu_fence *af, unsigned int flags)
110 {
111 	struct amdgpu_device *adev = ring->adev;
112 	struct dma_fence *fence;
113 	struct amdgpu_fence *am_fence;
114 	struct dma_fence __rcu **ptr;
115 	uint32_t seq;
116 	int r;
117 
118 	if (!af) {
119 		/* create a separate hw fence */
120 		am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
121 		if (!am_fence)
122 			return -ENOMEM;
123 	} else {
124 		am_fence = af;
125 	}
126 	fence = &am_fence->base;
127 	am_fence->ring = ring;
128 
129 	seq = ++ring->fence_drv.sync_seq;
130 	am_fence->seq = seq;
131 	if (af) {
132 		dma_fence_init(fence, &amdgpu_job_fence_ops,
133 			       &ring->fence_drv.lock,
134 			       adev->fence_context + ring->idx, seq);
135 		/* Take a reference; it is dropped again in amdgpu_job_{free, free_cb} */
136 		dma_fence_get(fence);
137 	} else {
138 		dma_fence_init(fence, &amdgpu_fence_ops,
139 			       &ring->fence_drv.lock,
140 			       adev->fence_context + ring->idx, seq);
141 	}
142 
143 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
144 			       seq, flags | AMDGPU_FENCE_FLAG_INT);
145 	amdgpu_fence_save_wptr(fence);
146 	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
147 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
148 	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
149 		struct dma_fence *old;
150 
151 		rcu_read_lock();
152 		old = dma_fence_get_rcu_safe(ptr);
153 		rcu_read_unlock();
154 
155 		if (old) {
156 			r = dma_fence_wait(old, false);
157 			dma_fence_put(old);
158 			if (r)
159 				return r;
160 		}
161 	}
162 
163 	to_amdgpu_fence(fence)->start_timestamp = ktime_get();
164 
165 	/* This function can't be called concurrently anyway, otherwise
166 	 * emitting the fence would mess up the hardware ring buffer.
167 	 */
168 	rcu_assign_pointer(*ptr, dma_fence_get(fence));
169 
170 	*f = fence;
171 
172 	return 0;
173 }
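
/*
 * Illustrative sketch only (not part of the driver): a caller that has
 * already started a submission on @ring (e.g. from the IB scheduling path)
 * and just needs a fence for it would do roughly:
 *
 *	struct dma_fence *f;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &f, NULL, 0);
 *	if (!r) {
 *		r = dma_fence_wait(f, false);
 *		dma_fence_put(f);
 *	}
 */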
174 
175 /**
176  * amdgpu_fence_emit_polling - emit a fence on the requested ring
177  *
178  * @ring: ring the fence is associated with
179  * @s: resulting sequence number
180  * @timeout: the timeout for waiting in usecs
181  *
182  * Emits a fence command on the requested ring (all asics).
183  * Used for fence polling.
184  * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
185  */
186 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
187 			      uint32_t timeout)
188 {
189 	uint32_t seq;
190 	signed long r;
191 
192 	if (!s)
193 		return -EINVAL;
194 
195 	seq = ++ring->fence_drv.sync_seq;
196 	r = amdgpu_fence_wait_polling(ring,
197 				      seq - ring->fence_drv.num_fences_mask,
198 				      timeout);
199 	if (r < 1)
200 		return -ETIMEDOUT;
201 
202 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
203 			       seq, 0);
204 
205 	*s = seq;
206 
207 	return 0;
208 }
209 
210 /**
211  * amdgpu_fence_schedule_fallback - schedule fallback check
212  *
213  * @ring: pointer to struct amdgpu_ring
214  *
215  * Start a timer as fallback to our interrupts.
216  */
217 static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
218 {
219 	mod_timer(&ring->fence_drv.fallback_timer,
220 		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
221 }
222 
223 /**
224  * amdgpu_fence_process - check for fence activity
225  *
226  * @ring: pointer to struct amdgpu_ring
227  *
228  * Checks the current fence value and calculates the last
229  * signalled fence value. Wakes the fence queue if the
230  * sequence number has increased.
231  *
232  * Returns true if fence was processed
233  */
234 bool amdgpu_fence_process(struct amdgpu_ring *ring)
235 {
236 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
237 	struct amdgpu_device *adev = ring->adev;
238 	uint32_t seq, last_seq;
239 
240 	do {
241 		last_seq = atomic_read(&ring->fence_drv.last_seq);
242 		seq = amdgpu_fence_read(ring);
243 
244 	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
245 
246 	if (timer_delete(&ring->fence_drv.fallback_timer) &&
247 	    seq != ring->fence_drv.sync_seq)
248 		amdgpu_fence_schedule_fallback(ring);
249 
250 	if (unlikely(seq == last_seq))
251 		return false;
252 
253 	last_seq &= drv->num_fences_mask;
254 	seq &= drv->num_fences_mask;
255 
256 	do {
257 		struct dma_fence *fence, **ptr;
258 		struct amdgpu_fence *am_fence;
259 
260 		++last_seq;
261 		last_seq &= drv->num_fences_mask;
262 		ptr = &drv->fences[last_seq];
263 
264 		/* There is always exactly one thread signaling this fence slot */
265 		fence = rcu_dereference_protected(*ptr, 1);
266 		RCU_INIT_POINTER(*ptr, NULL);
267 
268 		if (!fence)
269 			continue;
270 
271 		/* Save the wptr in the fence driver so we know what the last processed
272 		 * wptr was.  This is required for re-emitting the ring state for
273 		 * queues that are reset but are not guilty and thus have no guilty fence.
274 		 */
275 		am_fence = container_of(fence, struct amdgpu_fence, base);
276 		drv->signalled_wptr = am_fence->wptr;
277 		dma_fence_signal(fence);
278 		dma_fence_put(fence);
279 		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
280 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
281 	} while (last_seq != seq);
282 
283 	return true;
284 }
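
/*
 * Example of the slot walk above (descriptive only): with num_fences_mask =
 * 0xff, last_seq = 5 and a newly read seq = 8, the loop visits slots 6, 7
 * and 8, signalling and dropping the fence stored in each of them.
 */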
285 
286 /**
287  * amdgpu_fence_fallback - fallback for hardware interrupts
288  *
289  * @t: timer context used to obtain the pointer to ring structure
290  *
291  * Checks for fence activity.
292  */
293 static void amdgpu_fence_fallback(struct timer_list *t)
294 {
295 	struct amdgpu_ring *ring = timer_container_of(ring, t,
296 						      fence_drv.fallback_timer);
297 
298 	if (amdgpu_fence_process(ring))
299 		dev_warn(ring->adev->dev,
300 			 "Fence fallback timer expired on ring %s\n",
301 			 ring->name);
302 }
303 
304 /**
305  * amdgpu_fence_wait_empty - wait for all fences to signal
306  *
307  * @ring: ring the fences are associated with
308  *
309  * Wait for all fences on the requested ring to signal (all asics).
310  * Returns 0 if the fences have passed, error for all other cases.
311  */
312 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
313 {
314 	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
315 	struct dma_fence *fence, **ptr;
316 	int r;
317 
318 	if (!seq)
319 		return 0;
320 
321 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
322 	rcu_read_lock();
323 	fence = rcu_dereference(*ptr);
324 	if (!fence || !dma_fence_get_rcu(fence)) {
325 		rcu_read_unlock();
326 		return 0;
327 	}
328 	rcu_read_unlock();
329 
330 	r = dma_fence_wait(fence, false);
331 	dma_fence_put(fence);
332 	return r;
333 }
334 
335 /**
336  * amdgpu_fence_wait_polling - busy wait for a given sequence number
337  *
338  * @ring: ring the fence is associated with
339  * @wait_seq: sequence number to wait for
340  * @timeout: the timeout for waiting in usecs
341  *
342  * Busy-wait until the given sequence number has signaled (all asics).
343  * Returns the time left before the timeout, or 0 if the wait timed out.
344  */
345 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
346 				      uint32_t wait_seq,
347 				      signed long timeout)
348 {
349 
350 	while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
351 		udelay(2);
352 		timeout -= 2;
353 	}
354 	return timeout > 0 ? timeout : 0;
355 }
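
/*
 * Note on the loop above: the (int32_t) cast keeps the wait correct across
 * the 32-bit sequence wrap-around.  For example, with wait_seq = 0x00000002
 * and a current fence value of 0xfffffffe the unsigned difference is
 * 0x00000004, still positive as int32_t, so polling continues.
 */
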
356 /**
357  * amdgpu_fence_count_emitted - get the count of emitted fences
358  *
359  * @ring: ring the fence is associated with
360  *
361  * Get the number of fences emitted on the requested ring (all asics).
362  * Returns the number of emitted fences on the ring.  Used by the
363  * dynpm code to track ring activity.
364  */
365 unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
366 {
367 	uint64_t emitted;
368 
369 	/* We are not protected by ring lock when reading the last sequence
370 	 * but it's ok to report slightly wrong fence count here.
371 	 */
372 	emitted = 0x100000000ull;
373 	emitted -= atomic_read(&ring->fence_drv.last_seq);
374 	emitted += READ_ONCE(ring->fence_drv.sync_seq);
375 	return lower_32_bits(emitted);
376 }
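
/*
 * Worked example for the arithmetic above: with last_seq = 0xfffffff0 and
 * sync_seq = 0x00000010 the intermediate value is
 * 0x100000000 - 0xfffffff0 + 0x00000010 = 0x20, i.e. 32 fences are still
 * outstanding even though the 32-bit sequence counter has wrapped.
 */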
377 
378 /**
379  * amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence was emitted
380  * @ring: ring the fence is associated with
381  *
382  * Find the earliest fence that has not signaled yet and calculate the time
383  * delta between when it was emitted and now.
384  */
385 u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
386 {
387 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
388 	struct dma_fence *fence;
389 	uint32_t last_seq, sync_seq;
390 
391 	last_seq = atomic_read(&ring->fence_drv.last_seq);
392 	sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
393 	if (last_seq == sync_seq)
394 		return 0;
395 
396 	++last_seq;
397 	last_seq &= drv->num_fences_mask;
398 	fence = drv->fences[last_seq];
399 	if (!fence)
400 		return 0;
401 
402 	return ktime_us_delta(ktime_get(),
403 		to_amdgpu_fence(fence)->start_timestamp);
404 }
405 
406 /**
407  * amdgpu_fence_update_start_timestamp - update the timestamp of the fence
408  * @ring: ring the fence is associated with
409  * @seq: the fence seq number to update.
410  * @timestamp: the start timestamp to update.
411  *
412  * This function is called when the fence and its related ib are about to be
413  * resubmitted to the GPU in the MCBP (mid-command buffer preemption) scenario,
414  * so races with amdgpu_fence_process on the same fence need not be considered.
415  */
416 void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
417 {
418 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
419 	struct dma_fence *fence;
420 
421 	seq &= drv->num_fences_mask;
422 	fence = drv->fences[seq];
423 	if (!fence)
424 		return;
425 
426 	to_amdgpu_fence(fence)->start_timestamp = timestamp;
427 }
428 
429 /**
430  * amdgpu_fence_driver_start_ring - make the fence driver
431  * ready for use on the requested ring.
432  *
433  * @ring: ring to start the fence driver on
434  * @irq_src: interrupt source to use for this ring
435  * @irq_type: interrupt type to use for this ring
436  *
437  * Make the fence driver ready for processing (all asics).
438  * Not all asics have all rings, so each asic will only
439  * start the fence driver on the rings it has.
440  * Returns 0 for success, errors for failure.
441  */
442 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
443 				   struct amdgpu_irq_src *irq_src,
444 				   unsigned int irq_type)
445 {
446 	struct amdgpu_device *adev = ring->adev;
447 	uint64_t index;
448 
449 	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
450 		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
451 		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
452 	} else {
453 		/* put fence directly behind firmware */
454 		index = ALIGN(adev->uvd.fw->size, 8);
455 		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
456 		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
457 	}
458 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
459 
460 	ring->fence_drv.irq_src = irq_src;
461 	ring->fence_drv.irq_type = irq_type;
462 	ring->fence_drv.initialized = true;
463 
464 	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
465 		      ring->name, ring->fence_drv.gpu_addr);
466 	return 0;
467 }
468 
469 /**
470  * amdgpu_fence_driver_init_ring - init the fence driver
471  * for the requested ring.
472  *
473  * @ring: ring to init the fence driver on
474  *
475  * Init the fence driver for the requested ring (all asics).
476  * Helper function for amdgpu_fence_driver_init().
477  */
478 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
479 {
480 	struct amdgpu_device *adev = ring->adev;
481 
482 	if (!adev)
483 		return -EINVAL;
484 
485 	if (!is_power_of_2(ring->num_hw_submission))
486 		return -EINVAL;
487 
488 	ring->fence_drv.cpu_addr = NULL;
489 	ring->fence_drv.gpu_addr = 0;
490 	ring->fence_drv.sync_seq = 0;
491 	atomic_set(&ring->fence_drv.last_seq, 0);
492 	ring->fence_drv.initialized = false;
493 
494 	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
495 
496 	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
497 	spin_lock_init(&ring->fence_drv.lock);
498 	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
499 					 GFP_KERNEL);
500 
501 	if (!ring->fence_drv.fences)
502 		return -ENOMEM;
503 
504 	return 0;
505 }
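
/*
 * Sizing note (descriptive only): the fences array has 2 * num_hw_submission
 * slots and num_fences_mask is one less than that, so with the power-of-two
 * check above a sequence number maps to a slot via "seq & num_fences_mask".
 * E.g. num_hw_submission = 128 gives 256 slots and a mask of 0xff.
 */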
506 
507 /**
508  * amdgpu_fence_driver_sw_init - init the fence driver
509  * for all possible rings.
510  *
511  * @adev: amdgpu device pointer
512  *
513  * Init the fence driver for all possible rings (all asics).
514  * Not all asics have all rings, so each asic will only
515  * start the fence driver on the rings it has using
516  * amdgpu_fence_driver_start_ring().
517  * Returns 0 for success.
518  */
519 int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
520 {
521 	return 0;
522 }
523 
524 /**
525  * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
526  * fence driver interrupts need to be restored.
527  *
528  * @ring: ring to be checked
529  *
530  * Interrupts for rings that belong to GFX IP don't need to be restored
531  * when the target power state is s0ix.
532  *
533  * Return true if the interrupts need to be restored, false otherwise.
534  */
535 static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
536 {
537 	struct amdgpu_device *adev = ring->adev;
538 	bool is_gfx_power_domain = false;
539 
540 	switch (ring->funcs->type) {
541 	case AMDGPU_RING_TYPE_SDMA:
542 	/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
543 		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
544 		    IP_VERSION(5, 0, 0))
545 			is_gfx_power_domain = true;
546 		break;
547 	case AMDGPU_RING_TYPE_GFX:
548 	case AMDGPU_RING_TYPE_COMPUTE:
549 	case AMDGPU_RING_TYPE_KIQ:
550 	case AMDGPU_RING_TYPE_MES:
551 		is_gfx_power_domain = true;
552 		break;
553 	default:
554 		break;
555 	}
556 
557 	return !(adev->in_s0ix && is_gfx_power_domain);
558 }
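
/*
 * Descriptive note: amdgpu_fence_driver_hw_fini() and
 * amdgpu_fence_driver_hw_init() below use this helper so that, when the
 * target state is s0ix, fence interrupts of rings in the GFX power domain
 * (GFX, compute, KIQ, MES and SDMA 5.x+) are neither released nor
 * re-acquired, since that domain is handled by GFXOFF.
 */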
559 
560 /**
561  * amdgpu_fence_driver_hw_fini - tear down the fence driver
562  * for all possible rings.
563  *
564  * @adev: amdgpu device pointer
565  *
566  * Tear down the fence driver for all possible rings (all asics).
567  */
568 void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
569 {
570 	int i, r;
571 
572 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
573 		struct amdgpu_ring *ring = adev->rings[i];
574 
575 		if (!ring || !ring->fence_drv.initialized)
576 			continue;
577 
578 		/* You can't wait for HW to signal if it's gone */
579 		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
580 			r = amdgpu_fence_wait_empty(ring);
581 		else
582 			r = -ENODEV;
583 		/* no need to trigger GPU reset as we are unloading */
584 		if (r)
585 			amdgpu_fence_driver_force_completion(ring);
586 
587 		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
588 		    ring->fence_drv.irq_src &&
589 		    amdgpu_fence_need_ring_interrupt_restore(ring))
590 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
591 				       ring->fence_drv.irq_type);
592 
593 		timer_delete_sync(&ring->fence_drv.fallback_timer);
594 	}
595 }
596 
597 /* Will either stop and flush handlers for the amdgpu interrupt or re-enable it */
598 void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
599 {
600 	int i;
601 
602 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
603 		struct amdgpu_ring *ring = adev->rings[i];
604 
605 		if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
606 			continue;
607 
608 		if (stop)
609 			disable_irq(adev->irq.irq);
610 		else
611 			enable_irq(adev->irq.irq);
612 	}
613 }
614 
615 void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
616 {
617 	unsigned int i, j;
618 
619 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
620 		struct amdgpu_ring *ring = adev->rings[i];
621 
622 		if (!ring || !ring->fence_drv.initialized)
623 			continue;
624 
625 		/*
626 		 * Notice we check for sched.ops since there's some
627 		 * override on the meaning of sched.ready by amdgpu.
628 		 * The natural check would be sched.ready, which is
629 		 * set as drm_sched_init() finishes...
630 		 */
631 		if (ring->sched.ops)
632 			drm_sched_fini(&ring->sched);
633 
634 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
635 			dma_fence_put(ring->fence_drv.fences[j]);
636 		kfree(ring->fence_drv.fences);
637 		ring->fence_drv.fences = NULL;
638 		ring->fence_drv.initialized = false;
639 	}
640 }
641 
642 /**
643  * amdgpu_fence_driver_hw_init - enable the fence driver
644  * for all possible rings.
645  *
646  * @adev: amdgpu device pointer
647  *
648  * Enable the fence driver for all possible rings (all asics).
649  * Not all asics have all rings, so each asic will only
650  * start the fence driver on the rings it has using
651  * amdgpu_fence_driver_start_ring().
653  */
654 void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
655 {
656 	int i;
657 
658 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
659 		struct amdgpu_ring *ring = adev->rings[i];
660 
661 		if (!ring || !ring->fence_drv.initialized)
662 			continue;
663 
664 		/* enable the interrupt */
665 		if (ring->fence_drv.irq_src &&
666 		    amdgpu_fence_need_ring_interrupt_restore(ring))
667 			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
668 				       ring->fence_drv.irq_type);
669 	}
670 }
671 
672 /**
673  * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
674  *
675  * @ring: ring whose job-embedded fences will be cleared
676  *
677  */
678 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
679 {
680 	int i;
681 	struct dma_fence *old, **ptr;
682 
683 	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
684 		ptr = &ring->fence_drv.fences[i];
685 		old = rcu_dereference_protected(*ptr, 1);
686 		if (old && old->ops == &amdgpu_job_fence_ops) {
687 			struct amdgpu_job *job;
688 
689 			/* For non-scheduler bad job, i.e. failed ib test, we need to signal
690 			 * it right here or we won't be able to track it in fence_drv
691 			 * and it will remain unsignaled during sa_bo free.
692 			 */
693 			job = container_of(old, struct amdgpu_job, hw_fence.base);
694 			if (!job->base.s_fence && !dma_fence_is_signaled(old))
695 				dma_fence_signal(old);
696 			RCU_INIT_POINTER(*ptr, NULL);
697 			dma_fence_put(old);
698 		}
699 	}
700 }
701 
702 /**
703  * amdgpu_fence_driver_set_error - set error code on fences
704  * @ring: the ring which contains the fences
705  * @error: the error code to set
706  *
707  * Set an error code to all the fences pending on the ring.
708  */
709 void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
710 {
711 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
712 	unsigned long flags;
713 
714 	spin_lock_irqsave(&drv->lock, flags);
715 	for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
716 		struct dma_fence *fence;
717 
718 		fence = rcu_dereference_protected(drv->fences[i],
719 						  lockdep_is_held(&drv->lock));
720 		if (fence && !dma_fence_is_signaled_locked(fence))
721 			dma_fence_set_error(fence, error);
722 	}
723 	spin_unlock_irqrestore(&drv->lock, flags);
724 }
725 
726 /**
727  * amdgpu_fence_driver_force_completion - force signal latest fence of ring
728  *
729  * @ring: ring whose latest fence will be force-signaled
730  *
731  */
732 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
733 {
734 	amdgpu_fence_driver_set_error(ring, -ECANCELED);
735 	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
736 	amdgpu_fence_process(ring);
737 }
738 
739 
740 /*
741  * Kernel queue reset handling
742  *
743  * The driver can reset individual queues for most engines, but those queues
744  * may contain work from multiple contexts.  Resetting the queue will
745  * lose all of that state.  In order to minimize the collateral damage, the
746  * driver will save the ring contents which are not associated with the guilty
747  * context prior to resetting the queue.  After resetting the queue the queue
748  * contents from the other contexts are re-emitted to the ring so that they can
749  * be processed by the engine.  To handle this, we save the queue's write
750  * pointer (wptr) in the fences associated with each context.  If we get a
751  * queue timeout, we can then use the wptrs from the fences to determine
752  * which data needs to be saved out of the queue's ring buffer.
753  */
754 
755 /**
756  * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
757  *
758  * @fence: the guilty fence to force-complete
759  *
760  */
761 void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
762 {
763 	dma_fence_set_error(&fence->base, -ETIME);
764 	amdgpu_fence_write(fence->ring, fence->seq);
765 	amdgpu_fence_process(fence->ring);
766 }
767 
768 void amdgpu_fence_save_wptr(struct dma_fence *fence)
769 {
770 	struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base);
771 
772 	am_fence->wptr = am_fence->ring->wptr;
773 }
774 
775 static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
776 						   u64 start_wptr, u32 end_wptr)
777 {
778 	unsigned int first_idx = start_wptr & ring->buf_mask;
779 	unsigned int last_idx = end_wptr & ring->buf_mask;
780 	unsigned int i;
781 
782 	/* Backup the contents of the ring buffer. */
783 	for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
784 		ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
785 }
786 
787 void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
788 					     struct amdgpu_fence *guilty_fence)
789 {
790 	struct dma_fence *unprocessed;
791 	struct dma_fence __rcu **ptr;
792 	struct amdgpu_fence *fence;
793 	u64 wptr, i, seqno;
794 
795 	seqno = amdgpu_fence_read(ring);
796 	wptr = ring->fence_drv.signalled_wptr;
797 	ring->ring_backup_entries_to_copy = 0;
798 
799 	for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
800 		ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
801 		rcu_read_lock();
802 		unprocessed = rcu_dereference(*ptr);
803 
804 		if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
805 			fence = container_of(unprocessed, struct amdgpu_fence, base);
806 
807 			/* save everything if the ring is not guilty, otherwise
808 			 * just save the content from other contexts.
809 			 */
810 			if (!guilty_fence || (fence->context != guilty_fence->context))
811 				amdgpu_ring_backup_unprocessed_command(ring, wptr,
812 								       fence->wptr);
813 			wptr = fence->wptr;
814 		}
815 		rcu_read_unlock();
816 	}
817 }
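
/*
 * Example of the loop above (descriptive only): assume fences F1 (context A),
 * F2 (context B, the guilty one) and F3 (context A) were emitted after the
 * last signalled wptr W0, with write pointers W1 < W2 < W3.  The loop backs
 * up the ring contents between W0 and W1 and between W2 and W3, while the
 * range between W1 and W2 (the guilty context's commands) is skipped; the
 * skipped fence still advances the running wptr.
 */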
818 
819 /*
820  * Common fence implementation
821  */
822 
823 static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
824 {
825 	return "amdgpu";
826 }
827 
828 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
829 {
830 	return (const char *)to_amdgpu_fence(f)->ring->name;
831 }
832 
833 static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
834 {
835 	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
836 
837 	return (const char *)to_amdgpu_ring(job->base.sched)->name;
838 }
839 
840 /**
841  * amdgpu_fence_enable_signaling - enable signalling on fence
842  * @f: fence
843  *
844  * This function is called with fence_queue lock held, and adds a callback
845  * to fence_queue that checks if this fence is signaled, and if so it
846  * signals the fence and removes itself.
847  */
848 static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
849 {
850 	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
851 		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
852 
853 	return true;
854 }
855 
856 /**
857  * amdgpu_job_fence_enable_signaling - enable signalling on job fence
858  * @f: fence
859  *
860  * This is similar to amdgpu_fence_enable_signaling above, but it only
861  * handles the job-embedded fence.
862  */
863 static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
864 {
865 	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
866 
867 	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
868 		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
869 
870 	return true;
871 }
872 
873 /**
874  * amdgpu_fence_free - free up the fence memory
875  *
876  * @rcu: RCU callback head
877  *
878  * Free up the fence memory after the RCU grace period.
879  */
880 static void amdgpu_fence_free(struct rcu_head *rcu)
881 {
882 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
883 
884 	/* free the separately allocated fence */
885 	kfree(to_amdgpu_fence(f));
886 }
887 
888 /**
889  * amdgpu_job_fence_free - free up the job with embedded fence
890  *
891  * @rcu: RCU callback head
892  *
893  * Free up the job with embedded fence after the RCU grace period.
894  */
895 static void amdgpu_job_fence_free(struct rcu_head *rcu)
896 {
897 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
898 
899 	/* free the job this fence is embedded in */
900 	kfree(container_of(f, struct amdgpu_job, hw_fence.base));
901 }
902 
903 /**
904  * amdgpu_fence_release - callback that fence can be freed
905  *
906  * @f: fence
907  *
908  * This function is called when the reference count becomes zero.
909  * It just RCU schedules freeing up the fence.
910  */
911 static void amdgpu_fence_release(struct dma_fence *f)
912 {
913 	call_rcu(&f->rcu, amdgpu_fence_free);
914 }
915 
916 /**
917  * amdgpu_job_fence_release - callback that job embedded fence can be freed
918  *
919  * @f: fence
920  *
921  * This is similar to amdgpu_fence_release above, but it only
922  * handles the job-embedded fence.
923  */
924 static void amdgpu_job_fence_release(struct dma_fence *f)
925 {
926 	call_rcu(&f->rcu, amdgpu_job_fence_free);
927 }
928 
929 static const struct dma_fence_ops amdgpu_fence_ops = {
930 	.get_driver_name = amdgpu_fence_get_driver_name,
931 	.get_timeline_name = amdgpu_fence_get_timeline_name,
932 	.enable_signaling = amdgpu_fence_enable_signaling,
933 	.release = amdgpu_fence_release,
934 };
935 
936 static const struct dma_fence_ops amdgpu_job_fence_ops = {
937 	.get_driver_name = amdgpu_fence_get_driver_name,
938 	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
939 	.enable_signaling = amdgpu_job_fence_enable_signaling,
940 	.release = amdgpu_job_fence_release,
941 };
942 
943 /*
944  * Fence debugfs
945  */
946 #if defined(CONFIG_DEBUG_FS)
947 static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
948 {
949 	struct amdgpu_device *adev = m->private;
950 	int i;
951 
952 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
953 		struct amdgpu_ring *ring = adev->rings[i];
954 
955 		if (!ring || !ring->fence_drv.initialized)
956 			continue;
957 
958 		amdgpu_fence_process(ring);
959 
960 		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
961 		seq_printf(m, "Last signaled fence          0x%08x\n",
962 			   atomic_read(&ring->fence_drv.last_seq));
963 		seq_printf(m, "Last emitted                 0x%08x\n",
964 			   ring->fence_drv.sync_seq);
965 
966 		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
967 		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
968 			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
969 				   le32_to_cpu(*ring->trail_fence_cpu_addr));
970 			seq_printf(m, "Last emitted                 0x%08x\n",
971 				   ring->trail_seq);
972 		}
973 
974 		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
975 			continue;
976 
977 		/* set in CP_VMID_PREEMPT and preemption occurred */
978 		seq_printf(m, "Last preempted               0x%08x\n",
979 			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
980 		/* set in CP_VMID_RESET and reset occurred */
981 		seq_printf(m, "Last reset                   0x%08x\n",
982 			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
983 		/* Both preemption and reset occurred */
984 		seq_printf(m, "Last both                    0x%08x\n",
985 			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
986 	}
987 	return 0;
988 }
989 
990 /*
991  * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
992  *
993  * Manually trigger a gpu reset and wait for it to complete.
994  */
995 static int gpu_recover_get(void *data, u64 *val)
996 {
997 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
998 	struct drm_device *dev = adev_to_drm(adev);
999 	int r;
1000 
1001 	r = pm_runtime_get_sync(dev->dev);
1002 	if (r < 0) {
1003 		pm_runtime_put_autosuspend(dev->dev);
1004 		return 0;
1005 	}
1006 
1007 	if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
1008 		flush_work(&adev->reset_work);
1009 
1010 	*val = atomic_read(&adev->reset_domain->reset_res);
1011 
1012 	pm_runtime_mark_last_busy(dev->dev);
1013 	pm_runtime_put_autosuspend(dev->dev);
1014 
1015 	return 0;
1016 }
1017 
1018 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
1019 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
1020 			 "%lld\n");
1021 
1022 static void amdgpu_debugfs_reset_work(struct work_struct *work)
1023 {
1024 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
1025 						  reset_work);
1026 
1027 	struct amdgpu_reset_context reset_context;
1028 
1029 	memset(&reset_context, 0, sizeof(reset_context));
1030 
1031 	reset_context.method = AMD_RESET_METHOD_NONE;
1032 	reset_context.reset_req_dev = adev;
1033 	reset_context.src = AMDGPU_RESET_SRC_USER;
1034 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
1035 	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
1036 
1037 	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
1038 }
1039 
1040 #endif
1041 
1042 void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
1043 {
1044 #if defined(CONFIG_DEBUG_FS)
1045 	struct drm_minor *minor = adev_to_drm(adev)->primary;
1046 	struct dentry *root = minor->debugfs_root;
1047 
1048 	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
1049 			    &amdgpu_debugfs_fence_info_fops);
1050 
1051 	if (!amdgpu_sriov_vf(adev)) {
1052 
1053 		INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
1054 		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
1055 				    &amdgpu_debugfs_gpu_recover_fops);
1056 	}
1057 #endif
1058 }
1059 
1060