/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
	bool			explicit;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

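/*
 * A minimal usage sketch (illustrative only, not part of this file): a sync
 * object is created on the stack, filled with the fences a job depends on,
 * drained, and freed. The adev and fence variables are assumed context.
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_fence(adev, &sync, fence, false);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */
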
/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence)
		return s_fence->owner;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 * @explicit: whether the fence is an explicit dependency
 *
 * Tries to add the fence to an existing hash entry. Returns true when an
 * entry was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync,
				  struct dma_fence *f, bool explicit)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);

		/* Preserve the explicit flag to not lose pipeline sync */
		e->explicit |= explicit;

		return true;
	}
	return false;
}

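/*
 * Note on the scheme above: fences are hashed by dma_fence context, and
 * fences within one context signal in order, so keeping only the latest
 * fence per context is enough to represent all earlier ones.
 */
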
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add fence to
 * @f: fence to sync to
 * @explicit: whether the fence is an explicit dependency
 *
 * Add the fence to the sync object. Returns 0 on success, -ENOMEM when the
 * hash entry can't be allocated.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct dma_fence *f, bool explicit)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;
	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f, explicit))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->explicit = explicit;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}

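/*
 * Illustrative sketch (assumed context, not part of this file): dependencies
 * supplied explicitly by userspace, e.g. through a drm_syncobj, are added
 * with explicit set to true so the pipeline sync isn't optimized away:
 *
 *	fence = drm_syncobj_fence_get(syncobj);
 *	r = amdgpu_sync_fence(adev, &sync, fence, true);
 *	dma_fence_put(fence);
 */
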
/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: owner of the planned operation, used to filter the shared fences
 * @explicit_sync: true if we should only sync to the exclusive fence
 *
 * Sync to all fences in the reservation object that are relevant for the
 * given owner.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner, bool explicit_sync)
{
	struct reservation_object_list *flist;
	struct dma_fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f, false);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner, as well as all
			 * others when explicit sync is requested, as long as
			 * the owner isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    (fence_owner == owner || explicit_sync))
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f, false);
		if (r)
			break;
	}
	return r;
}

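/*
 * Illustrative sketch (assumed context, not part of this file): during
 * command submission each buffer object a job touches contributes the
 * fences stored in its reservation object:
 *
 *	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
 *			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
 *
 * The bo variable and the owner choice here are assumptions for the sketch.
 */
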
/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}

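/*
 * Illustrative sketch (assumed context, not part of this file): peeking lets
 * a caller inspect the next unsignaled dependency without consuming it, for
 * example to decide whether a job on the given ring is ready:
 *
 *	struct dma_fence *f = amdgpu_sync_peek_fence(&sync, ring);
 *
 * Here f == NULL means every dependency has signaled (or is at least
 * scheduled on the same ring) and the job may run.
 */
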
/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 * @explicit: optional out parameter, set to true if the returned fence is an
 * explicit dependency
 *
 * Gets and removes the next fence from the sync object that is not signaled
 * yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
					bool *explicit)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (explicit)
			*explicit = e->explicit;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}

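/*
 * Illustrative sketch (assumed context, not part of this file): a consumer
 * can drain the sync object one unsignaled fence at a time; the fence
 * reference is handed over to the caller:
 *
 *	bool explicit;
 *	struct dma_fence *fence;
 *
 *	while ((fence = amdgpu_sync_get_fence(&sync, &explicit))) {
 *		... wait for or remember the fence ...
 *		dma_fence_put(fence);
 *	}
 */

/**
 * amdgpu_sync_wait - wait for all fences in the sync object to signal
 *
 * @sync: sync object to wait on
 * @intr: if true, the wait is interruptible
 *
 * Waits on every fence in the sync object and frees the entries that have
 * completed. Returns 0 on success or a negative error code when a wait
 * fails.
 */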
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

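/*
 * Illustrative note (assumed context, not part of this file): the init/fini
 * pair manages the global slab and is meant to be called once per module
 * lifetime, typically from the driver's module init and exit paths:
 *
 *	r = amdgpu_sync_init();
 *	if (r)
 *		return r;
 */
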
/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}