xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Christian König <christian.koenig@amd.com>
29  */
30 
31 #include <drm/drmP.h>
32 #include "amdgpu.h"
33 #include "amdgpu_trace.h"
34 
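/* One hash table entry per fence context the sync object still waits on. */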
struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct fence		*fence;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

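/*
 * A minimal sketch of the typical sync object lifecycle, assuming a
 * hypothetical caller that already has an amdgpu_device *adev and a
 * struct fence *f it needs to wait on:
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_fence(adev, &sync, f);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync);
 *	amdgpu_sync_free(&sync);
 */
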
/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one
 * is later.
 */
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
	if (*keep && fence_is_later(*keep, fence))
		return;

	fence_put(*keep);
	*keep = fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
{
	struct amdgpu_sync_entry *e;

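	/* Entries are hashed by fence context; fences from the same
	 * context are guaranteed to be ordered, so keeping only the
	 * later of the two is sufficient.
	 */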
	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object. Returns 0 on success, -ENOMEM when
 * allocating a new hash entry fails.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = fence_get(f);
	return 0;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: owner of the fences we want to sync to, used to filter out
 *	fences we don't need to wait for
 *
 * Sync to all fences in the reservation object. The caller must hold the
 * reservation object's lock.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}

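/*
 * A minimal sketch of the common pattern for syncing a command submission
 * to a buffer object, assuming a hypothetical caller that holds the
 * reservation lock of an amdgpu_bo *bo (bo->tbo.resv being its TTM-backed
 * reservation object):
 *
 *	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
 *			     AMDGPU_FENCE_OWNER_UNDEFINED);
 */
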
/**
 * amdgpu_sync_is_idle - test if all fences are signaled
 *
 * @sync: the sync object
 *
 * Returns true if all fences in the sync object are signaled. Signaled
 * fences are removed from the sync object as a side effect.
 */
bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct fence *f = e->fence;

		if (fence_is_signaled(f)) {
			hash_del(&e->node);
			fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}

		return false;
	}

	return true;
}

/**
 * amdgpu_sync_cycle_fences - move fences from one sync object into another
 *
 * @dst: the destination sync object
 * @src: the source sync object
 * @fence: fence to add to source
 *
 * Remove all fences from source, put them into destination, and add
 * @fence as the new one in source. Returns 0 on success, -ENOMEM when
 * allocating the new entry fails.
 */
int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
			     struct fence *fence)
{
	struct amdgpu_sync_entry *e, *newone;
	struct hlist_node *tmp;
	int i;

	/* Allocate the new entry before moving the old ones */
	newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!newone)
		return -ENOMEM;

	hash_for_each_safe(src->fences, i, tmp, e, node) {
		struct fence *f = e->fence;

		hash_del(&e->node);
		if (fence_is_signaled(f)) {
			fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}

		if (amdgpu_sync_add_later(dst, f)) {
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}

		hash_add(dst->fences, &e->node, f->context);
	}

	hash_add(src->fences, &newone->node, fence->context);
	newone->fence = fence_get(fence);

	return 0;
}

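/**
 * amdgpu_sync_get_fence - get the next unsignaled fence
 *
 * @sync: sync object to consume fences from
 *
 * Removes entries from the sync object until a fence that is not signaled
 * yet is found, and returns that fence with a reference held; the caller
 * is responsible for dropping it with fence_put(). Returns NULL when all
 * fences are signaled.
 */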
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!fence_is_signaled(f))
			return f;

		fence_put(f);
	}
	return NULL;
}

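/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: the sync object
 *
 * Uninterruptibly waits for every remaining fence to signal, dropping the
 * entries as they complete. Returns 0 on success or the error returned by
 * fence_wait().
 */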
int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = fence_wait(e->fence, false);
		if (r)
			return r;

		hash_del(&e->node);
		fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator. Returns 0 on success, -ENOMEM when creating
 * the slab cache fails.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}