xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c (revision fdf2f6c56e5e289c7d7e726b676aba25643b39a0)
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to
 * allocate after the last allocated bo. The principle is that in a
 * linear GPU ring progression, what comes after the last bo is the
 * oldest bo we allocated and thus the first one that should no longer
 * be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the
 * closest completed bo, if one exists. If none exists and we are not
 * asked to block, we report failure to allocate.
 *
 * If we are asked to block, we take the oldest fence of each ring and
 * wait for any one of them to complete.
 */

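/*
 * Typical usage, loosely modeled on amdgpu_ib_get() (an illustrative
 * sketch only, error handling omitted):
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, &sa_bo, size, 256);
 *	if (!r) {
 *		void *ptr = amdgpu_sa_bo_cpu_addr(sa_bo);
 *		... fill ptr with commands and submit them ...
 *		amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *	}
 */
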
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

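/**
 * amdgpu_sa_bo_manager_init - initialize a sub-allocator manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to initialize
 * @size: size of the backing buffer object in bytes
 * @align: alignment the backing bo and all sub-allocations must honor
 * @domain: memory domain to place the backing bo in
 *
 * Creates a pinned kernel bo of @size bytes, maps it for CPU access
 * and initializes the allocation list, fence lists and wait queue.
 */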
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		INIT_LIST_HEAD(&sa_manager->flist[i]);

	r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
				    &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
	return r;
}

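/**
 * amdgpu_sa_bo_manager_fini - tear down a sub-allocator manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to tear down
 *
 * Frees all remaining sub-allocations, warning if any of them are
 * still busy, and releases the backing buffer object.
 */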
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return;
	}

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}

	amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	sa_manager->size = 0;
}

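/* Remove @sa_bo from the allocation and fence lists and free it,
 * moving the hole back if it currently points at @sa_bo.
 * Caller must hold sa_manager->wq.lock.
 */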
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	dma_fence_put(sa_bo->fence);
	kfree(sa_bo);
}

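/* Free the consecutive allocations following the hole whose fences
 * have already signaled, stopping at the first one that is still in
 * flight. Caller must hold sa_manager->wq.lock.
 */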
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !dma_fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

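/* Start offset of the hole: the end of the allocation the hole points
 * at, or 0 when the hole is at the beginning of the buffer.
 */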
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

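/* End offset of the hole: the start of the next allocation, or the
 * manager size when the hole extends to the end of the buffer.
 */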
static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

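/**
 * amdgpu_sa_bo_try_alloc - try to sub-allocate from the current hole
 *
 * @sa_manager: pointer to the sa_manager
 * @sa_bo: allocation to fill in on success
 * @size: number of bytes to allocate
 * @align: alignment to match
 *
 * If the hole is large enough for @size bytes at @align alignment,
 * place @sa_bo there, advance the hole past it and return true.
 * Caller must hold sa_manager->wq.lock.
 */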
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		if (!list_empty(&sa_manager->flist[i]))
			return true;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

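/**
 * amdgpu_sa_bo_next_hole - move the hole to the next spot that may fit
 *
 * @sa_manager: pointer to the sa_manager
 * @fences: array filled with the fences still blocking each fence list
 * @tries: per fence list counter limiting how often a list is reused
 *
 * If the hole reached the end of the buffer, wrap it around to the
 * beginning. Otherwise move the hole right before the closest already
 * signaled allocation and remove that allocation. Returns true if the
 * hole was moved, false when only waiting for one of @fences can make
 * progress. Caller must hold sa_manager->wq.lock.
 */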
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct dma_fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the closest sa_bo
	 * of the current last
	 */
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		fences[i] = NULL;

		if (list_empty(&sa_manager->flist[i]))
			continue;

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!dma_fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = best_bo->fence->context;

		idx %= AMDGPU_SA_NUM_FENCE_LISTS;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it
		 */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

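/**
 * amdgpu_sa_bo_new - allocate a new sub-allocation
 *
 * @sa_manager: pointer to the sa_manager
 * @sa_bo: resulting sub-allocation
 * @size: number of bytes to allocate
 * @align: alignment to match
 *
 * Try to allocate directly, skip over signaled allocations if that
 * fails, and otherwise wait for the oldest fences or for another
 * thread to free something. Returns 0 on success or a negative error
 * code (e.g. -ENOMEM, -ERESTARTSYS) on failure.
 */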
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned count;
	int i, r;
	signed long t;

	if (WARN_ON_ONCE(align > sa_manager->align))
		return -EINVAL;

	if (WARN_ON_ONCE(size > sa_manager->size))
		return -EINVAL;

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if (!(*sa_bo))
		return -ENOMEM;
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			tries[i] = 0;

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			if (fences[i])
				fences[count++] = dma_fence_get(fences[i]);

		if (count) {
			spin_unlock(&sa_manager->wq.lock);
			t = dma_fence_wait_any_timeout(fences, count, false,
						       MAX_SCHEDULE_TIMEOUT,
						       NULL);
			for (i = 0; i < count; ++i)
				dma_fence_put(fences[i]);

			r = (t > 0) ? 0 : t;
			spin_lock(&sa_manager->wq.lock);
		} else {
			/* if we have nothing to wait for, block */
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

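/**
 * amdgpu_sa_bo_free - free a sub-allocation
 *
 * @adev: amdgpu device pointer
 * @sa_bo: sub-allocation to free
 * @fence: fence protecting the allocation, may be NULL
 *
 * If @fence has not yet signaled, queue the allocation on the matching
 * fence list so it is reclaimed later; otherwise remove it right away.
 * Waiters on the manager are woken up in both cases.
 */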
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !dma_fence_is_signaled(fence)) {
		uint32_t idx;

		(*sa_bo)->fence = dma_fence_get(fence);
		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

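/**
 * amdgpu_sa_bo_dump_debug_info - dump all sub-allocations to a seq_file
 *
 * @sa_manager: pointer to the sa_manager
 * @m: seq_file to print into
 *
 * Prints one line per allocation with its GPU address range, size and
 * protecting fence; the entry the hole points at is marked with '>'.
 */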
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);

		if (i->fence)
			seq_printf(m, " protected by 0x%016llx on context %llu",
				   i->fence->seqno, i->fence->context);

		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif
399