xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c (revision b85d45947951d23cb22d90caecf4c1eb81342c96)
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, whatever comes after the last bo is the oldest bo we
 * allocated and thus the first one that should no longer be in use by the
 * GPU.
 *
 * If that is not the case, we skip over the bo after last to the closest
 * done bo, if such a one exists. If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring and
 * just wait for any one of those fences to complete.
 */
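/*
 * For example, with sub-allocations A, B and C placed in that order,
 * "hole" points just after C. A new request is first tried in the space
 * between C and the end of the buffer; failing that, we wrap around and
 * reuse the space of A, B, ... as their fences signal, or wait for one
 * of those fences when blocking is allowed.
 */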
#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

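/**
 * amdgpu_sa_bo_manager_init - initialize a sub-allocation manager
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: sa_manager to initialize
 * @size: size of the backing buffer object in bytes
 * @align: alignment for the backing buffer object and allocations
 * @domain: memory domain to place the backing buffer object in
 *
 * Set up the wait queue, hole pointer and per ring free lists and
 * allocate the backing buffer object. The buffer is pinned and mapped
 * later by amdgpu_sa_bo_manager_start().
 */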
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     0, NULL, NULL, &sa_manager->bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

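/**
 * amdgpu_sa_bo_manager_fini - tear down a sub-allocation manager
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: sa_manager to tear down
 *
 * Release all remaining sub-allocations (warning if the manager is not
 * empty) and drop the reference to the backing buffer object.
 */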
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
	amdgpu_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

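/**
 * amdgpu_sa_bo_manager_start - pin and map the backing buffer object
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: sa_manager to start
 *
 * Pin the backing buffer object into its domain and map it for CPU
 * access so that sub-allocations can be handed out.
 */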
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(sa_manager->bo);
		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}

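/**
 * amdgpu_sa_bo_manager_suspend - unmap and unpin the backing buffer object
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: sa_manager to suspend
 *
 * Counterpart of amdgpu_sa_bo_manager_start(), typically called before
 * the device is suspended.
 */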
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (!r) {
		amdgpu_bo_kunmap(sa_manager->bo);
		amdgpu_bo_unpin(sa_manager->bo);
		amdgpu_bo_unreserve(sa_manager->bo);
	}
	return r;
}

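/*
 * Return the index of the ring that will signal the given fence, using
 * the scheduler fence if available and falling back to the hardware
 * fence otherwise.
 */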
static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
{
	struct amdgpu_fence *a_fence;
	struct amd_sched_fence *s_fence;

	s_fence = to_amd_sched_fence(f);
	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->idx;
	}

	a_fence = to_amdgpu_fence(f);
	if (a_fence)
		return a_fence->ring->idx;
	return 0;
}

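/*
 * Unlink a sub-allocation from the ordered and free lists and free it.
 * If the hole currently points at this entry it is moved back to the
 * previous one first. Callers must hold sa_manager->wq.lock or otherwise
 * have exclusive access to the manager (as during teardown).
 */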
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	fence_put(sa_bo->fence);
	kfree(sa_bo);
}

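/*
 * Free the sub-allocations directly after the hole, stopping at the
 * first entry that has no fence or whose fence has not signaled yet.
 */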
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

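/*
 * Start offset of the hole, i.e. the end offset of the allocation in
 * front of it, or 0 if the hole is at the beginning of the buffer.
 */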
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

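/*
 * End offset of the hole, i.e. the start offset of the allocation behind
 * it, or the manager size if the hole extends to the end of the buffer.
 */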
static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

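/*
 * Try to fit an allocation of the given size and alignment into the
 * current hole. On success the sa_bo is filled in, linked in behind the
 * hole and the hole is moved to just after the new allocation.
 */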
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

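/*
 * Try to move the hole forward: either wrap around to the beginning of
 * the buffer, or free the closest already signaled sa_bo on one of the
 * per ring free lists. Unsignaled fences are collected in the fences
 * array so the caller can wait on them. Returns true if it is worth
 * retrying the allocation, false otherwise.
 */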
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all the fence lists and try to find the sa_bo
	 * closest to the current last allocation
	 */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		   so it's safe to remove it */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

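/**
 * amdgpu_sa_bo_new - sub-allocate a buffer
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: sa_manager to allocate from
 * @sa_bo: used to return the new sub-allocation
 * @size: number of bytes to allocate
 * @align: alignment the allocation has to match
 *
 * Try to carve the allocation out of the current hole; if that fails,
 * skip over or wait for older allocations to be released until enough
 * space is available. Returns 0 on success or a negative error code.
 */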
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
		     struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct fence *fences[AMDGPU_MAX_RINGS];
	unsigned tries[AMDGPU_MAX_RINGS];
	int i, r;
	signed long t;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		spin_unlock(&sa_manager->wq.lock);
		t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
					  false, MAX_SCHEDULE_TIMEOUT);
		r = (t > 0) ? 0 : t;
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT) {
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

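/**
 * amdgpu_sa_bo_free - free a sub-allocation
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_bo: sub-allocation to free, set to NULL on return
 * @fence: fence protecting the last use of the allocation, or NULL
 *
 * If the fence has not signaled yet, the allocation is only queued on the
 * corresponding ring's free list and reclaimed later; otherwise it is
 * removed immediately. Waiters on the manager are woken up either way.
 */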
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !fence_is_signaled(fence)) {
		uint32_t idx;
		(*sa_bo)->fence = fence_get(fence);
		idx = amdgpu_sa_get_ring_from_fence(fence);
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

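/* Print the fence that still protects a sub-allocation into the debugfs output. */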
static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);

	if (a_fence)
		seq_printf(m, " protected by 0x%016llx on ring %d",
			   a_fence->seq, a_fence->ring->idx);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		seq_printf(m, " protected by 0x%016x on ring %d",
			   s_fence->base.seqno, ring->idx);
	}
}

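/**
 * amdgpu_sa_bo_dump_debug_info - dump the state of a sub-allocation manager
 *
 * @sa_manager: sa_manager to dump
 * @m: debugfs seq_file to print to
 *
 * Print every sub-allocation with its GPU address range, mark the current
 * hole position and show the protecting fence, if any.
 */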
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence)
			amdgpu_sa_bo_dump_fence(i->fence, m);
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif