xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c (revision 90d32e92011eaae8e70a9169b4e7acf4ca8f9d3a)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Christian König
23  */
24 
25 #include <linux/dma-mapping.h>
26 #include <drm/ttm/ttm_range_manager.h>
27 
28 #include "amdgpu.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_res_cursor.h"
31 #include "amdgpu_atomfirmware.h"
32 #include "atom.h"
33 
34 struct amdgpu_vram_reservation {
35 	u64 start;
36 	u64 size;
37 	struct list_head allocated;
38 	struct list_head blocks;
39 };
40 
41 static inline struct amdgpu_vram_mgr *
42 to_vram_mgr(struct ttm_resource_manager *man)
43 {
44 	return container_of(man, struct amdgpu_vram_mgr, manager);
45 }
46 
47 static inline struct amdgpu_device *
48 to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
49 {
50 	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
51 }
52 
53 static inline struct drm_buddy_block *
54 amdgpu_vram_mgr_first_block(struct list_head *list)
55 {
56 	return list_first_entry_or_null(list, struct drm_buddy_block, link);
57 }
58 
59 static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
60 {
61 	struct drm_buddy_block *block;
62 	u64 start, size;
63 
64 	block = amdgpu_vram_mgr_first_block(head);
65 	if (!block)
66 		return false;
67 
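	/* Walk the list in order: the blocks are contiguous only if each
	 * block ends exactly where its successor starts.
	 */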
68 	while (head != block->link.next) {
69 		start = amdgpu_vram_mgr_block_start(block);
70 		size = amdgpu_vram_mgr_block_size(block);
71 
72 		block = list_entry(block->link.next, struct drm_buddy_block, link);
73 		if (start + size != amdgpu_vram_mgr_block_start(block))
74 			return false;
75 	}
76 
77 	return true;
78 }
79 
80 static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
81 {
82 	struct drm_buddy_block *block;
83 	u64 size = 0;
84 
85 	list_for_each_entry(block, head, link)
86 		size += amdgpu_vram_mgr_block_size(block);
87 
88 	return size;
89 }
90 
91 /**
92  * DOC: mem_info_vram_total
93  *
94  * The amdgpu driver provides a sysfs API for reporting current total VRAM
95  * available on the device.
96  * The file mem_info_vram_total is used for this and returns the total
97  * amount of VRAM in bytes.
98  */
99 static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
100 		struct device_attribute *attr, char *buf)
101 {
102 	struct drm_device *ddev = dev_get_drvdata(dev);
103 	struct amdgpu_device *adev = drm_to_adev(ddev);
104 
105 	return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
106 }
107 
108 /**
109  * DOC: mem_info_vis_vram_total
110  *
111  * The amdgpu driver provides a sysfs API for reporting current total
112  * visible VRAM available on the device.
113  * The file mem_info_vis_vram_total is used for this and returns the total
114  * amount of visible VRAM in bytes.
115  */
116 static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
117 		struct device_attribute *attr, char *buf)
118 {
119 	struct drm_device *ddev = dev_get_drvdata(dev);
120 	struct amdgpu_device *adev = drm_to_adev(ddev);
121 
122 	return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
123 }
124 
125 /**
126  * DOC: mem_info_vram_used
127  *
128  * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
129  * currently used on the device.
130  * The file mem_info_vram_used is used for this and returns the total
131  * amount of currently used VRAM in bytes.
132  */
133 static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
134 					      struct device_attribute *attr,
135 					      char *buf)
136 {
137 	struct drm_device *ddev = dev_get_drvdata(dev);
138 	struct amdgpu_device *adev = drm_to_adev(ddev);
139 	struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;
140 
141 	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
142 }
143 
144 /**
145  * DOC: mem_info_vis_vram_used
146  *
147  * The amdgpu driver provides a sysfs API for reporting the amount of
148  * visible VRAM currently used on the device.
149  * The file mem_info_vis_vram_used is used for this and returns the total
150  * amount of currently used visible VRAM in bytes.
151  */
152 static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
153 						  struct device_attribute *attr,
154 						  char *buf)
155 {
156 	struct drm_device *ddev = dev_get_drvdata(dev);
157 	struct amdgpu_device *adev = drm_to_adev(ddev);
158 
159 	return sysfs_emit(buf, "%llu\n",
160 			  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
161 }
162 
163 /**
164  * DOC: mem_info_vram_vendor
165  *
166  * The amdgpu driver provides a sysfs API for reporting the vendor of the
167  * installed VRAM.
168  * The file mem_info_vram_vendor is used for this and returns the name of the
169  * vendor.
170  */
171 static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
172 					   struct device_attribute *attr,
173 					   char *buf)
174 {
175 	struct drm_device *ddev = dev_get_drvdata(dev);
176 	struct amdgpu_device *adev = drm_to_adev(ddev);
177 
178 	switch (adev->gmc.vram_vendor) {
179 	case SAMSUNG:
180 		return sysfs_emit(buf, "samsung\n");
181 	case INFINEON:
182 		return sysfs_emit(buf, "infineon\n");
183 	case ELPIDA:
184 		return sysfs_emit(buf, "elpida\n");
185 	case ETRON:
186 		return sysfs_emit(buf, "etron\n");
187 	case NANYA:
188 		return sysfs_emit(buf, "nanya\n");
189 	case HYNIX:
190 		return sysfs_emit(buf, "hynix\n");
191 	case MOSEL:
192 		return sysfs_emit(buf, "mosel\n");
193 	case WINBOND:
194 		return sysfs_emit(buf, "winbond\n");
195 	case ESMT:
196 		return sysfs_emit(buf, "esmt\n");
197 	case MICRON:
198 		return sysfs_emit(buf, "micron\n");
199 	default:
200 		return sysfs_emit(buf, "unknown\n");
201 	}
202 }
203 
204 static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
205 		   amdgpu_mem_info_vram_total_show, NULL);
206 static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
207 		   amdgpu_mem_info_vis_vram_total_show, NULL);
208 static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
209 		   amdgpu_mem_info_vram_used_show, NULL);
210 static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
211 		   amdgpu_mem_info_vis_vram_used_show, NULL);
212 static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
213 		   amdgpu_mem_info_vram_vendor, NULL);
214 
215 static struct attribute *amdgpu_vram_mgr_attributes[] = {
216 	&dev_attr_mem_info_vram_total.attr,
217 	&dev_attr_mem_info_vis_vram_total.attr,
218 	&dev_attr_mem_info_vram_used.attr,
219 	&dev_attr_mem_info_vis_vram_used.attr,
220 	&dev_attr_mem_info_vram_vendor.attr,
221 	NULL
222 };
223 
224 static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
225 					    struct attribute *attr, int i)
226 {
227 	struct device *dev = kobj_to_dev(kobj);
228 	struct drm_device *ddev = dev_get_drvdata(dev);
229 	struct amdgpu_device *adev = drm_to_adev(ddev);
230 
231 	if (attr == &dev_attr_mem_info_vram_vendor.attr &&
232 	    !adev->gmc.vram_vendor)
233 		return 0;
234 
235 	return attr->mode;
236 }
237 
238 const struct attribute_group amdgpu_vram_mgr_attr_group = {
239 	.attrs = amdgpu_vram_mgr_attributes,
240 	.is_visible = amdgpu_vram_attrs_is_visible
241 };
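/*
 * Illustrative usage of the sysfs files documented above; the exact path is an
 * assumption for a typical single-GPU setup (the DRM card index may differ):
 *
 *	$ cat /sys/class/drm/card0/device/mem_info_vram_used
 *
 * Each file returns a single value in bytes, except mem_info_vram_vendor,
 * which returns a vendor name string.
 */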
242 
243 /**
244  * amdgpu_vram_mgr_vis_size - Calculate visible block size
245  *
246  * @adev: amdgpu_device pointer
247  * @block: DRM BUDDY block structure
248  *
249  * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
250  */
251 static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
252 				    struct drm_buddy_block *block)
253 {
254 	u64 start = amdgpu_vram_mgr_block_start(block);
255 	u64 end = start + amdgpu_vram_mgr_block_size(block);
256 
257 	if (start >= adev->gmc.visible_vram_size)
258 		return 0;
259 
260 	return (end > adev->gmc.visible_vram_size ?
261 		adev->gmc.visible_vram_size : end) - start;
262 }
263 
264 /**
265  * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
266  *
267  * @bo: &amdgpu_bo buffer object (must be in VRAM)
268  *
269  * Returns:
270  * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
271  */
272 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
273 {
274 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
275 	struct ttm_resource *res = bo->tbo.resource;
276 	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
277 	struct drm_buddy_block *block;
278 	u64 usage = 0;
279 
280 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
281 		return amdgpu_bo_size(bo);
282 
283 	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
284 		return 0;
285 
286 	list_for_each_entry(block, &vres->blocks, link)
287 		usage += amdgpu_vram_mgr_vis_size(adev, block);
288 
289 	return usage;
290 }
291 
292 /* Commit the reservation of VRAM pages */
293 static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
294 {
295 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
296 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
297 	struct drm_buddy *mm = &mgr->mm;
298 	struct amdgpu_vram_reservation *rsv, *temp;
299 	struct drm_buddy_block *block;
300 	uint64_t vis_usage;
301 
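	/* Try to commit every reservation that is still pending; this runs on
	 * each new reservation request and again from the free path, so
	 * reservations that could not be satisfied earlier are retried.
	 */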
302 	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
303 		if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
304 					   rsv->size, mm->chunk_size, &rsv->allocated,
305 					   DRM_BUDDY_RANGE_ALLOCATION))
306 			continue;
307 
308 		block = amdgpu_vram_mgr_first_block(&rsv->allocated);
309 		if (!block)
310 			continue;
311 
312 		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
313 			rsv->start, rsv->size);
314 
315 		vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
316 		atomic64_add(vis_usage, &mgr->vis_usage);
317 		spin_lock(&man->bdev->lru_lock);
318 		man->usage += rsv->size;
319 		spin_unlock(&man->bdev->lru_lock);
320 		list_move(&rsv->blocks, &mgr->reserved_pages);
321 	}
322 }
323 
324 /**
325  * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
326  *
327  * @mgr: amdgpu_vram_mgr pointer
328  * @start: start address of the range in VRAM
329  * @size: size of the range
330  *
331  * Reserve memory from start address with the specified size in VRAM
332  */
333 int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
334 				  uint64_t start, uint64_t size)
335 {
336 	struct amdgpu_vram_reservation *rsv;
337 
338 	rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
339 	if (!rsv)
340 		return -ENOMEM;
341 
342 	INIT_LIST_HEAD(&rsv->allocated);
343 	INIT_LIST_HEAD(&rsv->blocks);
344 
345 	rsv->start = start;
346 	rsv->size = size;
347 
348 	mutex_lock(&mgr->lock);
349 	list_add_tail(&rsv->blocks, &mgr->reservations_pending);
350 	amdgpu_vram_mgr_do_reserve(&mgr->manager);
351 	mutex_unlock(&mgr->lock);
352 
353 	return 0;
354 }
355 
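/*
 * A minimal usage sketch (illustrative only; fw_start and fw_size are
 * placeholder names, not taken from this file): carving a range out of VRAM
 * so the buddy allocator never hands it out to buffer objects.
 *
 *	if (amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
 *					  fw_start, fw_size))
 *		dev_warn(adev->dev, "VRAM reservation failed\n");
 */
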
356 /**
357  * amdgpu_vram_mgr_query_page_status - query the reservation status
358  *
359  * @mgr: amdgpu_vram_mgr pointer
360  * @start: start address of a page in VRAM
361  *
362  * Returns:
363  *	-EBUSY: the page is still held and is in the pending list
364  *	0: the page has been reserved
365  *	-ENOENT: the input page is not a reservation
366  */
367 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
368 				      uint64_t start)
369 {
370 	struct amdgpu_vram_reservation *rsv;
371 	int ret;
372 
373 	mutex_lock(&mgr->lock);
374 
375 	list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
376 		if (rsv->start <= start &&
377 		    (start < (rsv->start + rsv->size))) {
378 			ret = -EBUSY;
379 			goto out;
380 		}
381 	}
382 
383 	list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
384 		if (rsv->start <= start &&
385 		    (start < (rsv->start + rsv->size))) {
386 			ret = 0;
387 			goto out;
388 		}
389 	}
390 
391 	ret = -ENOENT;
392 out:
393 	mutex_unlock(&mgr->lock);
394 	return ret;
395 }
396 
397 static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
398 				  struct drm_printer *printer)
399 {
400 	DRM_DEBUG_DRIVER("Dummy vram mgr debug\n");
401 }
402 
403 static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man,
404 				       struct ttm_resource *res,
405 				       const struct ttm_place *place,
406 				       size_t size)
407 {
408 	DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n");
409 	return false;
410 }
411 
412 static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man,
413 				       struct ttm_resource *res,
414 				       const struct ttm_place *place,
415 				       size_t size)
416 {
417 	DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n");
418 	return true;
419 }
420 
421 static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man,
422 				struct ttm_resource *res)
423 {
424 	DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n");
425 }
426 
427 static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man,
428 			       struct ttm_buffer_object *tbo,
429 			       const struct ttm_place *place,
430 			       struct ttm_resource **res)
431 {
432 	DRM_DEBUG_DRIVER("Dummy vram mgr new\n");
433 	return -ENOSPC;
434 }
435 
436 /**
437  * amdgpu_vram_mgr_new - allocate new ranges
438  *
439  * @man: TTM memory type manager
440  * @tbo: TTM BO we need this range for
441  * @place: placement flags and restrictions
442  * @res: the resulting mem object
443  *
444  * Allocate VRAM for the given BO.
445  */
446 static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
447 			       struct ttm_buffer_object *tbo,
448 			       const struct ttm_place *place,
449 			       struct ttm_resource **res)
450 {
451 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
452 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
453 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
454 	u64 vis_usage = 0, max_bytes, min_block_size;
455 	struct amdgpu_vram_mgr_resource *vres;
456 	u64 size, remaining_size, lpfn, fpfn;
457 	struct drm_buddy *mm = &mgr->mm;
458 	struct drm_buddy_block *block;
459 	unsigned long pages_per_block;
460 	int r;
461 
462 	lpfn = (u64)place->lpfn << PAGE_SHIFT;
463 	if (!lpfn)
464 		lpfn = man->size;
465 
466 	fpfn = (u64)place->fpfn << PAGE_SHIFT;
467 
468 	max_bytes = adev->gmc.mc_vram_size;
469 	if (tbo->type != ttm_bo_type_kernel)
470 		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
471 
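	/* pages_per_block feeds the min_block_size choice below: non-contiguous
	 * requests that are a multiple of it (THP-sized, 2MiB by default) are
	 * carved out in at least THP-sized buddy blocks, which reduces
	 * fragmentation and allows huge page mappings. Contiguous BOs instead
	 * rely on DRM_BUDDY_CONTIGUOUS_ALLOCATION set further down.
	 */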
472 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
473 		pages_per_block = ~0ul;
474 	} else {
475 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
476 		pages_per_block = HPAGE_PMD_NR;
477 #else
478 		/* default to 2MB */
479 		pages_per_block = 2UL << (20UL - PAGE_SHIFT);
480 #endif
481 		pages_per_block = max_t(u32, pages_per_block,
482 					tbo->page_alignment);
483 	}
484 
485 	vres = kzalloc(sizeof(*vres), GFP_KERNEL);
486 	if (!vres)
487 		return -ENOMEM;
488 
489 	ttm_resource_init(tbo, place, &vres->base);
490 
491 	/* bail out quickly if there's likely not enough VRAM for this BO */
492 	if (ttm_resource_manager_usage(man) > max_bytes) {
493 		r = -ENOSPC;
494 		goto error_fini;
495 	}
496 
497 	INIT_LIST_HEAD(&vres->blocks);
498 
499 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
500 		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
501 
502 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
503 		vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
504 
505 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
506 		vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION;
507 
508 	if (fpfn || lpfn != mgr->mm.size)
509 		/* Allocate blocks in desired range */
510 		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
511 
512 	remaining_size = (u64)vres->base.size;
513 
514 	mutex_lock(&mgr->lock);
515 	while (remaining_size) {
516 		if (tbo->page_alignment)
517 			min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
518 		else
519 			min_block_size = mgr->default_page_size;
520 
521 		/* Limit maximum size to 2GiB due to SG table limitations */
522 		size = min(remaining_size, 2ULL << 30);
523 
524 		if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
525 		    !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
526 			min_block_size = (u64)pages_per_block << PAGE_SHIFT;
527 
528 		BUG_ON(min_block_size < mm->chunk_size);
529 
530 		r = drm_buddy_alloc_blocks(mm, fpfn,
531 					   lpfn,
532 					   size,
533 					   min_block_size,
534 					   &vres->blocks,
535 					   vres->flags);
536 
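		/* A best-effort contiguous request that ran out of space is
		 * retried without the contiguous flag, using THP-sized blocks,
		 * as long as the placement itself does not demand contiguity.
		 */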
537 		if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul &&
538 		    !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
539 			vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION;
540 			pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT),
541 						tbo->page_alignment);
542 
543 			continue;
544 		}
545 
546 		if (unlikely(r))
547 			goto error_free_blocks;
548 
549 		if (size > remaining_size)
550 			remaining_size = 0;
551 		else
552 			remaining_size -= size;
553 	}
554 	mutex_unlock(&mgr->lock);
555 
556 	vres->base.start = 0;
557 	size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
558 		     vres->base.size);
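	/* Derive a synthetic, worst-case res->start: for every block take its
	 * end (in pages) minus the overall size and keep the maximum, so that
	 * checks against res->start (e.g. CPU-visible VRAM tests) remain
	 * conservative even though the blocks may be scattered.
	 */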
559 	list_for_each_entry(block, &vres->blocks, link) {
560 		unsigned long start;
561 
562 		start = amdgpu_vram_mgr_block_start(block) +
563 			amdgpu_vram_mgr_block_size(block);
564 		start >>= PAGE_SHIFT;
565 
566 		if (start > PFN_UP(size))
567 			start -= PFN_UP(size);
568 		else
569 			start = 0;
570 		vres->base.start = max(vres->base.start, start);
571 
572 		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
573 	}
574 
575 	if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
576 		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
577 
578 	if (adev->gmc.xgmi.connected_to_cpu)
579 		vres->base.bus.caching = ttm_cached;
580 	else
581 		vres->base.bus.caching = ttm_write_combined;
582 
583 	atomic64_add(vis_usage, &mgr->vis_usage);
584 	*res = &vres->base;
585 	return 0;
586 
587 error_free_blocks:
588 	drm_buddy_free_list(mm, &vres->blocks, 0);
589 	mutex_unlock(&mgr->lock);
590 error_fini:
591 	ttm_resource_fini(man, &vres->base);
592 	kfree(vres);
593 
594 	return r;
595 }
596 
597 /**
598  * amdgpu_vram_mgr_del - free ranges
599  *
600  * @man: TTM memory type manager
601  * @res: TTM memory object
602  *
603  * Free the allocated VRAM again.
604  */
605 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
606 				struct ttm_resource *res)
607 {
608 	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
609 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
610 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
611 	struct drm_buddy *mm = &mgr->mm;
612 	struct drm_buddy_block *block;
613 	uint64_t vis_usage = 0;
614 
615 	mutex_lock(&mgr->lock);
616 	list_for_each_entry(block, &vres->blocks, link)
617 		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
618 
619 	amdgpu_vram_mgr_do_reserve(man);
620 
621 	drm_buddy_free_list(mm, &vres->blocks, vres->flags);
622 	mutex_unlock(&mgr->lock);
623 
624 	atomic64_sub(vis_usage, &mgr->vis_usage);
625 
626 	ttm_resource_fini(man, res);
627 	kfree(vres);
628 }
629 
630 /**
631  * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
632  *
633  * @adev: amdgpu device pointer
634  * @res: TTM memory object
635  * @offset: byte offset from the base of VRAM BO
636  * @length: number of bytes to export in sg_table
637  * @dev: the other device
638  * @dir: dma direction
639  * @sgt: resulting sg table
640  *
641  * Allocate and fill a sg table from a VRAM allocation.
642  */
643 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
644 			      struct ttm_resource *res,
645 			      u64 offset, u64 length,
646 			      struct device *dev,
647 			      enum dma_data_direction dir,
648 			      struct sg_table **sgt)
649 {
650 	struct amdgpu_res_cursor cursor;
651 	struct scatterlist *sg;
652 	int num_entries = 0;
653 	int i, r;
654 
655 	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
656 	if (!*sgt)
657 		return -ENOMEM;
658 
659 	/* Determine the number of DRM_BUDDY blocks to export */
660 	amdgpu_res_first(res, offset, length, &cursor);
661 	while (cursor.remaining) {
662 		num_entries++;
663 		amdgpu_res_next(&cursor, cursor.size);
664 	}
665 
666 	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
667 	if (r)
668 		goto error_free;
669 
670 	/* Initialize scatterlist nodes of sg_table */
671 	for_each_sgtable_sg((*sgt), sg, i)
672 		sg->length = 0;
673 
674 	/*
675 	 * Walk down DRM_BUDDY blocks to populate scatterlist nodes
676 	 * @note: Use the iterator API to get the first DRM_BUDDY block
677 	 * and the number of bytes from it. Access the following
678 	 * DRM_BUDDY block(s) if more of the buffer needs to be exported.
679 	 */
680 	amdgpu_res_first(res, offset, length, &cursor);
681 	for_each_sgtable_sg((*sgt), sg, i) {
682 		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
683 		size_t size = cursor.size;
684 		dma_addr_t addr;
685 
686 		addr = dma_map_resource(dev, phys, size, dir,
687 					DMA_ATTR_SKIP_CPU_SYNC);
688 		r = dma_mapping_error(dev, addr);
689 		if (r)
690 			goto error_unmap;
691 
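		/* VRAM has no struct page backing; only the DMA address and
		 * length of each scatterlist entry are meaningful here.
		 */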
692 		sg_set_page(sg, NULL, size, 0);
693 		sg_dma_address(sg) = addr;
694 		sg_dma_len(sg) = size;
695 
696 		amdgpu_res_next(&cursor, cursor.size);
697 	}
698 
699 	return 0;
700 
701 error_unmap:
702 	for_each_sgtable_sg((*sgt), sg, i) {
703 		if (!sg->length)
704 			continue;
705 
706 		dma_unmap_resource(dev, sg->dma_address,
707 				   sg->length, dir,
708 				   DMA_ATTR_SKIP_CPU_SYNC);
709 	}
710 	sg_free_table(*sgt);
711 
712 error_free:
713 	kfree(*sgt);
714 	return r;
715 }
716 
717 /**
718  * amdgpu_vram_mgr_free_sgt - free a sg table
719  *
720  * @dev: device pointer
721  * @dir: data direction of resource to unmap
722  * @sgt: sg table to free
723  *
724  * Free a previously allocated sg table.
725  */
726 void amdgpu_vram_mgr_free_sgt(struct device *dev,
727 			      enum dma_data_direction dir,
728 			      struct sg_table *sgt)
729 {
730 	struct scatterlist *sg;
731 	int i;
732 
733 	for_each_sgtable_sg(sgt, sg, i)
734 		dma_unmap_resource(dev, sg->dma_address,
735 				   sg->length, dir,
736 				   DMA_ATTR_SKIP_CPU_SYNC);
737 	sg_free_table(sgt);
738 	kfree(sgt);
739 }
740 
741 /**
742  * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
743  *
744  * @mgr: amdgpu_vram_mgr pointer
745  *
746  * Returns how many bytes are used in the visible part of VRAM
747  */
748 uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
749 {
750 	return atomic64_read(&mgr->vis_usage);
751 }
752 
753 /**
754  * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
755  *
756  * @man: TTM memory type manager
757  * @res: The resource to test
758  * @place: The place to test against
759  * @size: Size of the new allocation
760  *
761  * Test each drm buddy block for intersection for eviction decision.
762  */
763 static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man,
764 				       struct ttm_resource *res,
765 				       const struct ttm_place *place,
766 				       size_t size)
767 {
768 	struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
769 	struct drm_buddy_block *block;
770 
771 	/* Check each drm buddy block individually */
772 	list_for_each_entry(block, &mgr->blocks, link) {
773 		unsigned long fpfn =
774 			amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
775 		unsigned long lpfn = fpfn +
776 			(amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
777 
778 		if (place->fpfn < lpfn &&
779 		    (!place->lpfn || place->lpfn > fpfn))
780 			return true;
781 	}
782 
783 	return false;
784 }
785 
786 /**
787  * amdgpu_vram_mgr_compatible - test each drm buddy block for compatibility
788  *
789  * @man: TTM memory type manager
790  * @res: The resource to test
791  * @place: The place to test against
792  * @size: Size of the new allocation
793  *
794  * Test each drm buddy block for placement compatibility.
795  */
796 static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man,
797 				       struct ttm_resource *res,
798 				       const struct ttm_place *place,
799 				       size_t size)
800 {
801 	struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
802 	struct drm_buddy_block *block;
803 
804 	/* Check each drm buddy block individually */
805 	list_for_each_entry(block, &mgr->blocks, link) {
806 		unsigned long fpfn =
807 			amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
808 		unsigned long lpfn = fpfn +
809 			(amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
810 
811 		if (fpfn < place->fpfn ||
812 		    (place->lpfn && lpfn > place->lpfn))
813 			return false;
814 	}
815 
816 	return true;
817 }
818 
819 /**
820  * amdgpu_vram_mgr_debug - dump VRAM table
821  *
822  * @man: TTM memory type manager
823  * @printer: DRM printer to use
824  *
825  * Dump the table content using printk.
826  */
827 static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
828 				  struct drm_printer *printer)
829 {
830 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
831 	struct drm_buddy *mm = &mgr->mm;
832 	struct amdgpu_vram_reservation *rsv;
833 
834 	drm_printf(printer, "  vis usage:%llu\n",
835 		   amdgpu_vram_mgr_vis_usage(mgr));
836 
837 	mutex_lock(&mgr->lock);
838 	drm_printf(printer, "default_page_size: %lluKiB\n",
839 		   mgr->default_page_size >> 10);
840 
841 	drm_buddy_print(mm, printer);
842 
843 	drm_printf(printer, "reserved:\n");
844 	list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
845 		drm_printf(printer, "%#018llx-%#018llx: %llu\n",
846 			rsv->start, rsv->start + rsv->size, rsv->size);
847 	mutex_unlock(&mgr->lock);
848 }
849 
850 static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = {
851 	.alloc	= amdgpu_dummy_vram_mgr_new,
852 	.free	= amdgpu_dummy_vram_mgr_del,
853 	.intersects = amdgpu_dummy_vram_mgr_intersects,
854 	.compatible = amdgpu_dummy_vram_mgr_compatible,
855 	.debug	= amdgpu_dummy_vram_mgr_debug
856 };
857 
858 static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
859 	.alloc	= amdgpu_vram_mgr_new,
860 	.free	= amdgpu_vram_mgr_del,
861 	.intersects = amdgpu_vram_mgr_intersects,
862 	.compatible = amdgpu_vram_mgr_compatible,
863 	.debug	= amdgpu_vram_mgr_debug
864 };
865 
866 /**
867  * amdgpu_vram_mgr_init - init VRAM manager and DRM buddy allocator
868  *
869  * @adev: amdgpu_device pointer
870  *
871  * Allocate and initialize the VRAM manager.
872  */
873 int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
874 {
875 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
876 	struct ttm_resource_manager *man = &mgr->manager;
877 	int err;
878 
879 	ttm_resource_manager_init(man, &adev->mman.bdev,
880 				  adev->gmc.real_vram_size);
881 
882 	mutex_init(&mgr->lock);
883 	INIT_LIST_HEAD(&mgr->reservations_pending);
884 	INIT_LIST_HEAD(&mgr->reserved_pages);
885 	mgr->default_page_size = PAGE_SIZE;
886 
887 	if (!adev->gmc.is_app_apu) {
888 		man->func = &amdgpu_vram_mgr_func;
889 
890 		err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
891 		if (err)
892 			return err;
893 	} else {
894 		man->func = &amdgpu_dummy_vram_mgr_func;
895 		DRM_INFO("Setup dummy vram mgr\n");
896 	}
897 
898 	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
899 	ttm_resource_manager_set_used(man, true);
900 	return 0;
901 }
902 
903 /**
904  * amdgpu_vram_mgr_fini - free and destroy VRAM manager
905  *
906  * @adev: amdgpu_device pointer
907  *
908  * Destroy and free the VRAM manager. Any remaining allocations are evicted
909  * first; if eviction fails, the teardown is aborted.
910  */
911 void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
912 {
913 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
914 	struct ttm_resource_manager *man = &mgr->manager;
915 	int ret;
916 	struct amdgpu_vram_reservation *rsv, *temp;
917 
918 	ttm_resource_manager_set_used(man, false);
919 
920 	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
921 	if (ret)
922 		return;
923 
924 	mutex_lock(&mgr->lock);
925 	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
926 		kfree(rsv);
927 
928 	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
929 		drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
930 		kfree(rsv);
931 	}
932 	if (!adev->gmc.is_app_apu)
933 		drm_buddy_fini(&mgr->mm);
934 	mutex_unlock(&mgr->lock);
935 
936 	ttm_resource_manager_cleanup(man);
937 	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
938 }
939