xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c (revision a5219b41dd908182e6b4c0a9ab842c11973043e6)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Christian König
23  */
24 
25 #include <linux/dma-mapping.h>
26 #include <drm/ttm/ttm_range_manager.h>
27 
28 #include "amdgpu.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_res_cursor.h"
31 #include "atom.h"
32 
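/* Maximum size in bytes of a single scatterlist segment (2 GiB) used when
 * exporting VRAM through amdgpu_vram_mgr_alloc_sgt() below.
 */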
33 #define AMDGPU_MAX_SG_SEGMENT_SIZE	(2UL << 30)
34 
35 struct amdgpu_vram_reservation {
36 	u64 start;
37 	u64 size;
38 	struct list_head allocated;
39 	struct list_head blocks;
40 };
41 
42 static inline struct amdgpu_vram_mgr *
43 to_vram_mgr(struct ttm_resource_manager *man)
44 {
45 	return container_of(man, struct amdgpu_vram_mgr, manager);
46 }
47 
48 static inline struct amdgpu_device *
49 to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
50 {
51 	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
52 }
53 
54 static inline struct drm_buddy_block *
55 amdgpu_vram_mgr_first_block(struct list_head *list)
56 {
57 	return list_first_entry_or_null(list, struct drm_buddy_block, link);
58 }
59 
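/* Check whether the buddy blocks on @head form one contiguous address range */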
60 static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
61 {
62 	struct drm_buddy_block *block;
63 	u64 start, size;
64 
65 	block = amdgpu_vram_mgr_first_block(head);
66 	if (!block)
67 		return false;
68 
69 	while (head != block->link.next) {
70 		start = amdgpu_vram_mgr_block_start(block);
71 		size = amdgpu_vram_mgr_block_size(block);
72 
73 		block = list_entry(block->link.next, struct drm_buddy_block, link);
74 		if (start + size != amdgpu_vram_mgr_block_start(block))
75 			return false;
76 	}
77 
78 	return true;
79 }
80 
81 static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
82 {
83 	struct drm_buddy_block *block;
84 	u64 size = 0;
85 
86 	list_for_each_entry(block, head, link)
87 		size += amdgpu_vram_mgr_block_size(block);
88 
89 	return size;
90 }
91 
92 /**
93  * DOC: mem_info_vram_total
94  *
95  * The amdgpu driver provides a sysfs API for reporting current total VRAM
96  * available on the device.
97  * The file mem_info_vram_total is used for this and returns the total
98  * amount of VRAM in bytes.
99  */
100 static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
101 		struct device_attribute *attr, char *buf)
102 {
103 	struct drm_device *ddev = dev_get_drvdata(dev);
104 	struct amdgpu_device *adev = drm_to_adev(ddev);
105 
106 	return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
107 }
108 
109 /**
110  * DOC: mem_info_vis_vram_total
111  *
112  * The amdgpu driver provides a sysfs API for reporting current total
113  * visible VRAM available on the device.
114  * The file mem_info_vis_vram_total is used for this and returns the total
115  * amount of visible VRAM in bytes.
116  */
117 static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
118 		struct device_attribute *attr, char *buf)
119 {
120 	struct drm_device *ddev = dev_get_drvdata(dev);
121 	struct amdgpu_device *adev = drm_to_adev(ddev);
122 
123 	return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
124 }
125 
126 /**
127  * DOC: mem_info_vram_used
128  *
129  * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
130  * currently in use on the device.
131  * The file mem_info_vram_used is used for this and returns the total
132  * amount of currently used VRAM in bytes.
133  */
134 static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
135 					      struct device_attribute *attr,
136 					      char *buf)
137 {
138 	struct drm_device *ddev = dev_get_drvdata(dev);
139 	struct amdgpu_device *adev = drm_to_adev(ddev);
140 	struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;
141 
142 	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
143 }
144 
145 /**
146  * DOC: mem_info_vis_vram_used
147  *
148  * The amdgpu driver provides a sysfs API for reporting the current total of
149  * used visible VRAM.
150  * The file mem_info_vis_vram_used is used for this and returns the total
151  * amount of currently used visible VRAM in bytes.
152  */
153 static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
154 						  struct device_attribute *attr,
155 						  char *buf)
156 {
157 	struct drm_device *ddev = dev_get_drvdata(dev);
158 	struct amdgpu_device *adev = drm_to_adev(ddev);
159 
160 	return sysfs_emit(buf, "%llu\n",
161 			  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
162 }
163 
164 /**
165  * DOC: mem_info_vram_vendor
166  *
167  * The amdgpu driver provides a sysfs API for reporting the vendor of the
168  * installed VRAM.
169  * The file mem_info_vram_vendor is used for this and returns the name of the
170  * vendor.
171  */
172 static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
173 					   struct device_attribute *attr,
174 					   char *buf)
175 {
176 	struct drm_device *ddev = dev_get_drvdata(dev);
177 	struct amdgpu_device *adev = drm_to_adev(ddev);
178 
179 	switch (adev->gmc.vram_vendor) {
180 	case SAMSUNG:
181 		return sysfs_emit(buf, "samsung\n");
182 	case INFINEON:
183 		return sysfs_emit(buf, "infineon\n");
184 	case ELPIDA:
185 		return sysfs_emit(buf, "elpida\n");
186 	case ETRON:
187 		return sysfs_emit(buf, "etron\n");
188 	case NANYA:
189 		return sysfs_emit(buf, "nanya\n");
190 	case HYNIX:
191 		return sysfs_emit(buf, "hynix\n");
192 	case MOSEL:
193 		return sysfs_emit(buf, "mosel\n");
194 	case WINBOND:
195 		return sysfs_emit(buf, "winbond\n");
196 	case ESMT:
197 		return sysfs_emit(buf, "esmt\n");
198 	case MICRON:
199 		return sysfs_emit(buf, "micron\n");
200 	default:
201 		return sysfs_emit(buf, "unknown\n");
202 	}
203 }
204 
205 static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
206 		   amdgpu_mem_info_vram_total_show, NULL);
207 static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
208 		   amdgpu_mem_info_vis_vram_total_show, NULL);
209 static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
210 		   amdgpu_mem_info_vram_used_show, NULL);
211 static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
212 		   amdgpu_mem_info_vis_vram_used_show, NULL);
213 static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
214 		   amdgpu_mem_info_vram_vendor, NULL);
215 
216 static struct attribute *amdgpu_vram_mgr_attributes[] = {
217 	&dev_attr_mem_info_vram_total.attr,
218 	&dev_attr_mem_info_vis_vram_total.attr,
219 	&dev_attr_mem_info_vram_used.attr,
220 	&dev_attr_mem_info_vis_vram_used.attr,
221 	&dev_attr_mem_info_vram_vendor.attr,
222 	NULL
223 };
224 
225 static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
226 					    struct attribute *attr, int i)
227 {
228 	struct device *dev = kobj_to_dev(kobj);
229 	struct drm_device *ddev = dev_get_drvdata(dev);
230 	struct amdgpu_device *adev = drm_to_adev(ddev);
231 
232 	if (attr == &dev_attr_mem_info_vram_vendor.attr &&
233 	    !adev->gmc.vram_vendor)
234 		return 0;
235 
236 	return attr->mode;
237 }
238 
239 const struct attribute_group amdgpu_vram_mgr_attr_group = {
240 	.attrs = amdgpu_vram_mgr_attributes,
241 	.is_visible = amdgpu_vram_attrs_is_visible
242 };
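
/*
 * Illustrative usage only (not part of the driver): the attributes above are
 * exposed under the DRM device's sysfs directory and read back as plain text,
 * e.g.
 *	cat /sys/class/drm/card0/device/mem_info_vram_used
 * prints the number of bytes of VRAM currently in use; the exact card index
 * depends on the system.
 */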
243 
244 /**
245  * amdgpu_vram_mgr_vis_size - Calculate visible block size
246  *
247  * @adev: amdgpu_device pointer
248  * @block: DRM BUDDY block structure
249  *
250  * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
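 *
 * For example, with 256 MiB of visible VRAM, a block covering the range
 * 200 MiB - 300 MiB contributes 56 MiB, and a block starting at or beyond
 * 256 MiB contributes nothing.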
251  */
252 static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
253 				    struct drm_buddy_block *block)
254 {
255 	u64 start = amdgpu_vram_mgr_block_start(block);
256 	u64 end = start + amdgpu_vram_mgr_block_size(block);
257 
258 	if (start >= adev->gmc.visible_vram_size)
259 		return 0;
260 
261 	return (end > adev->gmc.visible_vram_size ?
262 		adev->gmc.visible_vram_size : end) - start;
263 }
264 
265 /**
266  * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
267  *
268  * @bo: &amdgpu_bo buffer object (must be in VRAM)
269  *
270  * Returns:
271  * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
272  */
273 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
274 {
275 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
276 	struct ttm_resource *res = bo->tbo.resource;
277 	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
278 	struct drm_buddy_block *block;
279 	u64 usage = 0;
280 
281 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
282 		return amdgpu_bo_size(bo);
283 
284 	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
285 		return 0;
286 
287 	list_for_each_entry(block, &vres->blocks, link)
288 		usage += amdgpu_vram_mgr_vis_size(adev, block);
289 
290 	return usage;
291 }
292 
293 /* Commit the reservation of VRAM pages */
294 static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
295 {
296 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
297 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
298 	struct drm_buddy *mm = &mgr->mm;
299 	struct amdgpu_vram_reservation *rsv, *temp;
300 	struct drm_buddy_block *block;
301 	uint64_t vis_usage;
302 
303 	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
304 		if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
305 					   rsv->size, mm->chunk_size, &rsv->allocated,
306 					   DRM_BUDDY_RANGE_ALLOCATION))
307 			continue;
308 
309 		block = amdgpu_vram_mgr_first_block(&rsv->allocated);
310 		if (!block)
311 			continue;
312 
313 		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
314 			rsv->start, rsv->size);
315 
316 		vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
317 		atomic64_add(vis_usage, &mgr->vis_usage);
318 		spin_lock(&man->bdev->lru_lock);
319 		man->usage += rsv->size;
320 		spin_unlock(&man->bdev->lru_lock);
321 		list_move(&rsv->blocks, &mgr->reserved_pages);
322 	}
323 }
324 
325 /**
326  * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
327  *
328  * @mgr: amdgpu_vram_mgr pointer
329  * @start: start address of the range in VRAM
330  * @size: size of the range
331  *
332  * Reserve memory from start address with the specified size in VRAM
333  */
334 int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
335 				  uint64_t start, uint64_t size)
336 {
337 	struct amdgpu_vram_reservation *rsv;
338 
339 	rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
340 	if (!rsv)
341 		return -ENOMEM;
342 
343 	INIT_LIST_HEAD(&rsv->allocated);
344 	INIT_LIST_HEAD(&rsv->blocks);
345 
346 	rsv->start = start;
347 	rsv->size = size;
348 
349 	mutex_lock(&mgr->lock);
350 	list_add_tail(&rsv->blocks, &mgr->reservations_pending);
351 	amdgpu_vram_mgr_do_reserve(&mgr->manager);
352 	mutex_unlock(&mgr->lock);
353 
354 	return 0;
355 }
356 
357 /**
358  * amdgpu_vram_mgr_query_page_status - query the reservation status
359  *
360  * @mgr: amdgpu_vram_mgr pointer
361  * @start: start address of a page in VRAM
362  *
363  * Returns:
364  *	-EBUSY: the page is still held and in the pending list
365  *	0: the page has been reserved
366  *	-ENOENT: the input page is not a reservation
367  */
368 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
369 				      uint64_t start)
370 {
371 	struct amdgpu_vram_reservation *rsv;
372 	int ret;
373 
374 	mutex_lock(&mgr->lock);
375 
376 	list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
377 		if (rsv->start <= start &&
378 		    (start < (rsv->start + rsv->size))) {
379 			ret = -EBUSY;
380 			goto out;
381 		}
382 	}
383 
384 	list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
385 		if (rsv->start <= start &&
386 		    (start < (rsv->start + rsv->size))) {
387 			ret = 0;
388 			goto out;
389 		}
390 	}
391 
392 	ret = -ENOENT;
393 out:
394 	mutex_unlock(&mgr->lock);
395 	return ret;
396 }
397 
398 static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
399 				  struct drm_printer *printer)
400 {
401 	DRM_DEBUG_DRIVER("Dummy vram mgr debug\n");
402 }
403 
404 static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man,
405 				       struct ttm_resource *res,
406 				       const struct ttm_place *place,
407 				       size_t size)
408 {
409 	DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n");
410 	return false;
411 }
412 
413 static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man,
414 				       struct ttm_resource *res,
415 				       const struct ttm_place *place,
416 				       size_t size)
417 {
418 	DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n");
419 	return true;
420 }
421 
422 static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man,
423 				struct ttm_resource *res)
424 {
425 	DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n");
426 }
427 
428 static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man,
429 			       struct ttm_buffer_object *tbo,
430 			       const struct ttm_place *place,
431 			       struct ttm_resource **res)
432 {
433 	DRM_DEBUG_DRIVER("Dummy vram mgr new\n");
434 	return -ENOSPC;
435 }
436 
437 /**
438  * amdgpu_vram_mgr_new - allocate new ranges
439  *
440  * @man: TTM memory type manager
441  * @tbo: TTM BO we need this range for
442  * @place: placement flags and restrictions
443  * @res: the resulting mem object
444  *
445  * Allocate VRAM for the given BO.
446  */
447 static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
448 			       struct ttm_buffer_object *tbo,
449 			       const struct ttm_place *place,
450 			       struct ttm_resource **res)
451 {
452 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
453 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
454 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
455 	u64 vis_usage = 0, max_bytes, min_block_size;
456 	struct amdgpu_vram_mgr_resource *vres;
457 	u64 size, remaining_size, lpfn, fpfn;
458 	unsigned int adjust_dcc_size = 0;
459 	struct drm_buddy *mm = &mgr->mm;
460 	struct drm_buddy_block *block;
461 	unsigned long pages_per_block;
462 	int r;
463 
464 	lpfn = (u64)place->lpfn << PAGE_SHIFT;
465 	if (!lpfn)
466 		lpfn = man->size;
467 
468 	fpfn = (u64)place->fpfn << PAGE_SHIFT;
469 
470 	max_bytes = adev->gmc.mc_vram_size;
471 	if (tbo->type != ttm_bo_type_kernel)
472 		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
473 
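	/*
	 * For non-contiguous BOs the allocation below prefers a minimum block
	 * size of pages_per_block pages (huge-page sized, 2 MiB by default) so
	 * large BOs are not fragmented into small blocks; contiguous BOs rely
	 * on the buddy allocator's contiguous allocation mode instead.
	 */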
474 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
475 		pages_per_block = ~0ul;
476 	} else {
477 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
478 		pages_per_block = HPAGE_PMD_NR;
479 #else
480 		/* default to 2MB */
481 		pages_per_block = 2UL << (20UL - PAGE_SHIFT);
482 #endif
483 		pages_per_block = max_t(u32, pages_per_block,
484 					tbo->page_alignment);
485 	}
486 
487 	vres = kzalloc(sizeof(*vres), GFP_KERNEL);
488 	if (!vres)
489 		return -ENOMEM;
490 
491 	ttm_resource_init(tbo, place, &vres->base);
492 
493 	/* bail out quickly if there's likely not enough VRAM for this BO */
494 	if (ttm_resource_manager_usage(man) > max_bytes) {
495 		r = -ENOSPC;
496 		goto error_fini;
497 	}
498 
499 	INIT_LIST_HEAD(&vres->blocks);
500 
501 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
502 		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
503 
504 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
505 		vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
506 
507 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
508 		vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION;
509 
510 	if (fpfn || lpfn != mgr->mm.size)
511 		/* Allocate blocks in desired range */
512 		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
513 
514 	if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC &&
515 	    adev->gmc.gmc_funcs->get_dcc_alignment)
516 		adjust_dcc_size = amdgpu_gmc_get_dcc_alignment(adev);
517 
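	/*
	 * Contiguous GFX12 DCC buffers are over-allocated to the next power of
	 * two (with trimming disabled) so that the start address can later be
	 * realigned to the DCC granularity and the excess trimmed off again.
	 */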
518 	remaining_size = (u64)vres->base.size;
519 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
520 		unsigned int dcc_size;
521 
522 		dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size);
523 		remaining_size = (u64)dcc_size;
524 
525 		vres->flags |= DRM_BUDDY_TRIM_DISABLE;
526 	}
527 
528 	mutex_lock(&mgr->lock);
529 	while (remaining_size) {
530 		if (tbo->page_alignment)
531 			min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
532 		else
533 			min_block_size = mgr->default_page_size;
534 
535 		size = remaining_size;
536 
537 		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size)
538 			min_block_size = size;
539 		else if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
540 			 !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
541 			min_block_size = (u64)pages_per_block << PAGE_SHIFT;
542 
543 		BUG_ON(min_block_size < mm->chunk_size);
544 
545 		r = drm_buddy_alloc_blocks(mm, fpfn,
546 					   lpfn,
547 					   size,
548 					   min_block_size,
549 					   &vres->blocks,
550 					   vres->flags);
551 
552 		if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul &&
553 		    !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
554 			vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION;
555 			pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT),
556 						tbo->page_alignment);
557 
558 			continue;
559 		}
560 
561 		if (unlikely(r))
562 			goto error_free_blocks;
563 
564 		if (size > remaining_size)
565 			remaining_size = 0;
566 		else
567 			remaining_size -= size;
568 	}
569 
570 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
571 		struct drm_buddy_block *dcc_block;
572 		unsigned long dcc_start;
573 		u64 trim_start;
574 
575 		dcc_block = amdgpu_vram_mgr_first_block(&vres->blocks);
576 		/* Adjust the start address for DCC buffers only */
577 		dcc_start =
578 			roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block),
579 				adjust_dcc_size);
580 		trim_start = (u64)dcc_start;
581 		drm_buddy_block_trim(mm, &trim_start,
582 				     (u64)vres->base.size,
583 				     &vres->blocks);
584 	}
585 	mutex_unlock(&mgr->lock);
586 
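	/*
	 * Buddy blocks need not be contiguous, so derive base.start from the
	 * highest block end minus the resource size, in pages; it is only used
	 * as an offset for checks such as whether the BO lies in CPU-visible
	 * VRAM.
	 */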
587 	vres->base.start = 0;
588 	size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
589 		     vres->base.size);
590 	list_for_each_entry(block, &vres->blocks, link) {
591 		unsigned long start;
592 
593 		start = amdgpu_vram_mgr_block_start(block) +
594 			amdgpu_vram_mgr_block_size(block);
595 		start >>= PAGE_SHIFT;
596 
597 		if (start > PFN_UP(size))
598 			start -= PFN_UP(size);
599 		else
600 			start = 0;
601 		vres->base.start = max(vres->base.start, start);
602 
603 		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
604 	}
605 
606 	if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
607 		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
608 
609 	if (adev->gmc.xgmi.connected_to_cpu)
610 		vres->base.bus.caching = ttm_cached;
611 	else
612 		vres->base.bus.caching = ttm_write_combined;
613 
614 	atomic64_add(vis_usage, &mgr->vis_usage);
615 	*res = &vres->base;
616 	return 0;
617 
618 error_free_blocks:
619 	drm_buddy_free_list(mm, &vres->blocks, 0);
620 	mutex_unlock(&mgr->lock);
621 error_fini:
622 	ttm_resource_fini(man, &vres->base);
623 	kfree(vres);
624 
625 	return r;
626 }
627 
628 /**
629  * amdgpu_vram_mgr_del - free ranges
630  *
631  * @man: TTM memory type manager
632  * @res: TTM memory object
633  *
634  * Free the allocated VRAM again.
635  */
636 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
637 				struct ttm_resource *res)
638 {
639 	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
640 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
641 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
642 	struct drm_buddy *mm = &mgr->mm;
643 	struct drm_buddy_block *block;
644 	uint64_t vis_usage = 0;
645 
646 	mutex_lock(&mgr->lock);
647 	list_for_each_entry(block, &vres->blocks, link)
648 		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
649 
650 	amdgpu_vram_mgr_do_reserve(man);
651 
652 	drm_buddy_free_list(mm, &vres->blocks, vres->flags);
653 	mutex_unlock(&mgr->lock);
654 
655 	atomic64_sub(vis_usage, &mgr->vis_usage);
656 
657 	ttm_resource_fini(man, res);
658 	kfree(vres);
659 }
660 
661 /**
662  * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
663  *
664  * @adev: amdgpu device pointer
665  * @res: TTM memory object
666  * @offset: byte offset from the base of VRAM BO
667  * @length: number of bytes to export in sg_table
668  * @dev: the other device
669  * @dir: dma direction
670  * @sgt: resulting sg table
671  *
672  * Allocate and fill a sg table from a VRAM allocation.
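 * The returned table should be unmapped and released again with
 * amdgpu_vram_mgr_free_sgt().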
673  */
674 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
675 			      struct ttm_resource *res,
676 			      u64 offset, u64 length,
677 			      struct device *dev,
678 			      enum dma_data_direction dir,
679 			      struct sg_table **sgt)
680 {
681 	struct amdgpu_res_cursor cursor;
682 	struct scatterlist *sg;
683 	int num_entries = 0;
684 	int i, r;
685 
686 	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
687 	if (!*sgt)
688 		return -ENOMEM;
689 
690 	/* Determine the number of scatterlist segments needed for the export */
691 	amdgpu_res_first(res, offset, length, &cursor);
692 	while (cursor.remaining) {
693 		num_entries++;
694 		amdgpu_res_next(&cursor, min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE));
695 	}
696 
697 	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
698 	if (r)
699 		goto error_free;
700 
701 	/* Initialize scatterlist nodes of sg_table */
702 	for_each_sgtable_sg((*sgt), sg, i)
703 		sg->length = 0;
704 
705 	/*
706 	 * Walk down DRM_BUDDY blocks to populate scatterlist nodes
707 	 * @note: Use the iterator API to get the first DRM_BUDDY block
708 	 * and the number of bytes from it. Access the following
709 	 * DRM_BUDDY block(s) if more of the buffer needs to be exported.
710 	 */
711 	amdgpu_res_first(res, offset, length, &cursor);
712 	for_each_sgtable_sg((*sgt), sg, i) {
713 		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
714 		unsigned long size = min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE);
715 		dma_addr_t addr;
716 
717 		addr = dma_map_resource(dev, phys, size, dir,
718 					DMA_ATTR_SKIP_CPU_SYNC);
719 		r = dma_mapping_error(dev, addr);
720 		if (r)
721 			goto error_unmap;
722 
723 		sg_set_page(sg, NULL, size, 0);
724 		sg_dma_address(sg) = addr;
725 		sg_dma_len(sg) = size;
726 
727 		amdgpu_res_next(&cursor, size);
728 	}
729 
730 	return 0;
731 
732 error_unmap:
733 	for_each_sgtable_sg((*sgt), sg, i) {
734 		if (!sg->length)
735 			continue;
736 
737 		dma_unmap_resource(dev, sg->dma_address,
738 				   sg->length, dir,
739 				   DMA_ATTR_SKIP_CPU_SYNC);
740 	}
741 	sg_free_table(*sgt);
742 
743 error_free:
744 	kfree(*sgt);
745 	return r;
746 }
747 
748 /**
749  * amdgpu_vram_mgr_free_sgt - free an sg table
750  *
751  * @dev: device pointer
752  * @dir: data direction of resource to unmap
753  * @sgt: sg table to free
754  *
755  * Free a previously allocated sg table.
756  */
757 void amdgpu_vram_mgr_free_sgt(struct device *dev,
758 			      enum dma_data_direction dir,
759 			      struct sg_table *sgt)
760 {
761 	struct scatterlist *sg;
762 	int i;
763 
764 	for_each_sgtable_sg(sgt, sg, i)
765 		dma_unmap_resource(dev, sg->dma_address,
766 				   sg->length, dir,
767 				   DMA_ATTR_SKIP_CPU_SYNC);
768 	sg_free_table(sgt);
769 	kfree(sgt);
770 }
771 
772 /**
773  * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
774  *
775  * @mgr: amdgpu_vram_mgr pointer
776  *
777  * Returns how many bytes are used in the visible part of VRAM
778  */
779 uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
780 {
781 	return atomic64_read(&mgr->vis_usage);
782 }
783 
784 /**
785  * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
786  *
787  * @man: TTM memory type manager
788  * @res: The resource to test
789  * @place: The place to test against
790  * @size: Size of the new allocation
791  *
792  * Test each drm buddy block for intersection to guide eviction decisions.
793  */
794 static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man,
795 				       struct ttm_resource *res,
796 				       const struct ttm_place *place,
797 				       size_t size)
798 {
799 	struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
800 	struct drm_buddy_block *block;
801 
802 	/* Check each drm buddy block individually */
803 	list_for_each_entry(block, &mgr->blocks, link) {
804 		unsigned long fpfn =
805 			amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
806 		unsigned long lpfn = fpfn +
807 			(amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
808 
809 		if (place->fpfn < lpfn &&
810 		    (!place->lpfn || place->lpfn > fpfn))
811 			return true;
812 	}
813 
814 	return false;
815 }
816 
817 /**
818  * amdgpu_vram_mgr_compatible - test each drm buddy block for compatibility
819  *
820  * @man: TTM memory type manager
821  * @res: The resource to test
822  * @place: The place to test against
823  * @size: Size of the new allocation
824  *
825  * Test each drm buddy block for placement compatibility.
826  */
827 static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man,
828 				       struct ttm_resource *res,
829 				       const struct ttm_place *place,
830 				       size_t size)
831 {
832 	struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
833 	struct drm_buddy_block *block;
834 
835 	/* Check each drm buddy block individually */
836 	list_for_each_entry(block, &mgr->blocks, link) {
837 		unsigned long fpfn =
838 			amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
839 		unsigned long lpfn = fpfn +
840 			(amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
841 
842 		if (fpfn < place->fpfn ||
843 		    (place->lpfn && lpfn > place->lpfn))
844 			return false;
845 	}
846 
847 	return true;
848 }
849 
850 /**
851  * amdgpu_vram_mgr_debug - dump VRAM table
852  *
853  * @man: TTM memory type manager
854  * @printer: DRM printer to use
855  *
856  * Dump the table content using the given DRM printer.
857  */
858 static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
859 				  struct drm_printer *printer)
860 {
861 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
862 	struct drm_buddy *mm = &mgr->mm;
863 	struct amdgpu_vram_reservation *rsv;
864 
865 	drm_printf(printer, "  vis usage:%llu\n",
866 		   amdgpu_vram_mgr_vis_usage(mgr));
867 
868 	mutex_lock(&mgr->lock);
869 	drm_printf(printer, "default_page_size: %lluKiB\n",
870 		   mgr->default_page_size >> 10);
871 
872 	drm_buddy_print(mm, printer);
873 
874 	drm_printf(printer, "reserved:\n");
875 	list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
876 		drm_printf(printer, "%#018llx-%#018llx: %llu\n",
877 			rsv->start, rsv->start + rsv->size, rsv->size);
878 	mutex_unlock(&mgr->lock);
879 }
880 
881 static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = {
882 	.alloc	= amdgpu_dummy_vram_mgr_new,
883 	.free	= amdgpu_dummy_vram_mgr_del,
884 	.intersects = amdgpu_dummy_vram_mgr_intersects,
885 	.compatible = amdgpu_dummy_vram_mgr_compatible,
886 	.debug	= amdgpu_dummy_vram_mgr_debug
887 };
888 
889 static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
890 	.alloc	= amdgpu_vram_mgr_new,
891 	.free	= amdgpu_vram_mgr_del,
892 	.intersects = amdgpu_vram_mgr_intersects,
893 	.compatible = amdgpu_vram_mgr_compatible,
894 	.debug	= amdgpu_vram_mgr_debug
895 };
896 
897 /**
898  * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
899  *
900  * @adev: amdgpu_device pointer
901  *
902  * Allocate and initialize the VRAM manager.
903  */
904 int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
905 {
906 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
907 	struct ttm_resource_manager *man = &mgr->manager;
908 	int err;
909 
910 	ttm_resource_manager_init(man, &adev->mman.bdev,
911 				  adev->gmc.real_vram_size);
912 
913 	mutex_init(&mgr->lock);
914 	INIT_LIST_HEAD(&mgr->reservations_pending);
915 	INIT_LIST_HEAD(&mgr->reserved_pages);
916 	mgr->default_page_size = PAGE_SIZE;
917 
918 	if (!adev->gmc.is_app_apu) {
919 		man->func = &amdgpu_vram_mgr_func;
920 
921 		err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
922 		if (err)
923 			return err;
924 	} else {
925 		man->func = &amdgpu_dummy_vram_mgr_func;
926 		DRM_INFO("Setup dummy vram mgr\n");
927 	}
928 
929 	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
930 	ttm_resource_manager_set_used(man, true);
931 	return 0;
932 }
933 
934 /**
935  * amdgpu_vram_mgr_fini - free and destroy VRAM manager
936  *
937  * @adev: amdgpu_device pointer
938  *
939  * Destroy and free the VRAM manager after evicting any remaining
940  * allocations.
941  */
942 void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
943 {
944 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
945 	struct ttm_resource_manager *man = &mgr->manager;
946 	int ret;
947 	struct amdgpu_vram_reservation *rsv, *temp;
948 
949 	ttm_resource_manager_set_used(man, false);
950 
951 	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
952 	if (ret)
953 		return;
954 
955 	mutex_lock(&mgr->lock);
956 	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
957 		kfree(rsv);
958 
959 	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
960 		drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
961 		kfree(rsv);
962 	}
963 	if (!adev->gmc.is_app_apu)
964 		drm_buddy_fini(&mgr->mm);
965 	mutex_unlock(&mgr->lock);
966 
967 	ttm_resource_manager_cleanup(man);
968 	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
969 }
970