xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c (revision 8b85987d3cf50178f67618122d9f3bb202f62f42)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Christian König
23  */
24 
25 #include <linux/dma-mapping.h>
26 #include <drm/ttm/ttm_range_manager.h>
27 #include <drm/drm_drv.h>
28 #include <drm/drm_buddy.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_vm.h"
32 #include "amdgpu_res_cursor.h"
33 #include "atom.h"
34 
35 #define AMDGPU_MAX_SG_SEGMENT_SIZE	(2UL << 30)
36 
37 struct amdgpu_vram_reservation {
38 	u64 start;
39 	u64 size;
40 	struct list_head allocated;
41 	struct list_head blocks;
42 };
43 
44 static inline struct amdgpu_vram_mgr *
45 to_vram_mgr(struct ttm_resource_manager *man)
46 {
47 	return container_of(man, struct amdgpu_vram_mgr, manager);
48 }
49 
50 static inline struct amdgpu_device *
51 to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
52 {
53 	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
54 }
55 
56 static inline struct gpu_buddy_block *
57 amdgpu_vram_mgr_first_block(struct list_head *list)
58 {
59 	return list_first_entry_or_null(list, struct gpu_buddy_block, link);
60 }
61 
62 static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
63 {
64 	struct gpu_buddy_block *block;
65 	u64 start, size;
66 
67 	block = amdgpu_vram_mgr_first_block(head);
68 	if (!block)
69 		return false;
70 
71 	while (head != block->link.next) {
72 		start = amdgpu_vram_mgr_block_start(block);
73 		size = amdgpu_vram_mgr_block_size(block);
74 
75 		block = list_entry(block->link.next, struct gpu_buddy_block, link);
76 		if (start + size != amdgpu_vram_mgr_block_start(block))
77 			return false;
78 	}
79 
80 	return true;
81 }
82 
83 static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
84 {
85 	struct gpu_buddy_block *block;
86 	u64 size = 0;
87 
88 	list_for_each_entry(block, head, link)
89 		size += amdgpu_vram_mgr_block_size(block);
90 
91 	return size;
92 }
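/*
 * Minimal userspace model of the two helpers above: with blocks kept
 * sorted by start address (hypothetical values below), a list is
 * contiguous when each block ends exactly where the next one begins,
 * and the total size is the plain sum of the block sizes.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct block { uint64_t start, size; };

static bool blocks_contiguous(const struct block *b, int n)
{
	int i;

	for (i = 0; i + 1 < n; i++)
		if (b[i].start + b[i].size != b[i + 1].start)
			return false;
	return n > 0;
}

static uint64_t blocks_size(const struct block *b, int n)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < n; i++)
		total += b[i].size;
	return total;
}

int main(void)
{
	struct block b[] = { { 0x0000, 0x1000 }, { 0x1000, 0x2000 } };

	printf("contiguous=%d size=%llu\n", blocks_contiguous(b, 2),
	       (unsigned long long)blocks_size(b, 2)); /* contiguous=1 size=12288 */
	return 0;
}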
93 
94 /**
95  * DOC: mem_info_vram_total
96  *
97  * The amdgpu driver provides a sysfs API for reporting the total VRAM
98  * available on the device.
99  * The file mem_info_vram_total is used for this and returns the total
100  * amount of VRAM in bytes.
101  */
102 static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
103 		struct device_attribute *attr, char *buf)
104 {
105 	struct drm_device *ddev = dev_get_drvdata(dev);
106 	struct amdgpu_device *adev = drm_to_adev(ddev);
107 
108 	return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
109 }
110 
111 /**
112  * DOC: mem_info_vis_vram_total
113  *
114  * The amdgpu driver provides a sysfs API for reporting the total
115  * visible VRAM available on the device.
116  * The file mem_info_vis_vram_total is used for this and returns the total
117  * amount of visible VRAM in bytes.
118  */
119 static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
120 		struct device_attribute *attr, char *buf)
121 {
122 	struct drm_device *ddev = dev_get_drvdata(dev);
123 	struct amdgpu_device *adev = drm_to_adev(ddev);
124 
125 	return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
126 }
127 
128 /**
129  * DOC: mem_info_vram_used
130  *
131  * The amdgpu driver provides a sysfs API for reporting the amount of
132  * VRAM currently in use on the device.
133  * The file mem_info_vram_used is used for this and returns the total
134  * amount of currently used VRAM in bytes.
135  */
136 static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
137 					      struct device_attribute *attr,
138 					      char *buf)
139 {
140 	struct drm_device *ddev = dev_get_drvdata(dev);
141 	struct amdgpu_device *adev = drm_to_adev(ddev);
142 	struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;
143 
144 	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
145 }
146 
147 /**
148  * DOC: mem_info_vis_vram_used
149  *
150  * The amdgpu driver provides a sysfs API for reporting the total amount
151  * of visible VRAM currently in use.
152  * The file mem_info_vis_vram_used is used for this and returns the total
153  * amount of currently used visible VRAM in bytes.
154  */
155 static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
156 						  struct device_attribute *attr,
157 						  char *buf)
158 {
159 	struct drm_device *ddev = dev_get_drvdata(dev);
160 	struct amdgpu_device *adev = drm_to_adev(ddev);
161 
162 	return sysfs_emit(buf, "%llu\n",
163 			  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
164 }
165 
166 /**
167  * DOC: mem_info_vram_vendor
168  *
169  * The amdgpu driver provides a sysfs API for reporting the vendor of the
170  * installed VRAM.
171  * The file mem_info_vram_vendor is used for this and returns the name of the
172  * vendor.
173  */
174 static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
175 					   struct device_attribute *attr,
176 					   char *buf)
177 {
178 	struct drm_device *ddev = dev_get_drvdata(dev);
179 	struct amdgpu_device *adev = drm_to_adev(ddev);
180 
181 	switch (adev->gmc.vram_vendor) {
182 	case SAMSUNG:
183 		return sysfs_emit(buf, "samsung\n");
184 	case INFINEON:
185 		return sysfs_emit(buf, "infineon\n");
186 	case ELPIDA:
187 		return sysfs_emit(buf, "elpida\n");
188 	case ETRON:
189 		return sysfs_emit(buf, "etron\n");
190 	case NANYA:
191 		return sysfs_emit(buf, "nanya\n");
192 	case HYNIX:
193 		return sysfs_emit(buf, "hynix\n");
194 	case MOSEL:
195 		return sysfs_emit(buf, "mosel\n");
196 	case WINBOND:
197 		return sysfs_emit(buf, "winbond\n");
198 	case ESMT:
199 		return sysfs_emit(buf, "esmt\n");
200 	case MICRON:
201 		return sysfs_emit(buf, "micron\n");
202 	default:
203 		return sysfs_emit(buf, "unknown\n");
204 	}
205 }
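/*
 * The switch above is a plain value-to-name map. A table-driven
 * alternative (a sketch, not the driver's implementation; the ids
 * below are placeholders, the driver uses atom-defined constants)
 * keeps the vendor list in one place:
 */
#include <stdio.h>

static const struct { int id; const char *name; } vendors[] = {
	{ 1, "samsung" }, { 2, "infineon" }, { 3, "elpida" },
	{ 4, "etron" }, { 5, "nanya" }, { 6, "hynix" },
	{ 7, "mosel" }, { 8, "winbond" }, { 9, "esmt" }, { 10, "micron" },
};

static const char *vram_vendor_name(int id)
{
	unsigned int i;

	for (i = 0; i < sizeof(vendors) / sizeof(vendors[0]); i++)
		if (vendors[i].id == id)
			return vendors[i].name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", vram_vendor_name(6)); /* hynix */
	return 0;
}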
206 
207 static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
208 		   amdgpu_mem_info_vram_total_show, NULL);
209 static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
210 		   amdgpu_mem_info_vis_vram_total_show, NULL);
211 static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
212 		   amdgpu_mem_info_vram_used_show, NULL);
213 static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
214 		   amdgpu_mem_info_vis_vram_used_show, NULL);
215 static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
216 		   amdgpu_mem_info_vram_vendor, NULL);
217 
218 static struct attribute *amdgpu_vram_mgr_attributes[] = {
219 	&dev_attr_mem_info_vram_total.attr,
220 	&dev_attr_mem_info_vis_vram_total.attr,
221 	&dev_attr_mem_info_vram_used.attr,
222 	&dev_attr_mem_info_vis_vram_used.attr,
223 	&dev_attr_mem_info_vram_vendor.attr,
224 	NULL
225 };
226 
227 static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
228 					    struct attribute *attr, int i)
229 {
230 	struct device *dev = kobj_to_dev(kobj);
231 	struct drm_device *ddev = dev_get_drvdata(dev);
232 	struct amdgpu_device *adev = drm_to_adev(ddev);
233 
234 	if (attr == &dev_attr_mem_info_vram_vendor.attr &&
235 	    !adev->gmc.vram_vendor)
236 		return 0;
237 
238 	if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager))
239 		return 0;
240 
241 	return attr->mode;
242 }
243 
244 const struct attribute_group amdgpu_vram_mgr_attr_group = {
245 	.attrs = amdgpu_vram_mgr_attributes,
246 	.is_visible = amdgpu_vram_attrs_is_visible
247 };
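/*
 * Userspace sketch reading the attributes registered above. The sysfs
 * path is an assumption (card0 on a single-GPU system); the files are
 * only visible while the VRAM manager is in use, per
 * amdgpu_vram_attrs_is_visible().
 */
#include <stdio.h>

int main(void)
{
	static const char * const files[] = {
		"mem_info_vram_total", "mem_info_vis_vram_total",
		"mem_info_vram_used", "mem_info_vis_vram_used",
		"mem_info_vram_vendor",
	};
	char path[128], line[64];
	unsigned int i;

	for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/class/drm/card0/device/%s", files[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* attribute hidden or no amdgpu */
		if (fgets(line, sizeof(line), f))
			printf("%-24s %s", files[i], line);
		fclose(f);
	}
	return 0;
}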
248 
249 /**
250  * amdgpu_vram_mgr_vis_size - Calculate visible block size
251  *
252  * @adev: amdgpu_device pointer
253  * @block: GPU buddy block structure
254  *
255  * Calculate how many bytes of the GPU buddy block are inside visible VRAM
256  */
257 static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
258 				    struct gpu_buddy_block *block)
259 {
260 	u64 start = amdgpu_vram_mgr_block_start(block);
261 	u64 end = start + amdgpu_vram_mgr_block_size(block);
262 
263 	if (start >= adev->gmc.visible_vram_size)
264 		return 0;
265 
266 	return (end > adev->gmc.visible_vram_size ?
267 		adev->gmc.visible_vram_size : end) - start;
268 }
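/*
 * The computation above is an interval clamp: it returns the bytes of
 * [start, start + size) that fall below visible_vram_size. A standalone
 * sketch with hypothetical sizes:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t vis_bytes(uint64_t start, uint64_t size, uint64_t visible)
{
	uint64_t end = start + size;

	if (start >= visible)
		return 0;
	return (end > visible ? visible : end) - start;
}

int main(void)
{
	/* 256 MiB visible window, 8 KiB block straddling the boundary */
	uint64_t vis = 256ULL << 20;

	printf("%llu\n", (unsigned long long)vis_bytes(vis - 4096, 8192, vis));
	return 0;	/* prints 4096: only the first half is CPU visible */
}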
269 
270 /**
271  * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
272  *
273  * @bo: &amdgpu_bo buffer object (must be in VRAM)
274  *
275  * Returns:
276  * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
277  */
278 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
279 {
280 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
281 	struct ttm_resource *res = bo->tbo.resource;
282 	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
283 	struct gpu_buddy_block *block;
284 	u64 usage = 0;
285 
286 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
287 		return amdgpu_bo_size(bo);
288 
289 	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
290 		return 0;
291 
292 	list_for_each_entry(block, &vres->blocks, link)
293 		usage += amdgpu_vram_mgr_vis_size(adev, block);
294 
295 	return usage;
296 }
297 
298 /* Commit the reservation of VRAM pages */
299 static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
300 {
301 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
302 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
303 	struct gpu_buddy *mm = &mgr->mm;
304 	struct amdgpu_vram_reservation *rsv, *temp;
305 	struct gpu_buddy_block *block;
306 	uint64_t vis_usage;
307 
308 	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
309 		if (gpu_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
310 					   rsv->size, mm->chunk_size, &rsv->allocated,
311 					   GPU_BUDDY_RANGE_ALLOCATION))
312 			continue;
313 
314 		block = amdgpu_vram_mgr_first_block(&rsv->allocated);
315 		if (!block)
316 			continue;
317 
318 		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
319 			rsv->start, rsv->size);
320 
321 		vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
322 		atomic64_add(vis_usage, &mgr->vis_usage);
323 		spin_lock(&man->bdev->lru_lock);
324 		man->usage += rsv->size;
325 		spin_unlock(&man->bdev->lru_lock);
326 		list_move(&rsv->blocks, &mgr->reserved_pages);
327 	}
328 }
329 
330 /**
331  * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
332  *
333  * @mgr: amdgpu_vram_mgr pointer
334  * @start: start address of the range in VRAM
335  * @size: size of the range
336  *
337  * Reserve memory from start address with the specified size in VRAM
338  */
339 int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
340 				  uint64_t start, uint64_t size)
341 {
342 	struct amdgpu_vram_reservation *rsv;
343 
344 	rsv = kzalloc_obj(*rsv);
345 	if (!rsv)
346 		return -ENOMEM;
347 
348 	INIT_LIST_HEAD(&rsv->allocated);
349 	INIT_LIST_HEAD(&rsv->blocks);
350 
351 	rsv->start = start;
352 	rsv->size = size;
353 
354 	mutex_lock(&mgr->lock);
355 	list_add_tail(&rsv->blocks, &mgr->reservations_pending);
356 	amdgpu_vram_mgr_do_reserve(&mgr->manager);
357 	mutex_unlock(&mgr->lock);
358 
359 	return 0;
360 }
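/*
 * Hypothetical usage sketch (not from this file): a caller reserving a
 * 1 MiB region at offset 0 for firmware-owned memory. The reservation
 * is committed immediately when the range is free, otherwise it stays
 * pending and is retried from amdgpu_vram_mgr_do_reserve() as blocks
 * are released.
 */
static int example_reserve_fw_region(struct amdgpu_device *adev)
{
	return amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
					     0, 1 << 20);
}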
361 
362 /**
363  * amdgpu_vram_mgr_query_page_status - query the reservation status
364  *
365  * @mgr: amdgpu_vram_mgr pointer
366  * @start: start address of a page in VRAM
367  *
368  * Returns:
369  *	-EBUSY: the page is still held and is in the pending list
370  *	0: the page has been reserved
371  *	-ENOENT: the input page is not a reservation
372  */
373 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
374 				      uint64_t start)
375 {
376 	struct amdgpu_vram_reservation *rsv;
377 	int ret;
378 
379 	mutex_lock(&mgr->lock);
380 
381 	list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
382 		if (rsv->start <= start &&
383 		    (start < (rsv->start + rsv->size))) {
384 			ret = -EBUSY;
385 			goto out;
386 		}
387 	}
388 
389 	list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
390 		if (rsv->start <= start &&
391 		    (start < (rsv->start + rsv->size))) {
392 			ret = 0;
393 			goto out;
394 		}
395 	}
396 
397 	ret = -ENOENT;
398 out:
399 	mutex_unlock(&mgr->lock);
400 	return ret;
401 }
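/*
 * Userspace model of the tri-state lookup above: two arrays stand in
 * for the pending and reserved reservation lists (hypothetical data).
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, size; };

static int query_status(const struct range *pend, int np,
			const struct range *resv, int nr, uint64_t addr)
{
	int i;

	for (i = 0; i < np; i++)
		if (addr >= pend[i].start && addr < pend[i].start + pend[i].size)
			return -EBUSY;	/* still held in the pending list */
	for (i = 0; i < nr; i++)
		if (addr >= resv[i].start && addr < resv[i].start + resv[i].size)
			return 0;	/* committed reservation */
	return -ENOENT;			/* not a reservation at all */
}

int main(void)
{
	struct range pend[] = { { 0x1000, 0x1000 } };
	struct range resv[] = { { 0x4000, 0x1000 } };

	printf("%d %d %d\n",
	       query_status(pend, 1, resv, 1, 0x1800),	/* -EBUSY */
	       query_status(pend, 1, resv, 1, 0x4000),	/* 0 */
	       query_status(pend, 1, resv, 1, 0x9000));	/* -ENOENT */
	return 0;
}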
402 
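/**
 * amdgpu_vram_mgr_query_address_block_info - find the block holding an address
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @address: byte address within VRAM
 * @info: filled with the matching block's start, size and owning task
 *
 * Returns:
 * 0 and fills @info if a currently allocated block contains @address,
 * -ENOENT otherwise.
 */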
403 int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
404 			uint64_t address, struct amdgpu_vram_block_info *info)
405 {
406 	struct amdgpu_vram_mgr_resource *vres;
407 	struct gpu_buddy_block *block;
408 	u64 start, size;
409 	int ret = -ENOENT;
410 
411 	mutex_lock(&mgr->lock);
412 	list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) {
413 		list_for_each_entry(block, &vres->blocks, link) {
414 			start = amdgpu_vram_mgr_block_start(block);
415 			size = amdgpu_vram_mgr_block_size(block);
416 			if ((start <= address) && (address < (start + size))) {
417 				info->start = start;
418 				info->size = size;
419 				memcpy(&info->task, &vres->task, sizeof(vres->task));
420 				ret = 0;
421 				goto out;
422 			}
423 		}
424 	}
425 
426 out:
427 	mutex_unlock(&mgr->lock);
428 
429 	return ret;
430 }
431 
432 /**
433  * amdgpu_vram_mgr_new - allocate new ranges
434  *
435  * @man: TTM memory type manager
436  * @tbo: TTM BO we need this range for
437  * @place: placement flags and restrictions
438  * @res: the resulting mem object
439  *
440  * Allocate VRAM for the given BO.
441  */
442 static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
443 			       struct ttm_buffer_object *tbo,
444 			       const struct ttm_place *place,
445 			       struct ttm_resource **res)
446 {
447 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
448 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
449 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
450 	u64 vis_usage = 0, max_bytes, min_block_size;
451 	struct amdgpu_vram_mgr_resource *vres;
452 	u64 size, remaining_size, lpfn, fpfn;
453 	unsigned int adjust_dcc_size = 0;
454 	struct gpu_buddy *mm = &mgr->mm;
455 	struct gpu_buddy_block *block;
456 	unsigned long pages_per_block;
457 	int r;
458 
459 	lpfn = (u64)place->lpfn << PAGE_SHIFT;
460 	if (!lpfn || lpfn > man->size)
461 		lpfn = man->size;
462 
463 	fpfn = (u64)place->fpfn << PAGE_SHIFT;
464 
465 	max_bytes = adev->gmc.mc_vram_size;
466 	if (tbo->type != ttm_bo_type_kernel)
467 		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
468 
469 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
470 		pages_per_block = ~0ul;
471 	} else {
472 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
473 		pages_per_block = HPAGE_PMD_NR;
474 #else
475 		/* default to 2MB */
476 		pages_per_block = 2UL << (20UL - PAGE_SHIFT);
477 #endif
478 		pages_per_block = max_t(u32, pages_per_block,
479 					tbo->page_alignment);
480 	}
481 
482 	vres = kzalloc_obj(*vres);
483 	if (!vres)
484 		return -ENOMEM;
485 
486 	ttm_resource_init(tbo, place, &vres->base);
487 
488 	/* bail out quickly if there's likely not enough VRAM for this BO */
489 	if (ttm_resource_manager_usage(man) > max_bytes) {
490 		r = -ENOSPC;
491 		goto error_fini;
492 	}
493 
494 	INIT_LIST_HEAD(&vres->blocks);
495 
496 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
497 		vres->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION;
498 
499 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
500 		vres->flags |= GPU_BUDDY_CONTIGUOUS_ALLOCATION;
501 
502 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
503 		vres->flags |= GPU_BUDDY_CLEAR_ALLOCATION;
504 
505 	if (fpfn || lpfn != mgr->mm.size)
506 		/* Allocate blocks in desired range */
507 		vres->flags |= GPU_BUDDY_RANGE_ALLOCATION;
508 
509 	if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC &&
510 	    adev->gmc.gmc_funcs->get_dcc_alignment)
511 		adjust_dcc_size = amdgpu_gmc_get_dcc_alignment(adev);
512 
513 	remaining_size = (u64)vres->base.size;
514 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
515 		unsigned int dcc_size;
516 
517 		dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size);
518 		remaining_size = (u64)dcc_size;
519 
520 		vres->flags |= GPU_BUDDY_TRIM_DISABLE;
521 	}
522 
523 	mutex_lock(&mgr->lock);
524 	while (remaining_size) {
525 		if (tbo->page_alignment)
526 			min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
527 		else
528 			min_block_size = mgr->default_page_size;
529 
530 		size = remaining_size;
531 
532 		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size)
533 			min_block_size = size;
534 		else if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
535 			 !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
536 			min_block_size = (u64)pages_per_block << PAGE_SHIFT;
537 
538 		BUG_ON(min_block_size < mm->chunk_size);
539 
540 		r = gpu_buddy_alloc_blocks(mm, fpfn,
541 					   lpfn,
542 					   size,
543 					   min_block_size,
544 					   &vres->blocks,
545 					   vres->flags);
546 
547 		if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul &&
548 		    !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
549 			vres->flags &= ~GPU_BUDDY_CONTIGUOUS_ALLOCATION;
550 			pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT),
551 						tbo->page_alignment);
552 
553 			continue;
554 		}
555 
556 		if (unlikely(r))
557 			goto error_free_blocks;
558 
559 		if (size > remaining_size)
560 			remaining_size = 0;
561 		else
562 			remaining_size -= size;
563 	}
564 
565 	vres->task.pid = task_pid_nr(current);
566 	get_task_comm(vres->task.comm, current);
567 	list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);
568 
569 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
570 		struct gpu_buddy_block *dcc_block;
571 		unsigned long dcc_start;
572 		u64 trim_start;
573 
574 		dcc_block = amdgpu_vram_mgr_first_block(&vres->blocks);
575 		/* Adjust the start address for DCC buffers only */
576 		dcc_start =
577 			roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block),
578 				adjust_dcc_size);
579 		trim_start = (u64)dcc_start;
580 		gpu_buddy_block_trim(mm, &trim_start,
581 				     (u64)vres->base.size,
582 				     &vres->blocks);
583 	}
584 	mutex_unlock(&mgr->lock);
585 
586 	vres->base.start = 0;
587 	size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
588 		     vres->base.size);
589 	list_for_each_entry(block, &vres->blocks, link) {
590 		unsigned long start;
591 
592 		start = amdgpu_vram_mgr_block_start(block) +
593 			amdgpu_vram_mgr_block_size(block);
594 		start >>= PAGE_SHIFT;
595 
596 		if (start > PFN_UP(size))
597 			start -= PFN_UP(size);
598 		else
599 			start = 0;
600 		vres->base.start = max(vres->base.start, start);
601 
602 		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
603 	}
604 
605 	if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
606 		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
607 
608 	if (adev->gmc.xgmi.connected_to_cpu)
609 		vres->base.bus.caching = ttm_cached;
610 	else
611 		vres->base.bus.caching = ttm_write_combined;
612 
613 	atomic64_add(vis_usage, &mgr->vis_usage);
614 	*res = &vres->base;
615 	return 0;
616 
617 error_free_blocks:
618 	gpu_buddy_free_list(mm, &vres->blocks, 0);
619 	mutex_unlock(&mgr->lock);
620 error_fini:
621 	ttm_resource_fini(man, &vres->base);
622 	kfree(vres);
623 
624 	return r;
625 }
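/*
 * Sketch of the min_block_size policy in the allocation loop above
 * (userspace model; PAGE_SHIFT = 12 and the 2 MiB big-page default are
 * assumptions matching a common x86 configuration):
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_ 12
#define DEFAULT_PAGE_SIZE_ (1ULL << PAGE_SHIFT_)

static uint64_t min_block_size(uint64_t size, uint32_t page_alignment,
			       uint64_t pages_per_block)
{
	uint64_t min_bs = page_alignment ?
		(uint64_t)page_alignment << PAGE_SHIFT_ : DEFAULT_PAGE_SIZE_;
	uint64_t big = pages_per_block << PAGE_SHIFT_;

	/* whole multiples of the big-page size allocate in big pages */
	if (size >= big && !(size & (big - 1)))
		min_bs = big;
	return min_bs;
}

int main(void)
{
	uint64_t ppb = 2ULL << (20 - PAGE_SHIFT_);	/* 2 MiB in pages */

	printf("%llu\n", (unsigned long long)min_block_size(4 << 20, 0, ppb));
	printf("%llu\n", (unsigned long long)min_block_size((4 << 20) + 4096, 0, ppb));
	return 0;	/* prints 2097152, then 4096 */
}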
626 
627 /**
628  * amdgpu_vram_mgr_del - free ranges
629  *
630  * @man: TTM memory type manager
631  * @res: TTM memory object
632  *
633  * Free the allocated VRAM again.
634  */
635 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
636 				struct ttm_resource *res)
637 {
638 	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
639 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
640 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
641 	struct gpu_buddy *mm = &mgr->mm;
642 	struct gpu_buddy_block *block;
643 	uint64_t vis_usage = 0;
644 
645 	mutex_lock(&mgr->lock);
646 
647 	list_del(&vres->vres_node);
648 	memset(&vres->task, 0, sizeof(vres->task));
649 
650 	list_for_each_entry(block, &vres->blocks, link)
651 		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
652 
653 	gpu_buddy_free_list(mm, &vres->blocks, vres->flags);
654 	amdgpu_vram_mgr_do_reserve(man);
655 	mutex_unlock(&mgr->lock);
656 
657 	atomic64_sub(vis_usage, &mgr->vis_usage);
658 
659 	ttm_resource_fini(man, res);
660 	kfree(vres);
661 }
662 
663 /**
664  * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
665  *
666  * @adev: amdgpu device pointer
667  * @res: TTM memory object
668  * @offset: byte offset from the base of VRAM BO
669  * @length: number of bytes to export in sg_table
670  * @dev: the device to map the buffer for (e.g. the DMA-buf importer)
671  * @dir: dma direction
672  * @sgt: resulting sg table
673  *
674  * Allocate and fill a sg table from a VRAM allocation.
675  */
676 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
677 			      struct ttm_resource *res,
678 			      u64 offset, u64 length,
679 			      struct device *dev,
680 			      enum dma_data_direction dir,
681 			      struct sg_table **sgt)
682 {
683 	struct amdgpu_res_cursor cursor;
684 	struct scatterlist *sg;
685 	int num_entries = 0;
686 	int i, r;
687 
688 	*sgt = kmalloc_obj(**sgt);
689 	if (!*sgt)
690 		return -ENOMEM;
691 
692 	/* Determine the number of scatterlist entries to export */
693 	amdgpu_res_first(res, offset, length, &cursor);
694 	while (cursor.remaining) {
695 		num_entries++;
696 		amdgpu_res_next(&cursor, min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE));
697 	}
698 
699 	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
700 	if (r)
701 		goto error_free;
702 
703 	/* Initialize scatterlist nodes of sg_table */
704 	for_each_sgtable_sg((*sgt), sg, i)
705 		sg->length = 0;
706 
707 	/*
708 	 * Walk down GPU_BUDDY blocks to populate scatterlist nodes
709 	 * @note: Use the iterator API to get the first GPU_BUDDY block
710 	 * and the number of bytes from it. Access the following
711 	 * GPU_BUDDY block(s) if more of the buffer needs to be exported
712 	 */
713 	amdgpu_res_first(res, offset, length, &cursor);
714 	for_each_sgtable_sg((*sgt), sg, i) {
715 		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
716 		unsigned long size = min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE);
717 		dma_addr_t addr;
718 
719 		addr = dma_map_resource(dev, phys, size, dir,
720 					DMA_ATTR_SKIP_CPU_SYNC);
721 		r = dma_mapping_error(dev, addr);
722 		if (r)
723 			goto error_unmap;
724 
725 		sg_set_page(sg, NULL, size, 0);
726 		sg_dma_address(sg) = addr;
727 		sg_dma_len(sg) = size;
728 
729 		amdgpu_res_next(&cursor, size);
730 	}
731 
732 	return 0;
733 
734 error_unmap:
735 	for_each_sgtable_sg((*sgt), sg, i) {
736 		if (!sg->length)
737 			continue;
738 
739 		dma_unmap_resource(dev, sg->dma_address,
740 				   sg->length, dir,
741 				   DMA_ATTR_SKIP_CPU_SYNC);
742 	}
743 	sg_free_table(*sgt);
744 
745 error_free:
746 	kfree(*sgt);
747 	return r;
748 }
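/*
 * The export path above splits a request into scatterlist entries of at
 * most AMDGPU_MAX_SG_SEGMENT_SIZE (2UL << 30, i.e. 2 GiB). Standalone
 * model of the counting pass, ignoring the block boundaries that would
 * further split entries in the real cursor walk:
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_SEG (2ULL << 30)

int main(void)
{
	uint64_t length = (5ULL << 30) + 4096;	/* hypothetical 5 GiB + 4 KiB */
	uint64_t rem;
	int entries = 0;

	for (rem = length; rem; ) {
		uint64_t seg = rem < MAX_SEG ? rem : MAX_SEG;

		entries++;
		rem -= seg;
	}
	printf("%d entries\n", entries);	/* 3: 2G + 2G + (1G + 4K) */
	return 0;
}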
749 
750 /**
751  * amdgpu_vram_mgr_free_sgt - free a sg table
752  *
753  * @dev: device pointer
754  * @dir: data direction of resource to unmap
755  * @sgt: sg table to free
756  *
757  * Free a previously allocated sg table.
758  */
759 void amdgpu_vram_mgr_free_sgt(struct device *dev,
760 			      enum dma_data_direction dir,
761 			      struct sg_table *sgt)
762 {
763 	struct scatterlist *sg;
764 	int i;
765 
766 	for_each_sgtable_sg(sgt, sg, i)
767 		dma_unmap_resource(dev, sg->dma_address,
768 				   sg->length, dir,
769 				   DMA_ATTR_SKIP_CPU_SYNC);
770 	sg_free_table(sgt);
771 	kfree(sgt);
772 }
773 
774 /**
775  * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
776  *
777  * @mgr: amdgpu_vram_mgr pointer
778  *
779  * Returns how many bytes are used in the visible part of VRAM
780  */
781 uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
782 {
783 	return atomic64_read(&mgr->vis_usage);
784 }
785 
786 /**
787  * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
788  *
789  * @adev: amdgpu device pointer
790  *
791  * Reset the clear state of the gpu buddy blocks.
792  */
793 void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
794 {
795 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
796 	struct gpu_buddy *mm = &mgr->mm;
797 
798 	mutex_lock(&mgr->lock);
799 	gpu_buddy_reset_clear(mm, false);
800 	mutex_unlock(&mgr->lock);
801 }
802 
803 /**
804  * amdgpu_vram_mgr_intersects - test each gpu buddy block for intersection
805  *
806  * @man: TTM memory type manager
807  * @res: The resource to test
808  * @place: The place to test against
809  * @size: Size of the new allocation
810  *
811  * Test each gpu buddy block for intersection, for eviction decisions.
812  */
813 static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man,
814 				       struct ttm_resource *res,
815 				       const struct ttm_place *place,
816 				       size_t size)
817 {
818 	struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
819 	struct gpu_buddy_block *block;
820 
821 	/* Check each gpu buddy block individually */
822 	list_for_each_entry(block, &mgr->blocks, link) {
823 		unsigned long fpfn =
824 			amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
825 		unsigned long lpfn = fpfn +
826 			(amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
827 
828 		if (place->fpfn < lpfn &&
829 		    (!place->lpfn || place->lpfn > fpfn))
830 			return true;
831 	}
832 
833 	return false;
834 }
835 
836 /**
837  * amdgpu_vram_mgr_compatible - test each gpu buddy block for compatibility
838  *
839  * @man: TTM memory type manager
840  * @res: The resource to test
841  * @place: The place to test against
842  * @size: Size of the new allocation
843  *
844  * Test each gpu buddy block for placement compatibility.
845  */
846 static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man,
847 				       struct ttm_resource *res,
848 				       const struct ttm_place *place,
849 				       size_t size)
850 {
851 	struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
852 	struct gpu_buddy_block *block;
853 
854 	/* Check each gpu buddy block individually */
855 	list_for_each_entry(block, &mgr->blocks, link) {
856 		unsigned long fpfn =
857 			amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
858 		unsigned long lpfn = fpfn +
859 			(amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
860 
861 		if (fpfn < place->fpfn ||
862 		    (place->lpfn && lpfn > place->lpfn))
863 			return false;
864 	}
865 
866 	return true;
867 }
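/*
 * The two checks above reduce to half-open pfn-interval predicates:
 * intersects() is interval overlap, compatible() is containment.
 * Standalone model (pl == 0 means "no upper limit", as for the TTM
 * place above):
 */
#include <stdbool.h>
#include <stdio.h>

static bool intersects(unsigned long bf, unsigned long bl,
		       unsigned long pf, unsigned long pl)
{
	return pf < bl && (!pl || pl > bf);
}

static bool compatible(unsigned long bf, unsigned long bl,
		       unsigned long pf, unsigned long pl)
{
	return bf >= pf && (!pl || bl <= pl);
}

int main(void)
{
	/* block pfns [0x100, 0x200), place restricted to [0x180, 0x300) */
	printf("%d %d\n", intersects(0x100, 0x200, 0x180, 0x300),
	       compatible(0x100, 0x200, 0x180, 0x300));	/* 1 0 */
	return 0;
}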
868 
869 /**
870  * amdgpu_vram_mgr_debug - dump VRAM table
871  *
872  * @man: TTM memory type manager
873  * @printer: DRM printer to use
874  *
875  * Dump the table content using printk.
876  */
877 static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
878 				  struct drm_printer *printer)
879 {
880 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
881 	struct gpu_buddy *mm = &mgr->mm;
882 	struct amdgpu_vram_reservation *rsv;
883 
884 	drm_printf(printer, "  vis usage:%llu\n",
885 		   amdgpu_vram_mgr_vis_usage(mgr));
886 
887 	mutex_lock(&mgr->lock);
888 	drm_printf(printer, "default_page_size: %lluKiB\n",
889 		   mgr->default_page_size >> 10);
890 
891 	gpu_buddy_print(mm, printer);
892 
893 	drm_printf(printer, "reserved:\n");
894 	list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
895 		drm_printf(printer, "%#018llx-%#018llx: %llu\n",
896 			rsv->start, rsv->start + rsv->size, rsv->size);
897 	mutex_unlock(&mgr->lock);
898 }
899 
900 static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
901 	.alloc	= amdgpu_vram_mgr_new,
902 	.free	= amdgpu_vram_mgr_del,
903 	.intersects = amdgpu_vram_mgr_intersects,
904 	.compatible = amdgpu_vram_mgr_compatible,
905 	.debug	= amdgpu_vram_mgr_debug
906 };
907 
908 /**
909  * amdgpu_vram_mgr_init - init VRAM manager and GPU buddy allocator
910  *
911  * @adev: amdgpu_device pointer
912  *
913  * Allocate and initialize the VRAM manager.
914  */
915 int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
916 {
917 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
918 	struct ttm_resource_manager *man = &mgr->manager;
919 	int err;
920 
921 	man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size);
922 	if (IS_ERR(man->cg))
923 		return PTR_ERR(man->cg);
924 	ttm_resource_manager_init(man, &adev->mman.bdev,
925 				  adev->gmc.real_vram_size);
926 
927 	mutex_init(&mgr->lock);
928 	INIT_LIST_HEAD(&mgr->reservations_pending);
929 	INIT_LIST_HEAD(&mgr->reserved_pages);
930 	INIT_LIST_HEAD(&mgr->allocated_vres_list);
931 	mgr->default_page_size = PAGE_SIZE;
932 
933 	man->func = &amdgpu_vram_mgr_func;
934 	err = gpu_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
935 	if (err)
936 		return err;
937 
938 	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
939 	ttm_resource_manager_set_used(man, true);
940 	return 0;
941 }
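/*
 * Lifecycle sketch reusing only the gpu_buddy entry points that appear
 * in this file (a hedged outline, not a drop-in snippet; locking and
 * error handling elided):
 *
 *	struct gpu_buddy mm;
 *	LIST_HEAD(blocks);
 *
 *	gpu_buddy_init(&mm, vram_size, PAGE_SIZE);
 *	gpu_buddy_alloc_blocks(&mm, 0, vram_size, nbytes, PAGE_SIZE,
 *			       &blocks, 0);
 *	...use the allocated blocks...
 *	gpu_buddy_free_list(&mm, &blocks, 0);
 *	gpu_buddy_fini(&mm);
 */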
942 
943 /**
944  * amdgpu_vram_mgr_fini - free and destroy VRAM manager
945  *
946  * @adev: amdgpu_device pointer
947  *
948  * Destroy and free the VRAM manager. Remaining allocations are evicted
949  * first; teardown is skipped if eviction fails.
950  */
951 void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
952 {
953 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
954 	struct ttm_resource_manager *man = &mgr->manager;
955 	int ret;
956 	struct amdgpu_vram_reservation *rsv, *temp;
957 
958 	ttm_resource_manager_set_used(man, false);
959 
960 	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
961 	if (ret)
962 		return;
963 
964 	mutex_lock(&mgr->lock);
965 	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
966 		kfree(rsv);
967 
968 	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
969 		gpu_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
970 		kfree(rsv);
971 	}
972 	if (!adev->gmc.is_app_apu)
973 		gpu_buddy_fini(&mgr->mm);
974 	mutex_unlock(&mgr->lock);
975 
976 	ttm_resource_manager_cleanup(man);
977 	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
978 }
979