/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "atom.h"

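/* Largest DMA segment exported per scatterlist entry: 2 GiB */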
#define AMDGPU_MAX_SG_SEGMENT_SIZE	(2UL << 30)

struct amdgpu_vram_reservation {
        u64 start;
        u64 size;
        struct list_head allocated;
        struct list_head blocks;
};

static inline struct amdgpu_vram_mgr *
to_vram_mgr(struct ttm_resource_manager *man)
{
        return container_of(man, struct amdgpu_vram_mgr, manager);
}

static inline struct amdgpu_device *
to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
{
        return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}

static inline struct drm_buddy_block *
amdgpu_vram_mgr_first_block(struct list_head *list)
{
        return list_first_entry_or_null(list, struct drm_buddy_block, link);
}

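/* Check whether the list of blocks covers one physically contiguous range */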
static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
{
        struct drm_buddy_block *block;
        u64 start, size;

        block = amdgpu_vram_mgr_first_block(head);
        if (!block)
                return false;

        while (head != block->link.next) {
                start = amdgpu_vram_mgr_block_start(block);
                size = amdgpu_vram_mgr_block_size(block);

                block = list_entry(block->link.next, struct drm_buddy_block, link);
                if (start + size != amdgpu_vram_mgr_block_start(block))
                        return false;
        }

        return true;
}

static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
{
        struct drm_buddy_block *block;
        u64 size = 0;

        list_for_each_entry(block, head, link)
                size += amdgpu_vram_mgr_block_size(block);

        return size;
}

/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * VRAM available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */
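/*
 * All of the mem_info_* attributes below are read with a plain sysfs read,
 * e.g. (assuming the GPU is card0; the index varies per system):
 *   cat /sys/class/drm/card0/device/mem_info_vram_total
 */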
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
                                               struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
}

/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * CPU-visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
                                                   struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently in use on the device.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;

        return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of
 * CPU-visible VRAM currently in use.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
                                                  struct device_attribute *attr,
                                                  char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%llu\n",
                          amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
}

/**
 * DOC: mem_info_vram_vendor
 *
 * The amdgpu driver provides a sysfs API for reporting the vendor of the
 * installed VRAM.
 * The file mem_info_vram_vendor is used for this and returns the name of
 * the vendor.
 */
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        switch (adev->gmc.vram_vendor) {
        case SAMSUNG:
                return sysfs_emit(buf, "samsung\n");
        case INFINEON:
                return sysfs_emit(buf, "infineon\n");
        case ELPIDA:
                return sysfs_emit(buf, "elpida\n");
        case ETRON:
                return sysfs_emit(buf, "etron\n");
        case NANYA:
                return sysfs_emit(buf, "nanya\n");
        case HYNIX:
                return sysfs_emit(buf, "hynix\n");
        case MOSEL:
                return sysfs_emit(buf, "mosel\n");
        case WINBOND:
                return sysfs_emit(buf, "winbond\n");
        case ESMT:
                return sysfs_emit(buf, "esmt\n");
        case MICRON:
                return sysfs_emit(buf, "micron\n");
        default:
                return sysfs_emit(buf, "unknown\n");
        }
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
                   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
                   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
                   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
                   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
                   amdgpu_mem_info_vram_vendor, NULL);

static struct attribute *amdgpu_vram_mgr_attributes[] = {
        &dev_attr_mem_info_vram_total.attr,
        &dev_attr_mem_info_vis_vram_total.attr,
        &dev_attr_mem_info_vram_used.attr,
        &dev_attr_mem_info_vis_vram_used.attr,
        &dev_attr_mem_info_vram_vendor.attr,
        NULL
};

static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
                                            struct attribute *attr, int i)
{
        struct device *dev = kobj_to_dev(kobj);
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        if (attr == &dev_attr_mem_info_vram_vendor.attr &&
            !adev->gmc.vram_vendor)
                return 0;

        if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager))
                return 0;

        return attr->mode;
}

const struct attribute_group amdgpu_vram_mgr_attr_group = {
        .attrs = amdgpu_vram_mgr_attributes,
        .is_visible = amdgpu_vram_attrs_is_visible
};

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible block size
 *
 * @adev: amdgpu_device pointer
 * @block: DRM BUDDY block structure
 *
 * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
                                    struct drm_buddy_block *block)
{
        u64 start = amdgpu_vram_mgr_block_start(block);
        u64 end = start + amdgpu_vram_mgr_block_size(block);

        if (start >= adev->gmc.visible_vram_size)
                return 0;

        return (end > adev->gmc.visible_vram_size ?
                adev->gmc.visible_vram_size : end) - start;
}

/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_resource *res = bo->tbo.resource;
        struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
        struct drm_buddy_block *block;
        u64 usage = 0;

        if (amdgpu_gmc_vram_full_visible(&adev->gmc))
                return amdgpu_bo_size(bo);

        if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
                return 0;

        list_for_each_entry(block, &vres->blocks, link)
                usage += amdgpu_vram_mgr_vis_size(adev, block);

        return usage;
}

/* Commit the reservation of VRAM pages */
static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        struct drm_buddy *mm = &mgr->mm;
        struct amdgpu_vram_reservation *rsv, *temp;
        struct drm_buddy_block *block;
        uint64_t vis_usage;

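        /*
         * Try to allocate each pending reservation as an exact range; if
         * the range is still occupied, keep it on the pending list so a
         * later call (e.g. when VRAM is freed) can retry it.
         */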
        list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
                if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
                                           rsv->size, mm->chunk_size, &rsv->allocated,
                                           DRM_BUDDY_RANGE_ALLOCATION))
                        continue;

                block = amdgpu_vram_mgr_first_block(&rsv->allocated);
                if (!block)
                        continue;

                dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
                        rsv->start, rsv->size);

                vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
                atomic64_add(vis_usage, &mgr->vis_usage);
                spin_lock(&man->bdev->lru_lock);
                man->usage += rsv->size;
                spin_unlock(&man->bdev->lru_lock);
                list_move(&rsv->blocks, &mgr->reserved_pages);
        }
}

/**
 * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of the range in VRAM
 * @size: size of the range
 *
 * Reserve memory from start address with the specified size in VRAM
 */
int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
                                  uint64_t start, uint64_t size)
{
        struct amdgpu_vram_reservation *rsv;

        rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
        if (!rsv)
                return -ENOMEM;

        INIT_LIST_HEAD(&rsv->allocated);
        INIT_LIST_HEAD(&rsv->blocks);

        rsv->start = start;
        rsv->size = size;

        mutex_lock(&mgr->lock);
        list_add_tail(&rsv->blocks, &mgr->reservations_pending);
        amdgpu_vram_mgr_do_reserve(&mgr->manager);
        mutex_unlock(&mgr->lock);

        return 0;
}

/**
 * amdgpu_vram_mgr_query_page_status - query the reservation status
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of a page in VRAM
 *
 * Returns:
 * -EBUSY: the page is still held in the pending list
 * 0: the page has been reserved
 * -ENOENT: the input page is not a reservation
 */
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                                      uint64_t start)
{
        struct amdgpu_vram_reservation *rsv;
        int ret;

        mutex_lock(&mgr->lock);

        list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
                if (rsv->start <= start &&
                    (start < (rsv->start + rsv->size))) {
                        ret = -EBUSY;
                        goto out;
                }
        }

        list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
                if (rsv->start <= start &&
                    (start < (rsv->start + rsv->size))) {
                        ret = 0;
                        goto out;
                }
        }

        ret = -ENOENT;
out:
        mutex_unlock(&mgr->lock);
        return ret;
}

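/**
 * amdgpu_vram_mgr_query_address_block_info - find the block holding an address
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @address: address within VRAM to look up
 * @info: filled in with the matching block's start, size and owning task
 *
 * Returns:
 * 0 if an allocated block contains @address, -ENOENT otherwise.
 */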
int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
                                             uint64_t address, struct amdgpu_vram_block_info *info)
{
        struct amdgpu_vram_mgr_resource *vres;
        struct drm_buddy_block *block;
        u64 start, size;
        int ret = -ENOENT;

        mutex_lock(&mgr->lock);
        list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) {
                list_for_each_entry(block, &vres->blocks, link) {
                        start = amdgpu_vram_mgr_block_start(block);
                        size = amdgpu_vram_mgr_block_size(block);
                        if ((start <= address) && (address < (start + size))) {
                                info->start = start;
                                info->size = size;
                                memcpy(&info->task, &vres->task, sizeof(vres->task));
                                ret = 0;
                                goto out;
                        }
                }
        }

out:
        mutex_unlock(&mgr->lock);

        return ret;
}

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @res: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
                               struct ttm_buffer_object *tbo,
                               const struct ttm_place *place,
                               struct ttm_resource **res)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
        u64 vis_usage = 0, max_bytes, min_block_size;
        struct amdgpu_vram_mgr_resource *vres;
        u64 size, remaining_size, lpfn, fpfn;
        unsigned int adjust_dcc_size = 0;
        struct drm_buddy *mm = &mgr->mm;
        struct drm_buddy_block *block;
        unsigned long pages_per_block;
        int r;

        lpfn = (u64)place->lpfn << PAGE_SHIFT;
        if (!lpfn || lpfn > man->size)
                lpfn = man->size;

        fpfn = (u64)place->fpfn << PAGE_SHIFT;

        max_bytes = adev->gmc.mc_vram_size;
        if (tbo->type != ttm_bo_type_kernel)
                max_bytes -= AMDGPU_VM_RESERVED_VRAM;

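        /*
         * Pick the allocation granularity: contiguous BOs must come out of
         * a single block, everything else defaults to huge page sized (2MB)
         * blocks, bumped up to the BO's page alignment if that is larger.
         */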
        if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
                pages_per_block = ~0ul;
        } else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                pages_per_block = HPAGE_PMD_NR;
#else
                /* default to 2MB */
                pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
                pages_per_block = max_t(u32, pages_per_block,
                                        tbo->page_alignment);
        }

        vres = kzalloc(sizeof(*vres), GFP_KERNEL);
        if (!vres)
                return -ENOMEM;

        ttm_resource_init(tbo, place, &vres->base);

        /* bail out quickly if there's likely not enough VRAM for this BO */
        if (ttm_resource_manager_usage(man) > max_bytes) {
                r = -ENOSPC;
                goto error_fini;
        }

        INIT_LIST_HEAD(&vres->blocks);

        if (place->flags & TTM_PL_FLAG_TOPDOWN)
                vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

        if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
                vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;

        if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
                vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION;

        if (fpfn || lpfn != mgr->mm.size)
                /* Allocate blocks in desired range */
                vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

        if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC &&
            adev->gmc.gmc_funcs->get_dcc_alignment)
                adjust_dcc_size = amdgpu_gmc_get_dcc_alignment(adev);

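        /*
         * Contiguous DCC buffers are over-allocated to the next power of
         * two (with trimming disabled) so the start address can be rounded
         * up to the DCC alignment and the surplus trimmed off afterwards.
         */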
        remaining_size = (u64)vres->base.size;
        if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
                unsigned int dcc_size;

                dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size);
                remaining_size = (u64)dcc_size;

                vres->flags |= DRM_BUDDY_TRIM_DISABLE;
        }

        mutex_lock(&mgr->lock);
        while (remaining_size) {
                if (tbo->page_alignment)
                        min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
                else
                        min_block_size = mgr->default_page_size;

                size = remaining_size;

                if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size)
                        min_block_size = size;
                else if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
                         !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
                        min_block_size = (u64)pages_per_block << PAGE_SHIFT;

                BUG_ON(min_block_size < mm->chunk_size);

                r = drm_buddy_alloc_blocks(mm, fpfn,
                                           lpfn,
                                           size,
                                           min_block_size,
                                           &vres->blocks,
                                           vres->flags);

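                /*
                 * If a contiguous allocation ran out of space but the
                 * placement does not strictly require contiguous memory,
                 * drop the contiguous flag and retry with 2MB blocks.
                 */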
                if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul &&
                    !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
                        vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION;
                        pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT),
                                                tbo->page_alignment);

                        continue;
                }

                if (unlikely(r))
                        goto error_free_blocks;

                if (size > remaining_size)
                        remaining_size = 0;
                else
                        remaining_size -= size;
        }

        vres->task.pid = task_pid_nr(current);
        get_task_comm(vres->task.comm, current);
        list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);

        if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
                struct drm_buddy_block *dcc_block;
                unsigned long dcc_start;
                u64 trim_start;

                dcc_block = amdgpu_vram_mgr_first_block(&vres->blocks);
                /* Adjust the start address for DCC buffers only */
                dcc_start =
                        roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block),
                                adjust_dcc_size);
                trim_start = (u64)dcc_start;
                drm_buddy_block_trim(mm, &trim_start,
                                     (u64)vres->base.size,
                                     &vres->blocks);
        }
        mutex_unlock(&mgr->lock);

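        /*
         * Compute a conservative res->start: for each block take its end
         * page offset minus the resource size in pages and keep the
         * maximum, i.e. the highest page the resource could start at
         * (used e.g. by amdgpu_vram_mgr_bo_visible_size()).
         */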
        vres->base.start = 0;
        size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
                     vres->base.size);
        list_for_each_entry(block, &vres->blocks, link) {
                unsigned long start;

                start = amdgpu_vram_mgr_block_start(block) +
                        amdgpu_vram_mgr_block_size(block);
                start >>= PAGE_SHIFT;

                if (start > PFN_UP(size))
                        start -= PFN_UP(size);
                else
                        start = 0;
                vres->base.start = max(vres->base.start, start);

                vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
        }

        if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
                vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

        if (adev->gmc.xgmi.connected_to_cpu)
                vres->base.bus.caching = ttm_cached;
        else
                vres->base.bus.caching = ttm_write_combined;

        atomic64_add(vis_usage, &mgr->vis_usage);
        *res = &vres->base;
        return 0;

error_free_blocks:
        drm_buddy_free_list(mm, &vres->blocks, 0);
        mutex_unlock(&mgr->lock);
error_fini:
        ttm_resource_fini(man, &vres->base);
        kfree(vres);

        return r;
}

/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @res: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
                                struct ttm_resource *res)
{
        struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        struct drm_buddy *mm = &mgr->mm;
        struct drm_buddy_block *block;
        uint64_t vis_usage = 0;

        mutex_lock(&mgr->lock);

        list_del(&vres->vres_node);
        memset(&vres->task, 0, sizeof(vres->task));

        list_for_each_entry(block, &vres->blocks, link)
                vis_usage += amdgpu_vram_mgr_vis_size(adev, block);

        drm_buddy_free_list(mm, &vres->blocks, vres->flags);
        amdgpu_vram_mgr_do_reserve(man);
        mutex_unlock(&mgr->lock);

        atomic64_sub(vis_usage, &mgr->vis_usage);

        ttm_resource_fini(man, res);
        kfree(vres);
}

/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
 *
 * @adev: amdgpu device pointer
 * @res: TTM memory object
 * @offset: byte offset from the base of VRAM BO
 * @length: number of bytes to export in sg_table
 * @dev: the other device
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill a sg table from a VRAM allocation.
 */
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
                              struct ttm_resource *res,
                              u64 offset, u64 length,
                              struct device *dev,
                              enum dma_data_direction dir,
                              struct sg_table **sgt)
{
        struct amdgpu_res_cursor cursor;
        struct scatterlist *sg;
        int num_entries = 0;
        int i, r;

        *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
        if (!*sgt)
                return -ENOMEM;

        /* Determine the number of DRM_BUDDY blocks to export */
        amdgpu_res_first(res, offset, length, &cursor);
        while (cursor.remaining) {
                num_entries++;
                amdgpu_res_next(&cursor, min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE));
        }

        r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
        if (r)
                goto error_free;

        /* Initialize scatterlist nodes of sg_table */
        for_each_sgtable_sg((*sgt), sg, i)
                sg->length = 0;

        /*
         * Walk down DRM_BUDDY blocks to populate scatterlist nodes
         * @note: Use the iterator API to get the first DRM_BUDDY block
         * and the number of bytes from it. Access the following
         * DRM_BUDDY block(s) if more buffers need to be exported
         */
        amdgpu_res_first(res, offset, length, &cursor);
        for_each_sgtable_sg((*sgt), sg, i) {
                phys_addr_t phys = cursor.start + adev->gmc.aper_base;
                unsigned long size = min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE);
                dma_addr_t addr;

                addr = dma_map_resource(dev, phys, size, dir,
                                        DMA_ATTR_SKIP_CPU_SYNC);
                r = dma_mapping_error(dev, addr);
                if (r)
                        goto error_unmap;

                sg_set_page(sg, NULL, size, 0);
                sg_dma_address(sg) = addr;
                sg_dma_len(sg) = size;

                amdgpu_res_next(&cursor, size);
        }

        return 0;

error_unmap:
        for_each_sgtable_sg((*sgt), sg, i) {
                if (!sg->length)
                        continue;

                dma_unmap_resource(dev, sg->dma_address,
                                   sg->length, dir,
                                   DMA_ATTR_SKIP_CPU_SYNC);
        }
        sg_free_table(*sgt);

error_free:
        kfree(*sgt);
        return r;
}

/**
 * amdgpu_vram_mgr_free_sgt - free an sg table
 *
 * @dev: device pointer
 * @dir: data direction of resource to unmap
 * @sgt: sg table to free
 *
 * Free a previously allocated sg table.
 */
void amdgpu_vram_mgr_free_sgt(struct device *dev,
                              enum dma_data_direction dir,
                              struct sg_table *sgt)
{
        struct scatterlist *sg;
        int i;

        for_each_sgtable_sg(sgt, sg, i)
                dma_unmap_resource(dev, sg->dma_address,
                                   sg->length, dir,
                                   DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(sgt);
}

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @mgr: amdgpu_vram_mgr pointer
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
{
        return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
 *
 * @adev: amdgpu device pointer
 *
 * Reset the cleared drm buddy blocks.
 */
void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
{
        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
        struct drm_buddy *mm = &mgr->mm;

        mutex_lock(&mgr->lock);
        drm_buddy_reset_clear(mm, false);
        mutex_unlock(&mgr->lock);
}

/**
 * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
 *
 * @man: TTM memory type manager
 * @res: The resource to test
 * @place: The place to test against
 * @size: Size of the new allocation
 *
 * Test each drm buddy block for intersection for eviction decision.
 */
static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man,
                                       struct ttm_resource *res,
                                       const struct ttm_place *place,
                                       size_t size)
{
        struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
        struct drm_buddy_block *block;

        /* Check each drm buddy block individually */
        list_for_each_entry(block, &mgr->blocks, link) {
                unsigned long fpfn =
                        amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
                unsigned long lpfn = fpfn +
                        (amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);

                if (place->fpfn < lpfn &&
                    (!place->lpfn || place->lpfn > fpfn))
                        return true;
        }

        return false;
}

/**
 * amdgpu_vram_mgr_compatible - test each drm buddy block for compatibility
 *
 * @man: TTM memory type manager
 * @res: The resource to test
 * @place: The place to test against
 * @size: Size of the new allocation
 *
 * Test each drm buddy block for placement compatibility.
 */
static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man,
                                       struct ttm_resource *res,
                                       const struct ttm_place *place,
                                       size_t size)
{
        struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
        struct drm_buddy_block *block;

        /* Check each drm buddy block individually */
        list_for_each_entry(block, &mgr->blocks, link) {
                unsigned long fpfn =
                        amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
                unsigned long lpfn = fpfn +
                        (amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);

                if (fpfn < place->fpfn ||
                    (place->lpfn && lpfn > place->lpfn))
                        return false;
        }

        return true;
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using printk.
 */
static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
                                  struct drm_printer *printer)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct drm_buddy *mm = &mgr->mm;
        struct amdgpu_vram_reservation *rsv;

        drm_printf(printer, " vis usage:%llu\n",
                   amdgpu_vram_mgr_vis_usage(mgr));

        mutex_lock(&mgr->lock);
        drm_printf(printer, "default_page_size: %lluKiB\n",
                   mgr->default_page_size >> 10);

        drm_buddy_print(mm, printer);

        drm_printf(printer, "reserved:\n");
        list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
                drm_printf(printer, "%#018llx-%#018llx: %llu\n",
                           rsv->start, rsv->start + rsv->size, rsv->size);
        mutex_unlock(&mgr->lock);
}

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
        .alloc = amdgpu_vram_mgr_new,
        .free = amdgpu_vram_mgr_del,
        .intersects = amdgpu_vram_mgr_intersects,
        .compatible = amdgpu_vram_mgr_compatible,
        .debug = amdgpu_vram_mgr_debug
};

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the VRAM manager.
 */
int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
        struct ttm_resource_manager *man = &mgr->manager;
        int err;

        man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size);
        if (IS_ERR(man->cg))
                return PTR_ERR(man->cg);
        ttm_resource_manager_init(man, &adev->mman.bdev,
                                  adev->gmc.real_vram_size);

        mutex_init(&mgr->lock);
        INIT_LIST_HEAD(&mgr->reservations_pending);
        INIT_LIST_HEAD(&mgr->reserved_pages);
        INIT_LIST_HEAD(&mgr->allocated_vres_list);
        mgr->default_page_size = PAGE_SIZE;

        man->func = &amdgpu_vram_mgr_func;
        err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
        if (err)
                return err;

        ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
        ttm_resource_manager_set_used(man, true);
        return 0;
}

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy and free the VRAM manager. Bails out early, leaving the manager
 * in place, if allocated ranges are still in use and cannot be evicted.
 */
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
        struct ttm_resource_manager *man = &mgr->manager;
        int ret;
        struct amdgpu_vram_reservation *rsv, *temp;

        ttm_resource_manager_set_used(man, false);

        ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
        if (ret)
                return;

        mutex_lock(&mgr->lock);
        list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
                kfree(rsv);

        list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
                drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
                kfree(rsv);
        }
        if (!adev->gmc.is_app_apu)
                drm_buddy_fini(&mgr->mm);
        mutex_unlock(&mgr->lock);

        ttm_resource_manager_cleanup(man);
        ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}