/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_hmm.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"

MODULE_IMPORT_NS("DMA_BUF");

#define AMDGPU_TTM_VRAM_MAX_DW_READ	((size_t)128)

static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem);
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm);

static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
				   unsigned int type,
				   uint64_t size_in_page)
{
	return ttm_range_man_init(&adev->mman.bdev, type,
				  false, size_in_page);
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->num_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
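	/* Discardable BOs don't need their contents preserved, so there is
	 * nothing to copy anywhere on eviction; an empty placement list
	 * lets TTM simply drop the backing store.
	 */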
	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
		placement->num_placement = 0;
		return;
	}

	switch (bo->resource->mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
	case AMDGPU_PL_DOORBELL:
	case AMDGPU_PL_MMIO_REMAP:
		placement->num_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);

		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_res_cpu_visible(adev, bo->resource)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		}
		break;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}

static struct dma_fence *
amdgpu_ttm_job_submit(struct amdgpu_device *adev, struct amdgpu_ttm_buffer_entity *entity,
		      struct amdgpu_job *job, u32 num_dw)
{
	struct amdgpu_ring *ring;

	ring = adev->mman.buffer_funcs_ring;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	lockdep_assert_held(&entity->lock);

	return amdgpu_job_submit(job);
}

/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @entity: entity to run the window setup job
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_cur: range to map
 * @window: which GART window to use
 * @tmz: if we should setup a TMZ enabled mapping
 * @size: in number of bytes to map, out number of bytes mapped
 * @addr: resulting address inside the MC address space
 *
 * Setup one of the GART windows to access a specific piece of memory or return
 * the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct amdgpu_ttm_buffer_entity *entity,
				 struct ttm_buffer_object *bo,
				 struct ttm_resource *mem,
				 struct amdgpu_res_cursor *mm_cur,
				 unsigned int window,
				 bool tmz, uint64_t *size, uint64_t *addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	unsigned int offset, num_pages, num_dw, num_bytes;
	uint64_t src_addr, dst_addr;
	struct amdgpu_job *job;
	void *cpu_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
		return -EINVAL;

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
			mm_cur->start;
		return 0;
	}

	/*
	 * If start begins at an offset inside the page, then adjust the size
	 * and addr accordingly
	 */
	offset = mm_cur->start & ~PAGE_MASK;

	num_pages = PFN_UP(*size + offset);
	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);

	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);

	*addr = amdgpu_compute_gart_address(&adev->gmc, entity, window);
	*addr += offset;

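	/* The IB is laid out as num_dw dwords of copy commands followed by
	 * the PTE array to copy into the GART table: one 8-byte entry per
	 * GPU page of the mapped range.
	 */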
	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

	r = amdgpu_job_alloc_with_ib(adev, &entity->base,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job,
				     AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += (entity->gart_window_offs[window] >> AMDGPU_GPU_PAGE_SHIFT) * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, 0);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		dma_addr_t *dma_addr;

		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
	} else {
		u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset;

		amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
	}

	dma_fence_put(amdgpu_ttm_job_submit(adev, entity, job, num_dw));
	return 0;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @entity: entity to run the jobs
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo can be the same BO for a
 * move, or different BOs for a BO-to-BO copy.
 *
 */
__attribute__((nonnull))
static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
				      struct amdgpu_ttm_buffer_entity *entity,
				      const struct amdgpu_copy_mem *src,
				      const struct amdgpu_copy_mem *dst,
				      uint64_t size, bool tmz,
				      struct dma_resv *resv,
				      struct dma_fence **f)
{
	struct amdgpu_res_cursor src_mm, dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;
	uint32_t copy_flags = 0;
	struct amdgpu_bo *abo_src, *abo_dst;

	if (!adev->mman.buffer_funcs_enabled) {
		dev_err(adev->dev,
			"Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);

	mutex_lock(&entity->lock);
	while (src_mm.remaining) {
		uint64_t from, to, cur_size, tiling_flags;
		uint32_t num_type, data_format, max_com, write_compress_disable;
		struct dma_fence *next;

		/* Never copy more than 256MiB at once to avoid a timeout */
		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(entity, src->bo, src->mem, &src_mm,
					  0, tmz, &cur_size, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(entity, dst->bo, dst->mem, &dst_mm,
					  1, tmz, &cur_size, &to);
		if (r)
			goto error;

		abo_src = ttm_to_amdgpu_bo(src->bo);
		abo_dst = ttm_to_amdgpu_bo(dst->bo);
		if (tmz)
			copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
		if ((abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
		    (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))
			copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
		if ((abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
		    (dst->mem->mem_type == TTM_PL_VRAM)) {
			copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
			amdgpu_bo_get_tiling_flags(abo_dst, &tiling_flags);
			max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
			num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
			data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
			write_compress_disable =
				AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
			copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
				       AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
				       AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
				       AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
							     write_compress_disable));
		}

		r = amdgpu_copy_buffer(adev, entity, from, to, cur_size, resv,
				       &next, true, copy_flags);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		amdgpu_res_next(&src_mm, cur_size);
		amdgpu_res_next(&dst_mm, cur_size);
	}
error:
	mutex_unlock(&entity->lock);
	*f = fence;
	return r;
}

/*
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *new_mem,
			    struct ttm_resource *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_ttm_buffer_entity *entity;
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;
	u32 e;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

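	/* Round-robin across the move entities so independent buffer moves
	 * can run in parallel.
	 */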
	e = atomic_inc_return(&adev->mman.next_move_entity) %
	    adev->mman.num_move_entities;
	entity = &adev->mman.move_entities[e];

	r = amdgpu_ttm_copy_mem_to_mem(adev,
				       entity,
				       &src, &dst,
				       new_mem->size,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(entity, abo, 0, NULL, &wipe_fence,
				       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			amdgpu_vram_mgr_set_cleared(bo->resource);
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
 * @adev: amdgpu device
 * @res: the resource to check
 *
 * Returns: true if the full resource is CPU visible, false otherwise.
 */
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
			    struct ttm_resource *res)
{
	struct amdgpu_res_cursor cursor;

	if (!res)
		return false;

	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
	    res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
	    res->mem_type == AMDGPU_PL_MMIO_REMAP)
		return true;

	if (res->mem_type != TTM_PL_VRAM)
		return false;

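	/* Walk every block of the resource; a single block ending beyond
	 * the visible window makes the whole resource CPU inaccessible.
	 */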
	amdgpu_res_first(res, 0, res->size, &cursor);
	while (cursor.remaining) {
		if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
			return false;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return true;
}

/*
 * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_res_copyable(struct amdgpu_device *adev,
				struct ttm_resource *mem)
{
	if (!amdgpu_res_cpu_visible(adev, mem))
		return false;

	/* ttm_resource_ioremap only supports contiguous memory */
	if (mem->mem_type == TTM_PL_VRAM &&
	    !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
		return false;

	return true;
}

/*
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;
	int r;

	if (new_mem->mem_type == TTM_PL_TT ||
	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	abo = ttm_to_amdgpu_bo(bo);
	adev = amdgpu_ttm_adev(bo->bdev);

	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
			 bo->ttm == NULL)) {
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    (new_mem->mem_type == TTM_PL_TT ||
	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT ||
	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = ttm_bo_wait_ctx(bo, ctx);
		if (r)
			return r;

		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    old_mem->mem_type == AMDGPU_PL_DOORBELL ||
	    old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_DOORBELL ||
	    new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
		/* Nothing to save here */
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

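	/* Multihop: bounce SYSTEM <-> VRAM moves through a temporary GTT
	 * placement so the pages are populated and GART bound for the blit.
	 */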
	if (adev->mman.buffer_funcs_enabled &&
	    ((old_mem->mem_type == TTM_PL_SYSTEM &&
	      new_mem->mem_type == TTM_PL_VRAM) ||
	     (old_mem->mem_type == TTM_PL_VRAM &&
	      new_mem->mem_type == TTM_PL_SYSTEM))) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = TTM_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		return -EMULTIHOP;
	}

	amdgpu_bo_move_notify(bo, evict, new_mem);
	if (adev->mman.buffer_funcs_enabled)
		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
	else
		r = -ENODEV;

	if (r) {
		/* Check that all memory is CPU accessible */
		if (!amdgpu_res_copyable(adev, old_mem) ||
		    !amdgpu_res_copyable(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	/* update statistics after the move */
	if (evict)
		atomic64_inc(&adev->num_evictions);
	atomic64_add(bo->base.size, &adev->num_bytes_moved);
	return 0;
}

/*
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
				     struct ttm_resource *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;

		if (adev->mman.aper_base_kaddr &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.offset += adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	case AMDGPU_PL_DOORBELL:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.offset += adev->doorbell.base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_uncached;
		break;
	case AMDGPU_PL_MMIO_REMAP:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.offset += adev->rmmio_remap.bus_addr;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_uncached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
			 &cursor);

	if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
		return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
	else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
		return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;

	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}

/**
 * amdgpu_ttm_domain_start - Returns GPU start address
 * @adev: amdgpu device object
 * @type: type of the memory
 *
 * Returns:
 * GPU start address of a memory domain
 */

uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
	switch (type) {
	case TTM_PL_TT:
		return adev->gmc.gart_start;
	case TTM_PL_VRAM:
		return adev->gmc.vram_start;
	}

	return 0;
}

/*
 * TTM backend functions.
 */
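/*
 * Per-BO TTM state: the GEM object backing the tt, the GART offset once
 * bound, the userptr address/task/flags for userptr BOs, and the pool the
 * pages are allocated from.
 */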
struct amdgpu_ttm_tt {
	struct ttm_tt		ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
	bool			bound;
	int32_t			pool_id;
};

#define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * The calling function must call amdgpu_ttm_tt_userptr_range_done() once and
 * only once afterwards to stop HMM tracking. It is the caller's responsibility
 * to ensure that the range points to valid memory and that it is freed afterwards.
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
				 struct amdgpu_hmm_range *range)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	bool readonly;
	int r = 0;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, start);
	if (unlikely(!vma)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		     vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}

	readonly = amdgpu_ttm_tt_is_readonly(ttm);
	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
				       readonly, NULL, range);
out_unlock:
	mmap_read_unlock(mm);
	if (r)
		pr_debug("failed %d to get user pages 0x%lx\n", r, start);

	mmput(mm);

	return r;
}

#endif

/*
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
}

/*
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 **/
static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	int r;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg_table;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
				       ttm->num_pages);

	return 0;

release_sg_table:
	sg_free_table(ttm->sg);
release_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return r;
}

/*
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
					struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg || !ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
}

/*
 * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
 * MQDn+CtrlStackn where n is the number of XCCs per partition.
 * pages_per_xcc is the size of one MQD+CtrlStack. The first page is MQD
 * and uses memory type default, UC. The rest of pages_per_xcc are
 * Ctrl stack and modify their memory type to NC.
 */
static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
					  struct ttm_tt *ttm, uint64_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t total_pages = ttm->num_pages;
	int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
	uint64_t page_idx, pages_per_xcc;
	int i;

	pages_per_xcc = total_pages;
	do_div(pages_per_xcc, num_xcc);

	for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
		amdgpu_gart_map_gfx9_mqd(adev,
					 gtt->offset + (page_idx << PAGE_SHIFT),
					 pages_per_xcc, &gtt->ttm.dma_address[page_idx],
					 flags);
	}
}

static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				 struct ttm_buffer_object *tbo,
				 uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
	} else {
		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				 gtt->ttm.dma_address, flags);
	}
	gtt->bound = true;
}

/*
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	uint64_t flags;
	int r;

	if (!bo_mem)
		return -EINVAL;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
		if (r) {
			dev_err(adev->dev, "failed to pin userptr\n");
			return r;
		}
	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
					       ttm->num_pages);
	}

	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

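	/* Without a real GART address there is nothing to bind yet; record
	 * an invalid offset until amdgpu_ttm_alloc_gart() assigns GART space.
	 */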
	if (bo_mem->mem_type != TTM_PL_TT ||
	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			 gtt->ttm.dma_address, flags);
	gtt->bound = true;
	return 0;
}

/*
 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 * through AGP or GART aperture.
 *
 * If bo is accessible through AGP aperture, then use AGP aperture
 * to access bo; otherwise allocate logical space in GART aperture
 * and map bo to GART aperture.
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	struct ttm_placement placement;
	struct ttm_place placements;
	struct ttm_resource *tmp;
	uint64_t addr, flags;
	int r;

	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* allocate GART space */
	placement.num_placement = 1;
	placement.placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.mem_type = TTM_PL_TT;
	placements.flags = bo->resource->placement;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	/* compute PTE flags for this buffer object */
	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);

	/* Bind pages */
	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
	amdgpu_ttm_gart_bind(adev, bo, flags);
	amdgpu_gart_invalidate_tlb(adev);
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, tmp);

	return 0;
}

/*
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;

	if (!tbo->ttm)
		return;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
	amdgpu_ttm_gart_bind(adev, tbo, flags);
}

/*
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr) {
		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
	} else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
	}

	if (!gtt->bound)
		return;

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	gtt->bound = false;
}

static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
				       struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

/**
 * amdgpu_ttm_mmio_remap_alloc_sgt - build an sg_table for MMIO_REMAP I/O aperture
 * @adev: amdgpu device providing the remap BAR base (adev->rmmio_remap.bus_addr)
 * @res: TTM resource of the BO to export; expected to live in AMDGPU_PL_MMIO_REMAP
 * @dev: importing device to map for (typically @attach->dev in dma-buf paths)
 * @dir: DMA data direction for the importer (passed to dma_map_resource())
 * @sgt: output; on success, set to a newly allocated sg_table describing the I/O span
 *
 * The HDP flush page (AMDGPU_PL_MMIO_REMAP) is a fixed hardware I/O window in a
 * PCI BAR; there are no struct pages to back it. Importers still need a DMA
 * address list, so we synthesize a minimal sg_table and populate it from
 * dma_map_resource(), not from pages. Using the common amdgpu_res_cursor walker
 * keeps the offset/size math consistent with other TTM/manager users.
 *
 * - @res is assumed to be a small, contiguous I/O region (typically a single 4 KiB
 *   page) in AMDGPU_PL_MMIO_REMAP. Callers should validate placement before calling.
 * - The sg entry is created with sg_set_page(sg, NULL, ...) to reflect I/O space.
 * - The mapping uses DMA_ATTR_SKIP_CPU_SYNC because this is MMIO, not cacheable RAM.
 * - Peer reachability / p2pdma policy checks must be done by the caller.
 *
 * Return:
 * * 0 on success, with *@sgt set to a valid table that must be freed via
 *   amdgpu_ttm_mmio_remap_free_sgt().
 * * -ENOMEM if allocation of the sg_table fails.
 * * -EIO if dma_map_resource() fails.
 */
int amdgpu_ttm_mmio_remap_alloc_sgt(struct amdgpu_device *adev,
				    struct ttm_resource *res,
				    struct device *dev,
				    enum dma_data_direction dir,
				    struct sg_table **sgt)
{
	struct amdgpu_res_cursor cur;
	dma_addr_t dma;
	resource_size_t phys;
	struct scatterlist *sg;
	int r;

	/* Walk the resource once; MMIO_REMAP is expected to be contiguous+small. */
	amdgpu_res_first(res, 0, res->size, &cur);

	/* Translate byte offset in the remap window into a host physical BAR address. */
	phys = adev->rmmio_remap.bus_addr + cur.start;

	/* Build a single-entry sg_table mapped as I/O (no struct page backing). */
	*sgt = kzalloc_obj(**sgt);
	if (!*sgt)
		return -ENOMEM;
	r = sg_alloc_table(*sgt, 1, GFP_KERNEL);
	if (r) {
		kfree(*sgt);
		return r;
	}
	sg = (*sgt)->sgl;
	sg_set_page(sg, NULL, cur.size, 0); /* WHY: I/O space -> no pages */

	dma = dma_map_resource(dev, phys, cur.size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma)) {
		sg_free_table(*sgt);
		kfree(*sgt);
		return -EIO;
	}
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = cur.size;
	return 0;
}

void amdgpu_ttm_mmio_remap_free_sgt(struct device *dev,
				    enum dma_data_direction dir,
				    struct sg_table *sgt)
{
	struct scatterlist *sg = sgt->sgl;

	dma_unmap_resource(dev, sg_dma_address(sg), sg_dma_len(sg),
			   dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 * @page_flags: Page flags to be added to the ttm_tt object
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_ttm_tt *gtt;
	enum ttm_caching caching;

	gtt = kzalloc_obj(struct amdgpu_ttm_tt);
	if (!gtt)
		return NULL;

	gtt->gobj = &bo->base;
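	/* Remember which memory-partition pool the pages should later be
	 * allocated from.
	 */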
	if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
		gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
	else
		gtt->pool_id = abo->xcp_id;

	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

/*
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct ttm_pool *pool;
	pgoff_t i;
	int ret;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt->userptr) {
		ttm->sg = kzalloc_obj(struct sg_table);
		if (!ttm->sg)
			return -ENOMEM;
		return 0;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;
	ret = ttm_pool_alloc(pool, ttm, ctx);
	if (ret)
		return ret;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;

	return 0;
}

/*
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct amdgpu_device *adev;
	struct ttm_pool *pool;
	pgoff_t i;

	amdgpu_ttm_backend_unbind(bdev, ttm);

	if (gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = NULL;

	adev = amdgpu_ttm_adev(bdev);

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;

	return ttm_pool_free(pool, ttm);
}

/**
 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
 * task
 *
 * @tbo: The ttm_buffer_object that contains the userptr
 * @user_addr: The returned value
 */
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
			      uint64_t *user_addr)
{
	struct amdgpu_ttm_tt *gtt;

	if (!tbo->ttm)
		return -EINVAL;

	gtt = (void *)tbo->ttm;
	*user_addr = gtt->userptr;
	return 0;
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @bo: The ttm_buffer_object to bind this userptr to
 * @addr: The address in the current task's VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
 * initialize GPU VM for a KFD process.
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt;

	if (!bo->ttm) {
		/* TODO: We want a separate TTM object type for userptrs */
		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
		if (bo->ttm == NULL)
			return -ENOMEM;
	}

	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

	gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

/*
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}

/*
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end, unsigned long *userptr)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	if (userptr)
		*userptr = gtt->userptr;
	return true;
}

/*
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/*
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && (mem->mem_type == TTM_PL_TT ||
		    mem->mem_type == AMDGPU_PL_DOORBELL ||
		    mem->mem_type == AMDGPU_PL_PREEMPT ||
		    mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm && ttm->caching == ttm_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	if (mem && mem->mem_type == TTM_PL_VRAM &&
	    mem->bus.caching == ttm_cached)
		flags |= AMDGPU_PTE_SNOOPED;

	return flags;
}

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu_device pointer
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

/*
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	struct dma_resv_iter resv_cursor;
	struct dma_fence *f;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return ttm_bo_eviction_valuable(bo, place);

	/* Swapout? */
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, f) {
		if (amdkfd_fence_check_mm(f, current->mm) &&
		    !(place->flags & TTM_PL_FLAG_CONTIGUOUS))
			return false;
	}

	/* Preemptible BOs don't own system resources managed by the
	 * driver (pages, VRAM, GART space). They point to resources
	 * owned by someone else (e.g. pageable memory in user mode
	 * or a DMABuf). They are used in a preemptible context so we
	 * can guarantee no deadlocks and good QoS in case of MMU
	 * notifiers or DMABuf move notifiers from the resource owner.
	 */
	if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
		return false;

	if (bo->resource->mem_type == TTM_PL_TT &&
	    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}

static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
				      void *buf, size_t size, bool write)
{
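	/* MM register access works on aligned 32-bit words; unaligned head
	 * and tail bytes are handled with a masked read-modify-write.
	 */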
	while (size) {
		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
		uint64_t bytes = 4 - (pos & 0x3);
		uint32_t shift = (pos & 0x3) * 8;
		uint32_t mask = 0xffffffff << shift;
		uint32_t value = 0;

		if (size < bytes) {
			mask &= 0xffffffff >> (bytes - size) * 8;
			bytes = size;
		}

		if (mask != 0xffffffff) {
			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
		}

		pos += bytes;
		buf += bytes;
		size -= bytes;
	}
}

static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
					 unsigned long offset, void *buf,
					 int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor src_mm;
	struct amdgpu_job *job;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	unsigned int num_dw;
	int r, idx;

	if (len != PAGE_SIZE)
		return -EINVAL;

	if (!adev->mman.sdma_access_ptr)
		return -EACCES;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	if (write)
		memcpy(adev->mman.sdma_access_ptr, buf, len);

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.default_entity.base,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
				     &job,
				     AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
	if (r)
		goto out;

	mutex_lock(&adev->mman.default_entity.lock);
	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
		   src_mm.start;
	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
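	/* A write copies staging buffer -> BO, a read copies BO -> staging */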
	if (write)
		swap(src_addr, dst_addr);

	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
				PAGE_SIZE, 0);

	fence = amdgpu_ttm_job_submit(adev, &adev->mman.default_entity, job, num_dw);
	mutex_unlock(&adev->mman.default_entity.lock);

	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
		r = -ETIMEDOUT;
	dma_fence_put(fence);

	if (!(r || write))
		memcpy(buf, adev->mman.sdma_access_ptr, len);
out:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo: The buffer object to read/write
 * @offset: Offset into buffer object
 * @buf: Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write: true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset, void *buf, int len,
				    int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor cursor;
	int ret = 0;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return -EIO;

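	/* Prefer a bounce copy through the SDMA staging page; fall back to
	 * MMIO aperture access when that fails or timeouts are disabled.
	 */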
	if (amdgpu_device_has_timeouts_enabled(adev) &&
	    !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
		return len;

	amdgpu_res_first(bo->resource, offset, len, &cursor);
	while (cursor.remaining) {
		size_t count, size = cursor.size;
		loff_t pos = cursor.start;

		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
		size -= count;
		if (size) {
			/* use MM register access for the rest of VRAM and for
			 * unaligned addresses
			 */
			pos += count;
			buf += count;
			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
		}

		ret += cursor.size;
		buf += cursor.size;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return ret;
}

static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	amdgpu_bo_move_notify(bo, false, NULL);
}

static struct ttm_device_funcs amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
};

void amdgpu_ttm_init_vram_resv(struct amdgpu_device *adev,
			       enum amdgpu_resv_region_id id,
			       uint64_t offset, uint64_t size,
			       bool needs_cpu_map)
{
	struct amdgpu_vram_resv *resv;

	if (id >= AMDGPU_RESV_MAX)
		return;

	resv = &adev->mman.resv_region[id];
	resv->offset = offset;
	resv->size = size;
	resv->needs_cpu_map = needs_cpu_map;
}

static void amdgpu_ttm_init_fw_resv_region(struct amdgpu_device *adev)
{
	uint32_t reserve_size = 0;

	if (!adev->discovery.reserve_tmr)
		return;

	/*
	 * Query the reserved TMR size through atom firmwareinfo for
	 * Sienna_Cichlid and onwards for all the use cases (IP discovery,
	 * G6 memory training, profiling, diagnostic data, etc.)
	 *
	 * Otherwise, fall back to the legacy approach of checking and
	 * reserving the TMR block for IP discovery data and G6 memory
	 * training data respectively.
1703 */
1704 if (adev->bios)
1705 reserve_size =
1706 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1707
1708 if (!adev->bios &&
1709 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1710 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
1711 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
1712 reserve_size = max(reserve_size, (uint32_t)280 << 20);
1713 else if (!adev->bios &&
1714 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
1715 if (hweight32(adev->aid_mask) == 1)
1716 reserve_size = max(reserve_size, (uint32_t)128 << 20);
1717 else
1718 reserve_size = max(reserve_size, (uint32_t)144 << 20);
1719 } else if (!reserve_size)
1720 reserve_size = DISCOVERY_TMR_OFFSET;
1721
1722 amdgpu_ttm_init_vram_resv(adev, AMDGPU_RESV_FW,
1723 adev->gmc.real_vram_size - reserve_size,
1724 reserve_size, false);
1725 }
1726
1727 static void amdgpu_ttm_init_mem_train_resv_region(struct amdgpu_device *adev)
1728 {
1729 uint64_t reserve_size;
1730 uint64_t offset;
1731
1732 if (!adev->discovery.reserve_tmr)
1733 return;
1734
1735 if (!adev->bios || amdgpu_sriov_vf(adev))
1736 return;
1737
1738 if (!amdgpu_atomfirmware_mem_training_supported(adev))
1739 return;
1740
1741 reserve_size = adev->mman.resv_region[AMDGPU_RESV_FW].size;
1742 offset = ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
1743 amdgpu_ttm_init_vram_resv(adev, AMDGPU_RESV_MEM_TRAIN,
1744 offset,
1745 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES,
1746 false);
1747 }
1748
1749 static void amdgpu_ttm_init_vram_resv_regions(struct amdgpu_device *adev)
1750 {
1751 uint64_t vram_size = adev->gmc.visible_vram_size;
1752
1753 /* Initialize memory reservations as required for VGA.
1754 * This is used for VGA emulation and pre-OS scanout buffers to
1755 * avoid display artifacts while transitioning between pre-OS
1756 * and driver.
1757 */
1758 amdgpu_gmc_init_vga_resv_regions(adev);
1759 amdgpu_ttm_init_fw_resv_region(adev);
1760 amdgpu_ttm_init_mem_train_resv_region(adev);
1761
1762 if (adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].size > vram_size)
1763 adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].size = 0;
1764
1765 if (adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].size > vram_size)
1766 adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].size = 0;
1767 }
1768
1769 int amdgpu_ttm_mark_vram_reserved(struct amdgpu_device *adev,
1770 enum amdgpu_resv_region_id id)
1771 {
1772 struct amdgpu_vram_resv *resv;
1773 int ret;
1774
1775 if (id >= AMDGPU_RESV_MAX)
1776 return -EINVAL;
1777
1778 resv = &adev->mman.resv_region[id];
1779 if (!resv->size)
1780 return 0;
1781
1782 ret = amdgpu_bo_create_kernel_at(adev, resv->offset, resv->size,
1783 &resv->bo,
1784 resv->needs_cpu_map ? &resv->cpu_ptr : NULL);
1785 if (ret) {
1786 dev_err(adev->dev,
1787 "reserve vram failed: id=%d offset=0x%llx size=0x%llx ret=%d\n",
1788 id, resv->offset, resv->size, ret);
1789 memset(resv, 0, sizeof(*resv));
1790 }
1791
1792 return ret;
1793 }
1794
1795 void amdgpu_ttm_unmark_vram_reserved(struct amdgpu_device *adev,
1796 enum amdgpu_resv_region_id id)
1797 {
1798 struct amdgpu_vram_resv *resv;
1799
1800 if (id >= AMDGPU_RESV_MAX)
1801 return;
1802
1803 resv = &adev->mman.resv_region[id];
1804 if (!resv->bo)
1805 return;
1806
1807 amdgpu_bo_free_kernel(&resv->bo, NULL,
1808 resv->needs_cpu_map ? &resv->cpu_ptr : NULL);
1809 memset(resv, 0, sizeof(*resv));
1810 }
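
/*
 * Lifecycle sketch (illustrative only): once a region has been described,
 * amdgpu_ttm_mark_vram_reserved() backs it with a kernel BO and
 * amdgpu_ttm_unmark_vram_reserved() releases it again. The region id is an
 * arbitrary example.
 */
static int __maybe_unused amdgpu_ttm_resv_lifecycle_example(struct amdgpu_device *adev)
{
	int r;

	/* Allocate the backing BO (a no-op if the region has zero size). */
	r = amdgpu_ttm_mark_vram_reserved(adev, AMDGPU_RESV_FW);
	if (r)
		return r;

	/* ... the reserved range is now protected from other allocations ... */

	/* Free the BO and clear the bookkeeping again. */
	amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_FW);
	return 0;
}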
1811
1812 /*
1813 * Reserve all regions with non-zero size. Regions whose info is not
1814 * yet available (e.g., the FW extended region) may still be reserved
1815 * at runtime.
1816 */
1817 static int amdgpu_ttm_alloc_vram_resv_regions(struct amdgpu_device *adev)
1818 {
1819 int i, r;
1820
1821 for (i = 0; i < AMDGPU_RESV_MAX; i++) {
1822 r = amdgpu_ttm_mark_vram_reserved(adev, i);
1823 if (r)
1824 return r;
1825 }
1826
1827 return 0;
1828 }
1829
1830 /*
1831 * Memory training reservation functions
1832 */
1833
1834 /**
1835 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1836 *
1837 * @adev: amdgpu_device pointer
1838 *
1839 * Free the memory-training reserved VRAM if it has been reserved.
1840 */
1841 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1842 {
1843 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1844
1845 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1846 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_MEM_TRAIN);
1847
1848 return 0;
1849 }
1850
1851 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1852 {
1853 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1854 struct amdgpu_vram_resv *resv =
1855 &adev->mman.resv_region[AMDGPU_RESV_MEM_TRAIN];
1856
1857 memset(ctx, 0, sizeof(*ctx));
1858
1859 ctx->c2p_train_data_offset = resv->offset;
1860 ctx->p2c_train_data_offset =
1861 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1862 ctx->train_data_size = resv->size;
1863
1864 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1865 ctx->train_data_size,
1866 ctx->p2c_train_data_offset,
1867 ctx->c2p_train_data_offset);
1868 }
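
/*
 * Layout note for the block above: the c2p block reuses the reserved
 * region's offset (placed just below the FW region by
 * amdgpu_ttm_init_mem_train_resv_region()), while the p2c block sits a
 * fixed GDDR6_MEM_TRAINING_OFFSET below the top of VRAM.
 */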
1869
1870 static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
1871 {
1872 int i;
1873
1874 if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
1875 return 0;
1876
1877 adev->mman.ttm_pools = kzalloc_objs(*adev->mman.ttm_pools,
1878 adev->gmc.num_mem_partitions);
1879 if (!adev->mman.ttm_pools)
1880 return -ENOMEM;
1881
1882 for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
1883 ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
1884 adev->gmc.mem_partitions[i].numa.node,
1885 TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
1886 }
1887 return 0;
1888 }
1889
1890 static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
1891 {
1892 int i;
1893
1894 if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
1895 return;
1896
1897 for (i = 0; i < adev->gmc.num_mem_partitions; i++)
1898 ttm_pool_fini(&adev->mman.ttm_pools[i]);
1899
1900 kfree(adev->mman.ttm_pools);
1901 adev->mman.ttm_pools = NULL;
1902 }
1903
1904 /**
1905 * amdgpu_ttm_alloc_mmio_remap_bo - Allocate the singleton MMIO_REMAP BO
1906 * @adev: amdgpu device
1907 *
1908 * Allocates a global BO with backing AMDGPU_PL_MMIO_REMAP when the
1909 * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
1910 * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
1911 * GEM object (amdgpu_bo_create).
1912 *
1913 * Return:
1914 * * 0 on success or intentional skip (feature not present/unsupported)
1915 * * negative errno on allocation failure
1916 */
1917 static int amdgpu_ttm_alloc_mmio_remap_bo(struct amdgpu_device *adev)
1918 {
1919 struct ttm_operation_ctx ctx = { false, false };
1920 struct ttm_placement placement;
1921 struct ttm_buffer_object *tbo;
1922 struct ttm_place placements;
1923 struct amdgpu_bo_param bp;
1924 struct ttm_resource *tmp;
1925 int r;
1926
1927 /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
1928 if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
1929 return 0;
1930
1931 /*
1932 * Allocate a BO first and then move it to AMDGPU_PL_MMIO_REMAP.
1933 * The initial TTM resource assigned by amdgpu_bo_create() is
1934 * replaced below with a fixed MMIO_REMAP placement.
1935 */
1936 memset(&bp, 0, sizeof(bp));
1937 bp.type = ttm_bo_type_device;
1938 bp.size = AMDGPU_GPU_PAGE_SIZE;
1939 bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
1940 bp.domain = 0;
1941 bp.flags = 0;
1942 bp.resv = NULL;
1943 bp.bo_ptr_size = sizeof(struct amdgpu_bo);
1944 r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
1945 if (r)
1946 return r;
1947
1948 r = amdgpu_bo_reserve(adev->rmmio_remap.bo, true);
1949 if (r)
1950 goto err_unref;
1951
1952 tbo = &adev->rmmio_remap.bo->tbo;
1953
1954 /*
1955 * MMIO_REMAP is a fixed I/O placement (AMDGPU_PL_MMIO_REMAP).
1956 */
1957 placement.num_placement = 1;
1958 placement.placement = &placements;
1959 placements.fpfn = 0;
1960 placements.lpfn = 0;
1961 placements.mem_type = AMDGPU_PL_MMIO_REMAP;
1962 placements.flags = 0;
1963 /* Force the BO into the fixed MMIO_REMAP placement */
1964 r = ttm_bo_mem_space(tbo, &placement, &tmp, &ctx);
1965 if (unlikely(r))
1966 goto err_unlock;
1967
1968 ttm_resource_free(tbo, &tbo->resource);
1969 ttm_bo_assign_mem(tbo, tmp);
1970 ttm_bo_pin(tbo);
1971
1972 amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1973 return 0;
1974
1975 err_unlock:
1976 amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1977
1978 err_unref:
1979 amdgpu_bo_unref(&adev->rmmio_remap.bo);
1980 adev->rmmio_remap.bo = NULL;
1981 return r;
1982 }
1983
1984 /**
1985 * amdgpu_ttm_free_mmio_remap_bo - Free the singleton MMIO_REMAP BO
1986 * @adev: amdgpu device
1987 *
1988 * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
1989 * amdgpu_ttm_alloc_mmio_remap_bo().
1990 */
1991 static void amdgpu_ttm_free_mmio_remap_bo(struct amdgpu_device *adev)
1992 {
1993 if (!adev->rmmio_remap.bo)
1994 return;
1995
1996 if (!amdgpu_bo_reserve(adev->rmmio_remap.bo, true)) {
1997 ttm_bo_unpin(&adev->rmmio_remap.bo->tbo);
1998 amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1999 }
2000
2001 /*
2002 * At this point we rely on normal DRM teardown ordering:
2003 * no new user ioctls can access the global MMIO_REMAP BO
2004 * once TTM teardown begins.
2005 */
2006 amdgpu_bo_unref(&adev->rmmio_remap.bo);
2007 adev->rmmio_remap.bo = NULL;
2008 }
2009
2010 static int amdgpu_ttm_buffer_entity_init(struct amdgpu_gtt_mgr *mgr,
2011 struct amdgpu_ttm_buffer_entity *entity,
2012 enum drm_sched_priority prio,
2013 struct drm_gpu_scheduler **scheds,
2014 int num_schedulers,
2015 u32 num_gart_windows)
2016 {
2017 int i, r, num_pages;
2018
2019 if (ARRAY_SIZE(entity->gart_window_offs) < num_gart_windows)
2020 return -EINVAL;
2021
2022 r = drm_sched_entity_init(&entity->base, prio, scheds, num_schedulers, NULL);
2023 if (r)
2024 return r;
2025
2026 mutex_init(&entity->lock);
2027 if (num_gart_windows == 0)
2028 return 0;
2029
2030 num_pages = num_gart_windows * AMDGPU_GTT_MAX_TRANSFER_SIZE;
2031 r = amdgpu_gtt_mgr_alloc_entries(mgr, &entity->gart_node, num_pages,
2032 DRM_MM_INSERT_BEST);
2033 if (r) {
2034 drm_sched_entity_destroy(&entity->base);
2035 return r;
2036 }
2037
2038 for (i = 0; i < num_gart_windows; i++) {
2039 entity->gart_window_offs[i] =
2040 amdgpu_gtt_node_to_byte_offset(&entity->gart_node) +
2041 i * AMDGPU_GTT_MAX_TRANSFER_SIZE * PAGE_SIZE;
2042 }
2043
2044 return 0;
2045 }
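
/*
 * Note on the window layout above: the GTT node spans num_gart_windows *
 * AMDGPU_GTT_MAX_TRANSFER_SIZE pages, and window i starts
 * i * AMDGPU_GTT_MAX_TRANSFER_SIZE * PAGE_SIZE bytes into it, so each
 * window can map one maximally sized transfer independently.
 */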
2046
2047 static void amdgpu_ttm_buffer_entity_fini(struct amdgpu_gtt_mgr *mgr,
2048 struct amdgpu_ttm_buffer_entity *entity)
2049 {
2050 amdgpu_gtt_mgr_free_entries(mgr, &entity->gart_node);
2051 drm_sched_entity_destroy(&entity->base);
2052 }
2053
2054 /*
2055 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
2056 * gtt/vram related fields.
2057 *
2058 * This initializes all of the memory space pools that the TTM layer
2059 * will need such as the GTT space (system memory mapped to the device),
2060 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
2061 * can be mapped per VMID.
2062 */
2063 int amdgpu_ttm_init(struct amdgpu_device *adev)
2064 {
2065 uint64_t gtt_size;
2066 int r;
2067
2068 dma_set_max_seg_size(adev->dev, UINT_MAX);
2069 /* No other users of the address space, so set it to 0 */
2070 r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
2071 adev_to_drm(adev)->anon_inode->i_mapping,
2072 adev_to_drm(adev)->vma_offset_manager,
2073 (adev->need_swiotlb ?
2074 TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
2075 (dma_addressing_limited(adev->dev) ?
2076 TTM_ALLOCATION_POOL_USE_DMA32 : 0) |
2077 TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
2078 if (r) {
2079 dev_err(adev->dev,
2080 "failed initializing buffer object driver(%d).\n", r);
2081 return r;
2082 }
2083
2084 r = amdgpu_ttm_pools_init(adev);
2085 if (r) {
2086 dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
2087 return r;
2088 }
2089 adev->mman.initialized = true;
2090
2091 if (!adev->gmc.is_app_apu) {
2092 /* Initialize VRAM pool with all of VRAM divided into pages */
2093 r = amdgpu_vram_mgr_init(adev);
2094 if (r) {
2095 dev_err(adev->dev, "Failed initializing VRAM heap.\n");
2096 return r;
2097 }
2098 }
2099
2100 /* Change the size here instead of the init above so only lpfn is affected */
2101 amdgpu_ttm_set_buffer_funcs_status(adev, false);
2102 #ifdef CONFIG_64BIT
2103 #ifdef CONFIG_X86
2104 if (adev->gmc.xgmi.connected_to_cpu)
2105 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
2106 adev->gmc.visible_vram_size);
2107
2108 else if (adev->gmc.is_app_apu)
2109 DRM_DEBUG_DRIVER(
2110 "No need to ioremap when real vram size is 0\n");
2111 else
2112 #endif
2113 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
2114 adev->gmc.visible_vram_size);
2115 #endif
2116
2117 amdgpu_ttm_init_vram_resv_regions(adev);
2118
2119 r = amdgpu_ttm_alloc_vram_resv_regions(adev);
2120 if (r)
2121 return r;
2122
2123 if (adev->mman.resv_region[AMDGPU_RESV_MEM_TRAIN].size) {
2124 struct psp_memory_training_context *ctx =
2125 &adev->psp.mem_train_ctx;
2126
2127 amdgpu_ttm_training_data_block_init(adev);
2128 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
2129 }
2130
2131 dev_info(adev->dev, " %uM of VRAM memory ready\n",
2132 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
2133
2134 /* Compute GTT size, either based on TTM limit
2135 * or whatever the user passed on module init.
2136 */
2137 gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
2138 if (amdgpu_gtt_size != -1) {
2139 uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;
2140
2141 drm_warn(&adev->ddev,
2142 "Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
2143 if (gtt_size != configured_size)
2144 drm_warn(&adev->ddev,
2145 "GTT size has been set as %llu but TTM size has been set as %llu, this is unusual\n",
2146 configured_size, gtt_size);
2147
2148 gtt_size = configured_size;
2149 }
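
/*
 * Worked example: amdgpu_gtt_size is given in MiB, so e.g.
 * amdgpu.gttsize=4096 yields configured_size = 4096 << 20 = 4 GiB,
 * overriding the ttm_tt_pages_limit() based default above.
 */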
2150
2151 /* Initialize GTT memory pool */
2152 r = amdgpu_gtt_mgr_init(adev, gtt_size);
2153 if (r) {
2154 dev_err(adev->dev, "Failed initializing GTT heap.\n");
2155 return r;
2156 }
2157 dev_info(adev->dev, " %uM of GTT memory ready.\n",
2158 (unsigned int)(gtt_size / (1024 * 1024)));
2159
2160 if (adev->flags & AMD_IS_APU) {
2161 if (adev->gmc.real_vram_size < gtt_size)
2162 adev->apu_prefer_gtt = true;
2163 }
2164
2165 /* Initialize doorbell pool on PCI BAR */
2166 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
2167 if (r) {
2168 dev_err(adev->dev, "Failed initializing doorbell heap.\n");
2169 return r;
2170 }
2171
2172 /* Create a doorbell page for kernel use */
2173 r = amdgpu_doorbell_create_kernel_doorbells(adev);
2174 if (r) {
2175 dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
2176 return r;
2177 }
2178
2179 /* Initialize MMIO-remap pool (single page 4K) */
2180 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
2181 if (r) {
2182 dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
2183 return r;
2184 }
2185
2186 /* Allocate the singleton MMIO_REMAP BO if supported */
2187 r = amdgpu_ttm_alloc_mmio_remap_bo(adev);
2188 if (r)
2189 return r;
2190
2191 /* Initialize preemptible memory pool */
2192 r = amdgpu_preempt_mgr_init(adev);
2193 if (r) {
2194 dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
2195 return r;
2196 }
2197
2198 /* Initialize various on-chip memory pools */
2199 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
2200 if (r) {
2201 dev_err(adev->dev, "Failed initializing GDS heap.\n");
2202 return r;
2203 }
2204
2205 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
2206 if (r) {
2207 dev_err(adev->dev, "Failed initializing gws heap.\n");
2208 return r;
2209 }
2210
2211 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
2212 if (r) {
2213 dev_err(adev->dev, "Failed initializing oa heap.\n");
2214 return r;
2215 }
2216 if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
2217 AMDGPU_GEM_DOMAIN_GTT,
2218 &adev->mman.sdma_access_bo, NULL,
2219 &adev->mman.sdma_access_ptr))
2220 drm_warn(adev_to_drm(adev),
2221 "Debug VRAM access will use slowpath MM access\n");
2222
2223 return 0;
2224 }
2225
2226 /*
2227 * amdgpu_ttm_fini - De-initialize the TTM memory pools
2228 */
2229 void amdgpu_ttm_fini(struct amdgpu_device *adev)
2230 {
2231 int idx;
2232
2233 if (!adev->mman.initialized)
2234 return;
2235
2236 amdgpu_ttm_pools_fini(adev);
2237
2238 amdgpu_ttm_training_reserve_vram_fini(adev);
2239 /* return the stolen vga memory back to VRAM */
2240 if (!adev->gmc.is_app_apu) {
2241 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_STOLEN_VGA);
2242 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_STOLEN_EXTENDED);
2243 /* return the FW reserved memory back to VRAM */
2244 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_FW);
2245 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_FW_EXTEND);
2246 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_STOLEN_RESERVED);
2247 }
2248 amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
2249 &adev->mman.sdma_access_ptr);
2250
2251 amdgpu_ttm_free_mmio_remap_bo(adev);
2252 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_FW_VRAM_USAGE);
2253 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_DRV_VRAM_USAGE);
2254
2255 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
2256
2257 if (adev->mman.aper_base_kaddr)
2258 iounmap(adev->mman.aper_base_kaddr);
2259 adev->mman.aper_base_kaddr = NULL;
2260
2261 drm_dev_exit(idx);
2262 }
2263
2264 if (!adev->gmc.is_app_apu)
2265 amdgpu_vram_mgr_fini(adev);
2266 amdgpu_gtt_mgr_fini(adev);
2267 amdgpu_preempt_mgr_fini(adev);
2268 amdgpu_doorbell_fini(adev);
2269
2270 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
2271 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
2272 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
2273 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
2274 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
2275 ttm_device_fini(&adev->mman.bdev);
2276 adev->mman.initialized = false;
2277 dev_info(adev->dev, " ttm finalized\n");
2278 }
2279
2280 /**
2281 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
2282 *
2283 * @adev: amdgpu_device pointer
2284 * @enable: true when we can use buffer functions.
2285 *
2286 * Enable/disable use of buffer functions during suspend/resume. This should
2287 * only be called at bootup or when userspace isn't running.
2288 */
2289 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2290 {
2291 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
2292 u32 num_clear_entities, num_move_entities;
2293 uint64_t size;
2294 int r, i, j;
2295
2296 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
2297 adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
2298 return;
2299
2300 if (enable) {
2301 struct amdgpu_ring *ring;
2302 struct drm_gpu_scheduler *sched;
2303
2304 if (!adev->mman.buffer_funcs_ring || !adev->mman.buffer_funcs_ring->sched.ready) {
2305 dev_warn(adev->dev, "Not enabling DMA transfers for in-kernel use");
2306 return;
2307 }
2308
2309 num_clear_entities = 1;
2310 num_move_entities = 1;
2311 ring = adev->mman.buffer_funcs_ring;
2312 sched = &ring->sched;
2313 r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
2314 &adev->mman.default_entity,
2315 DRM_SCHED_PRIORITY_KERNEL,
2316 &sched, 1, 0);
2317 if (r < 0) {
2318 dev_err(adev->dev,
2319 "Failed setting up TTM entity (%d)\n", r);
2320 return;
2321 }
2322
2323 adev->mman.clear_entities = kcalloc(num_clear_entities,
2324 sizeof(struct amdgpu_ttm_buffer_entity),
2325 GFP_KERNEL);
2326 atomic_set(&adev->mman.next_clear_entity, 0);
2327 if (!adev->mman.clear_entities)
2328 goto error_free_default_entity;
2329
2330 adev->mman.num_clear_entities = num_clear_entities;
2331
2332 for (i = 0; i < num_clear_entities; i++) {
2333 r = amdgpu_ttm_buffer_entity_init(
2334 &adev->mman.gtt_mgr, &adev->mman.clear_entities[i],
2335 DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 1);
2336
2337 if (r < 0) {
2338 for (j = 0; j < i; j++)
2339 amdgpu_ttm_buffer_entity_fini(
2340 &adev->mman.gtt_mgr, &adev->mman.clear_entities[j]);
2341 kfree(adev->mman.clear_entities);
2342 adev->mman.num_clear_entities = 0;
2343 adev->mman.clear_entities = NULL;
2344 goto error_free_default_entity;
2345 }
2346 }
2347
2348 adev->mman.num_move_entities = num_move_entities;
2349 atomic_set(&adev->mman.next_move_entity, 0);
2350 for (i = 0; i < num_move_entities; i++) {
2351 r = amdgpu_ttm_buffer_entity_init(
2352 &adev->mman.gtt_mgr,
2353 &adev->mman.move_entities[i],
2354 DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 2);
2355
2356 if (r < 0) {
2357 for (j = 0; j < i; j++)
2358 amdgpu_ttm_buffer_entity_fini(
2359 &adev->mman.gtt_mgr, &adev->mman.move_entities[j]);
2360 adev->mman.num_move_entities = 0;
2361 goto error_free_clear_entities;
2362 }
2363 }
2364 } else {
2365 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2366 &adev->mman.default_entity);
2367 for (i = 0; i < adev->mman.num_clear_entities; i++)
2368 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2369 &adev->mman.clear_entities[i]);
2370 for (i = 0; i < adev->mman.num_move_entities; i++)
2371 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2372 &adev->mman.move_entities[i]);
2373 /* Drop all the old fences since re-creating the scheduler entities
2374 * will allocate new contexts.
2375 */
2376 ttm_resource_manager_cleanup(man);
2377 kfree(adev->mman.clear_entities);
2378 adev->mman.clear_entities = NULL;
2379 adev->mman.num_clear_entities = 0;
2380 adev->mman.num_move_entities = 0;
2381 }
2382
2383 /* This just adjusts TTM's idea of the size, which sets lpfn to the correct value */
2384 if (enable)
2385 size = adev->gmc.real_vram_size;
2386 else
2387 size = adev->gmc.visible_vram_size;
2388 man->size = size;
2389 adev->mman.buffer_funcs_enabled = enable;
2390
2391 return;
2392
2393 error_free_clear_entities:
2394 for (i = 0; i < adev->mman.num_clear_entities; i++)
2395 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2396 &adev->mman.clear_entities[i]);
2397 kfree(adev->mman.clear_entities);
2398 adev->mman.clear_entities = NULL;
2399 adev->mman.num_clear_entities = 0;
2400 error_free_default_entity:
2401 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2402 &adev->mman.default_entity);
2403 }
2404
2405 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
2406 struct amdgpu_ttm_buffer_entity *entity,
2407 unsigned int num_dw,
2408 struct dma_resv *resv,
2409 bool vm_needs_flush,
2410 struct amdgpu_job **job,
2411 u64 k_job_id)
2412 {
2413 enum amdgpu_ib_pool_type pool = AMDGPU_IB_POOL_DELAYED;
2414 int r;
2415 r = amdgpu_job_alloc_with_ib(adev, &entity->base,
2416 AMDGPU_FENCE_OWNER_UNDEFINED,
2417 num_dw * 4, pool, job, k_job_id);
2418 if (r)
2419 return r;
2420
2421 if (vm_needs_flush) {
2422 (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
2423 adev->gmc.pdb0_bo :
2424 adev->gart.bo);
2425 (*job)->vm_needs_flush = true;
2426 }
2427 if (!resv)
2428 return 0;
2429
2430 return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
2431 DMA_RESV_USAGE_BOOKKEEP);
2432 }
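
/*
 * Note on amdgpu_ttm_prepare_job() above: num_dw is a dword count, so the
 * IB is allocated with num_dw * 4 bytes, and any fences already attached to
 * @resv are added as scheduler dependencies with DMA_RESV_USAGE_BOOKKEEP so
 * the copy/fill cannot overtake prior users of the buffer.
 */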
2433
2434 int amdgpu_copy_buffer(struct amdgpu_device *adev,
2435 struct amdgpu_ttm_buffer_entity *entity,
2436 uint64_t src_offset,
2437 uint64_t dst_offset, uint32_t byte_count,
2438 struct dma_resv *resv,
2439 struct dma_fence **fence,
2440 bool vm_needs_flush, uint32_t copy_flags)
2441 {
2442 unsigned int num_loops, num_dw;
2443 struct amdgpu_ring *ring;
2444 struct amdgpu_job *job;
2445 uint32_t max_bytes;
2446 unsigned int i;
2447 int r;
2448
2449 ring = adev->mman.buffer_funcs_ring;
2450
2451 if (!ring->sched.ready) {
2452 dev_err(adev->dev,
2453 "Trying to move memory with ring turned off.\n");
2454 return -EINVAL;
2455 }
2456
2457 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2458 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2459 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2460 r = amdgpu_ttm_prepare_job(adev, entity, num_dw,
2461 resv, vm_needs_flush, &job,
2462 AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
2463 if (r)
2464 goto error_free;
2465
2466 for (i = 0; i < num_loops; i++) {
2467 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2468
2469 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2470 dst_offset, cur_size_in_bytes, copy_flags);
2471 src_offset += cur_size_in_bytes;
2472 dst_offset += cur_size_in_bytes;
2473 byte_count -= cur_size_in_bytes;
2474 }
2475
2476 *fence = amdgpu_ttm_job_submit(adev, entity, job, num_dw);
2477
2478 return 0;
2479
2480 error_free:
2481 amdgpu_job_free(job);
2482 dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
2483 return r;
2484 }
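
/*
 * Usage sketch (illustrative only): copy PAGE_SIZE bytes between two GPU
 * addresses on the default kernel entity and wait for completion. The
 * offsets are hypothetical; real callers derive them from mapped TTM
 * resources and may additionally need the entity's GART windows and lock.
 */
static int __maybe_unused amdgpu_copy_buffer_example(struct amdgpu_device *adev,
						     uint64_t src, uint64_t dst)
{
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_copy_buffer(adev, &adev->mman.default_entity, src, dst,
			       PAGE_SIZE, NULL, &fence, false, 0);
	if (r)
		return r;

	if (fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}
	return 0;
}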
2485
2486 static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev,
2487 struct amdgpu_ttm_buffer_entity *entity,
2488 uint32_t src_data,
2489 uint64_t dst_addr, uint32_t byte_count,
2490 struct dma_resv *resv,
2491 struct dma_fence **fence,
2492 bool vm_needs_flush,
2493 u64 k_job_id)
2494 {
2495 unsigned int num_loops, num_dw;
2496 struct amdgpu_job *job;
2497 uint32_t max_bytes;
2498 unsigned int i;
2499 int r;
2500
2501 max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2502 num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2503 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2504 r = amdgpu_ttm_prepare_job(adev, entity, num_dw, resv,
2505 vm_needs_flush, &job, k_job_id);
2506 if (r)
2507 return r;
2508
2509 for (i = 0; i < num_loops; i++) {
2510 uint32_t cur_size = min(byte_count, max_bytes);
2511
2512 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2513 cur_size);
2514
2515 dst_addr += cur_size;
2516 byte_count -= cur_size;
2517 }
2518
2519 *fence = amdgpu_ttm_job_submit(adev, entity, job, num_dw);
2520 return 0;
2521 }
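
/*
 * Worked example for the sizing in amdgpu_ttm_fill_mem() above
 * (hypothetical limit): with fill_max_bytes = 4 MiB, filling a 10 MiB
 * range takes DIV_ROUND_UP(10M, 4M) = 3 fill packets, and the IB is sized
 * to 3 * fill_num_dw dwords, rounded up to a multiple of 8.
 */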
2522
2523 /**
2524 * amdgpu_ttm_clear_buffer - clear memory buffers
2525 * @bo: amdgpu buffer object
2526 * @resv: reservation object
2527 * @fence: dma_fence associated with the operation
2528 *
2529 * Clear the memory buffer resource.
2530 *
2531 * Returns:
2532 * 0 for success or a negative error code on failure.
2533 */
2534 int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
2535 struct dma_resv *resv,
2536 struct dma_fence **fence)
2537 {
2538 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2539 struct amdgpu_ttm_buffer_entity *entity;
2540 struct amdgpu_res_cursor cursor;
2541 u64 addr;
2542 int r = 0;
2543
2544 if (!adev->mman.buffer_funcs_enabled)
2545 return -EINVAL;
2546
2547 if (!fence)
2548 return -EINVAL;
2549 entity = &adev->mman.clear_entities[0];
2550 *fence = dma_fence_get_stub();
2551
2552 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
2553
2554 mutex_lock(&entity->lock);
2555 while (cursor.remaining) {
2556 struct dma_fence *next = NULL;
2557 u64 size;
2558
2559 if (amdgpu_res_cleared(&cursor)) {
2560 amdgpu_res_next(&cursor, cursor.size);
2561 continue;
2562 }
2563
2564 /* Never clear more than 256MiB at once to avoid timeouts */
2565 size = min(cursor.size, 256ULL << 20);
2566
2567 r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &cursor,
2568 0, false, &size, &addr);
2569 if (r)
2570 goto err;
2571
2572 r = amdgpu_ttm_fill_mem(adev, entity, 0, addr, size, resv,
2573 &next, true,
2574 AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
2575 if (r)
2576 goto err;
2577
2578 dma_fence_put(*fence);
2579 *fence = next;
2580
2581 amdgpu_res_next(&cursor, size);
2582 }
2583 err:
2584 mutex_unlock(&entity->lock);
2585
2586 return r;
2587 }
2588
2589 int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
2590 struct amdgpu_bo *bo,
2591 uint32_t src_data,
2592 struct dma_resv *resv,
2593 struct dma_fence **f,
2594 u64 k_job_id)
2595 {
2596 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2597 struct dma_fence *fence = NULL;
2598 struct amdgpu_res_cursor dst;
2599 int r;
2600
2601 if (!entity)
2602 return -EINVAL;
2603
2604 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
2605
2606 mutex_lock(&entity->lock);
2607 while (dst.remaining) {
2608 struct dma_fence *next;
2609 uint64_t cur_size, to;
2610
2611 /* Never fill more than 256MiB at once to avoid timeouts */
2612 cur_size = min(dst.size, 256ULL << 20);
2613
2614 r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &dst,
2615 0, false, &cur_size, &to);
2616 if (r)
2617 goto error;
2618
2619 r = amdgpu_ttm_fill_mem(adev, entity,
2620 src_data, to, cur_size, resv,
2621 &next, true, k_job_id);
2622 if (r)
2623 goto error;
2624
2625 dma_fence_put(fence);
2626 fence = next;
2627
2628 amdgpu_res_next(&dst, cur_size);
2629 }
2630 error:
2631 mutex_unlock(&entity->lock);
2632 if (f)
2633 *f = dma_fence_get(fence);
2634 dma_fence_put(fence);
2635 return r;
2636 }
2637
2638 struct amdgpu_ttm_buffer_entity *
2639 amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev)
2640 {
2641 struct amdgpu_mman *mman = &adev->mman;
2642 u32 i;
2643
2644 if (mman->num_clear_entities == 0)
2645 return NULL;
2646
2647 i = atomic_inc_return(&mman->next_clear_entity) %
2648 mman->num_clear_entities;
2649 return &mman->clear_entities[i];
2650 }
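
/*
 * The selector above is a plain round-robin: with num_clear_entities == 2,
 * successive calls return entity 1, 0, 1, 0, ..., spreading clear work
 * across the available entities.
 */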
2651
2652 /**
2653 * amdgpu_ttm_evict_resources - evict memory buffers
2654 * @adev: amdgpu device object
2655 * @mem_type: evicted BO's memory type
2656 *
2657 * Evicts all @mem_type buffers on the lru list of the memory type.
2658 *
2659 * Returns:
2660 * 0 for success or a negative error code on failure.
2661 */
2662 int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2663 {
2664 struct ttm_resource_manager *man;
2665
2666 switch (mem_type) {
2667 case TTM_PL_VRAM:
2668 case TTM_PL_TT:
2669 case AMDGPU_PL_GWS:
2670 case AMDGPU_PL_GDS:
2671 case AMDGPU_PL_OA:
2672 man = ttm_manager_type(&adev->mman.bdev, mem_type);
2673 break;
2674 default:
2675 dev_err(adev->dev, "Trying to evict invalid memory type\n");
2676 return -EINVAL;
2677 }
2678
2679 return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
2680 }
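
/*
 * Usage sketch (illustrative): evicting all VRAM buffers, e.g. around
 * suspend, is simply:
 *
 *	r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
 */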
2681
2682 #if defined(CONFIG_DEBUG_FS)
2683
2684 static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2685 {
2686 struct amdgpu_device *adev = m->private;
2687
2688 return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2689 }
2690
2691 DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2692
2693 /*
2694 * amdgpu_ttm_vram_read - Linear read access to VRAM
2695 *
2696 * Accesses VRAM via MMIO for debugging purposes.
2697 */
2698 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2699 size_t size, loff_t *pos)
2700 {
2701 struct amdgpu_device *adev = file_inode(f)->i_private;
2702 ssize_t result = 0;
2703
2704 if (size & 0x3 || *pos & 0x3)
2705 return -EINVAL;
2706
2707 if (*pos >= adev->gmc.mc_vram_size)
2708 return -ENXIO;
2709
2710 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2711 while (size) {
2712 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2713 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2714
2715 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2716 if (copy_to_user(buf, value, bytes))
2717 return -EFAULT;
2718
2719 result += bytes;
2720 buf += bytes;
2721 *pos += bytes;
2722 size -= bytes;
2723 }
2724
2725 return result;
2726 }
2727
2728 /*
2729 * amdgpu_ttm_vram_write - Linear write access to VRAM
2730 *
2731 * Accesses VRAM via MMIO for debugging purposes.
2732 */
2733 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2734 size_t size, loff_t *pos)
2735 {
2736 struct amdgpu_device *adev = file_inode(f)->i_private;
2737 ssize_t result = 0;
2738 int r;
2739
2740 if (size & 0x3 || *pos & 0x3)
2741 return -EINVAL;
2742
2743 if (*pos >= adev->gmc.mc_vram_size)
2744 return -ENXIO;
2745
2746 while (size) {
2747 uint32_t value;
2748
2749 if (*pos >= adev->gmc.mc_vram_size)
2750 return result;
2751
2752 r = get_user(value, (uint32_t *)buf);
2753 if (r)
2754 return r;
2755
2756 amdgpu_device_mm_access(adev, *pos, &value, 4, true);
2757
2758 result += 4;
2759 buf += 4;
2760 *pos += 4;
2761 size -= 4;
2762 }
2763
2764 return result;
2765 }
2766
2767 static const struct file_operations amdgpu_ttm_vram_fops = {
2768 .owner = THIS_MODULE,
2769 .read = amdgpu_ttm_vram_read,
2770 .write = amdgpu_ttm_vram_write,
2771 .llseek = default_llseek,
2772 };
2773
2774 /*
2775 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2776 *
2777 * This function is used to read memory that has been mapped to the
2778 * GPU and the known addresses are not physical addresses but instead
2779 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2780 */
2781 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2782 size_t size, loff_t *pos)
2783 {
2784 struct amdgpu_device *adev = file_inode(f)->i_private;
2785 struct iommu_domain *dom;
2786 ssize_t result = 0;
2787 int r;
2788
2789 /* retrieve the IOMMU domain if any for this device */
2790 dom = iommu_get_domain_for_dev(adev->dev);
2791
2792 while (size) {
2793 phys_addr_t addr = *pos & PAGE_MASK;
2794 loff_t off = *pos & ~PAGE_MASK;
2795 size_t bytes = PAGE_SIZE - off;
2796 unsigned long pfn;
2797 struct page *p;
2798 void *ptr;
2799
2800 bytes = min(bytes, size);
2801
2802 /* Translate the bus address to a physical address. If
2803 * the domain is NULL it means there is no IOMMU active
2804 * and the address translation is the identity
2805 */
2806 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2807
2808 pfn = addr >> PAGE_SHIFT;
2809 if (!pfn_valid(pfn))
2810 return -EPERM;
2811
2812 p = pfn_to_page(pfn);
2813 if (p->mapping != adev->mman.bdev.dev_mapping)
2814 return -EPERM;
2815
2816 ptr = kmap_local_page(p);
2817 r = copy_to_user(buf, ptr + off, bytes);
2818 kunmap_local(ptr);
2819 if (r)
2820 return -EFAULT;
2821
2822 size -= bytes;
2823 *pos += bytes;
2824 result += bytes;
2825 }
2826
2827 return result;
2828 }
2829
2830 /*
2831 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2832 *
2833 * This function is used to write memory that has been mapped to the
2834 * GPU and the known addresses are not physical addresses but instead
2835 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2836 */
2837 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2838 size_t size, loff_t *pos)
2839 {
2840 struct amdgpu_device *adev = file_inode(f)->i_private;
2841 struct iommu_domain *dom;
2842 ssize_t result = 0;
2843 int r;
2844
2845 dom = iommu_get_domain_for_dev(adev->dev);
2846
2847 while (size) {
2848 phys_addr_t addr = *pos & PAGE_MASK;
2849 loff_t off = *pos & ~PAGE_MASK;
2850 size_t bytes = PAGE_SIZE - off;
2851 unsigned long pfn;
2852 struct page *p;
2853 void *ptr;
2854
2855 bytes = min(bytes, size);
2856
2857 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2858
2859 pfn = addr >> PAGE_SHIFT;
2860 if (!pfn_valid(pfn))
2861 return -EPERM;
2862
2863 p = pfn_to_page(pfn);
2864 if (p->mapping != adev->mman.bdev.dev_mapping)
2865 return -EPERM;
2866
2867 ptr = kmap_local_page(p);
2868 r = copy_from_user(ptr + off, buf, bytes);
2869 kunmap_local(ptr);
2870 if (r)
2871 return -EFAULT;
2872
2873 size -= bytes;
2874 *pos += bytes;
2875 result += bytes;
2876 }
2877
2878 return result;
2879 }
2880
2881 static const struct file_operations amdgpu_ttm_iomem_fops = {
2882 .owner = THIS_MODULE,
2883 .read = amdgpu_iomem_read,
2884 .write = amdgpu_iomem_write,
2885 .llseek = default_llseek
2886 };
2887
2888 #endif
2889
2890 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2891 {
2892 #if defined(CONFIG_DEBUG_FS)
2893 struct drm_minor *minor = adev_to_drm(adev)->primary;
2894 struct dentry *root = minor->debugfs_root;
2895
2896 debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2897 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2898 debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2899 &amdgpu_ttm_iomem_fops);
2900 debugfs_create_file("ttm_page_pool", 0444, root, adev,
2901 &amdgpu_ttm_page_pool_fops);
2902 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2903 TTM_PL_VRAM),
2904 root, "amdgpu_vram_mm");
2905 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2906 TTM_PL_TT),
2907 root, "amdgpu_gtt_mm");
2908 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2909 AMDGPU_PL_GDS),
2910 root, "amdgpu_gds_mm");
2911 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2912 AMDGPU_PL_GWS),
2913 root, "amdgpu_gws_mm");
2914 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2915 AMDGPU_PL_OA),
2916 root, "amdgpu_oa_mm");
2917
2918 #endif
2919 }
2920