1 /*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26 /*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32
33 #include <linux/dma-mapping.h>
34 #include <linux/iommu.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched/task.h>
37 #include <linux/sched/mm.h>
38 #include <linux/seq_file.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/dma-buf.h>
42 #include <linux/sizes.h>
43 #include <linux/module.h>
44
45 #include <drm/drm_drv.h>
46 #include <drm/ttm/ttm_bo.h>
47 #include <drm/ttm/ttm_placement.h>
48 #include <drm/ttm/ttm_range_manager.h>
49 #include <drm/ttm/ttm_tt.h>
50
51 #include <drm/amdgpu_drm.h>
52
53 #include "amdgpu.h"
54 #include "amdgpu_object.h"
55 #include "amdgpu_trace.h"
56 #include "amdgpu_amdkfd.h"
57 #include "amdgpu_sdma.h"
58 #include "amdgpu_ras.h"
59 #include "amdgpu_hmm.h"
60 #include "amdgpu_atomfirmware.h"
61 #include "amdgpu_res_cursor.h"
62 #include "bif/bif_4_1_d.h"
63
64 MODULE_IMPORT_NS("DMA_BUF");
65
66 #define AMDGPU_TTM_VRAM_MAX_DW_READ ((size_t)128)
67
68 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
69 struct ttm_tt *ttm,
70 struct ttm_resource *bo_mem);
71 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
72 struct ttm_tt *ttm);
73
74 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
75 unsigned int type,
76 uint64_t size_in_page)
77 {
78 if (!size_in_page)
79 return 0;
80
81 return ttm_range_man_init(&adev->mman.bdev, type,
82 false, size_in_page);
83 }
84
85 /**
86 * amdgpu_evict_flags - Compute placement flags
87 *
88 * @bo: The buffer object to evict
89 * @placement: Possible destination(s) for evicted BO
90 *
91 * Fill in placement data when ttm_bo_evict() is called
92 */
93 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
94 struct ttm_placement *placement)
95 {
96 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
97 struct amdgpu_bo *abo;
98 static const struct ttm_place placements = {
99 .fpfn = 0,
100 .lpfn = 0,
101 .mem_type = TTM_PL_SYSTEM,
102 .flags = 0
103 };
104
105 /* Don't handle scatter gather BOs */
106 if (bo->type == ttm_bo_type_sg) {
107 placement->num_placement = 0;
108 return;
109 }
110
111 /* Object isn't an AMDGPU object so ignore */
112 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
113 placement->placement = &placements;
114 placement->num_placement = 1;
115 return;
116 }
117
118 abo = ttm_to_amdgpu_bo(bo);
119 if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
120 placement->num_placement = 0;
121 return;
122 }
123
124 switch (bo->resource->mem_type) {
125 case AMDGPU_PL_GDS:
126 case AMDGPU_PL_GWS:
127 case AMDGPU_PL_OA:
128 case AMDGPU_PL_DOORBELL:
129 case AMDGPU_PL_MMIO_REMAP:
130 placement->num_placement = 0;
131 return;
132
133 case TTM_PL_VRAM:
134 if (!adev->mman.buffer_funcs_enabled) {
135 /* Move to system memory */
136 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
137
138 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
139 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
140 amdgpu_res_cpu_visible(adev, bo->resource)) {
141
142 /* Try evicting to the CPU inaccessible part of VRAM
143 * first, but only set GTT as busy placement, so this
144 * BO will be evicted to GTT rather than causing other
145 * BOs to be evicted from VRAM
146 */
147 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
148 AMDGPU_GEM_DOMAIN_GTT |
149 AMDGPU_GEM_DOMAIN_CPU);
150 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
151 abo->placements[0].lpfn = 0;
152 abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
153 } else {
154 /* Move to GTT memory */
155 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
156 AMDGPU_GEM_DOMAIN_CPU);
157 }
158 break;
159 case TTM_PL_TT:
160 case AMDGPU_PL_PREEMPT:
161 default:
162 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
163 break;
164 }
165 *placement = abo->placement;
166 }
167
168 static struct dma_fence *
169 amdgpu_ttm_job_submit(struct amdgpu_device *adev, struct amdgpu_ttm_buffer_entity *entity,
170 struct amdgpu_job *job, u32 num_dw)
171 {
172 struct amdgpu_ring *ring;
173
174 ring = adev->mman.buffer_funcs_ring;
175 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
176 WARN_ON(job->ibs[0].length_dw > num_dw);
177
178 lockdep_assert_held(&entity->lock);
179
180 return amdgpu_job_submit(job);
181 }
182
183 /**
184 * amdgpu_ttm_map_buffer - Map memory into the GART windows
185 * @entity: entity to run the window setup job
186 * @bo: buffer object to map
187 * @mem: memory object to map
188 * @mm_cur: range to map
189 * @window: which GART window to use
190 * @tmz: if we should setup a TMZ enabled mapping
191 * @size: in: number of bytes to map, out: number of bytes mapped
192 * @addr: resulting address inside the MC address space
193 *
194 * Set up one of the GART windows to access a specific piece of memory or return
195 * the physical address for local memory.
196 */
197 static int amdgpu_ttm_map_buffer(struct amdgpu_ttm_buffer_entity *entity,
198 struct ttm_buffer_object *bo,
199 struct ttm_resource *mem,
200 struct amdgpu_res_cursor *mm_cur,
201 unsigned int window,
202 bool tmz, uint64_t *size, uint64_t *addr)
203 {
204 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
205 unsigned int offset, num_pages, num_dw, num_bytes;
206 uint64_t src_addr, dst_addr;
207 struct amdgpu_job *job;
208 void *cpu_addr;
209 uint64_t flags;
210 int r;
211
212 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
213 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
214
215 if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
216 return -EINVAL;
217
218 /* Map only what can't be accessed directly */
219 if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
220 *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
221 mm_cur->start;
222 return 0;
223 }
224
225
226 /*
227 * If start begins at an offset inside the page, then adjust the size
228 * and addr accordingly
229 */
230 offset = mm_cur->start & ~PAGE_MASK;
231
232 num_pages = PFN_UP(*size + offset);
233 num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
234
235 *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
236
237 *addr = amdgpu_compute_gart_address(&adev->gmc, entity, window);
238 *addr += offset;
239
240 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
241 num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
242
243 r = amdgpu_job_alloc_with_ib(adev, &entity->base,
244 AMDGPU_FENCE_OWNER_UNDEFINED,
245 num_dw * 4 + num_bytes,
246 AMDGPU_IB_POOL_DELAYED, &job,
247 AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
248 if (r)
249 return r;
250
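	/* The PTEs are staged at the end of the IB, right after the command
	 * stream, and the copy below moves them into the GART table entries
	 * that back this window.
	 */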
251 src_addr = num_dw * 4;
252 src_addr += job->ibs[0].gpu_addr;
253
254 dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
255 dst_addr += (entity->gart_window_offs[window] >> AMDGPU_GPU_PAGE_SHIFT) * 8;
256 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
257 dst_addr, num_bytes, 0);
258
259 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
260 if (tmz)
261 flags |= AMDGPU_PTE_TMZ;
262
263 cpu_addr = &job->ibs[0].ptr[num_dw];
264
265 if (mem->mem_type == TTM_PL_TT) {
266 dma_addr_t *dma_addr;
267
268 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
269 amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
270 } else {
271 u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset;
272
273 amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
274 }
275
276 dma_fence_put(amdgpu_ttm_job_submit(adev, entity, job, num_dw));
277 return 0;
278 }
279
280 /**
281 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
282 * @adev: amdgpu device
283 * @entity: entity to run the jobs
284 * @src: buffer/address where to read from
285 * @dst: buffer/address where to write to
286 * @size: number of bytes to copy
287 * @tmz: if a secure copy should be used
288 * @resv: resv object to sync to
289 * @f: Returns the last fence if multiple jobs are submitted.
290 *
291 * The function copies @size bytes from {src->mem + src->offset} to
292 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
293 * move and different for a BO to BO copy.
294 *
295 */
296 __attribute__((nonnull))
297 static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
298 struct amdgpu_ttm_buffer_entity *entity,
299 const struct amdgpu_copy_mem *src,
300 const struct amdgpu_copy_mem *dst,
301 uint64_t size, bool tmz,
302 struct dma_resv *resv,
303 struct dma_fence **f)
304 {
305 struct amdgpu_res_cursor src_mm, dst_mm;
306 struct dma_fence *fence = NULL;
307 int r = 0;
308 uint32_t copy_flags = 0;
309 struct amdgpu_bo *abo_src, *abo_dst;
310
311 if (!adev->mman.buffer_funcs_enabled) {
312 dev_err(adev->dev,
313 "Trying to move memory with ring turned off.\n");
314 return -EINVAL;
315 }
316
317 amdgpu_res_first(src->mem, src->offset, size, &src_mm);
318 amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
319
320 mutex_lock(&entity->lock);
321 while (src_mm.remaining) {
322 uint64_t from, to, cur_size, tiling_flags;
323 uint32_t num_type, data_format, max_com, write_compress_disable;
324 struct dma_fence *next;
325
326 /* Never copy more than 256MiB at once to avoid a timeout */
327 cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
328
329 /* Map src to window 0 and dst to window 1. */
330 r = amdgpu_ttm_map_buffer(entity, src->bo, src->mem, &src_mm,
331 0, tmz, &cur_size, &from);
332 if (r)
333 goto error;
334
335 r = amdgpu_ttm_map_buffer(entity, dst->bo, dst->mem, &dst_mm,
336 1, tmz, &cur_size, &to);
337 if (r)
338 goto error;
339
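		/* For GFX12 DCC surfaces in VRAM, decompress on the read side
		 * and recompress on the write side, carrying the DCC parameters
		 * from the destination BO's tiling flags.
		 */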
340 abo_src = ttm_to_amdgpu_bo(src->bo);
341 abo_dst = ttm_to_amdgpu_bo(dst->bo);
342 if (tmz)
343 copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
344 if ((abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
345 (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))
346 copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
347 if ((abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
348 (dst->mem->mem_type == TTM_PL_VRAM)) {
349 copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
350 amdgpu_bo_get_tiling_flags(abo_dst, &tiling_flags);
351 max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
352 num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
353 data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
354 write_compress_disable =
355 AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
356 copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
357 AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
358 AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
359 AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
360 write_compress_disable));
361 }
362
363 r = amdgpu_copy_buffer(adev, entity, from, to, cur_size, resv,
364 &next, true, copy_flags);
365 if (r)
366 goto error;
367
368 dma_fence_put(fence);
369 fence = next;
370
371 amdgpu_res_next(&src_mm, cur_size);
372 amdgpu_res_next(&dst_mm, cur_size);
373 }
374 error:
375 mutex_unlock(&entity->lock);
376 *f = fence;
377 return r;
378 }
379
380 /*
381 * amdgpu_move_blit - Copy an entire buffer to another buffer
382 *
383 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
384 * help move buffers to and from VRAM.
385 */
386 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
387 bool evict,
388 struct ttm_resource *new_mem,
389 struct ttm_resource *old_mem)
390 {
391 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
392 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
393 struct amdgpu_ttm_buffer_entity *entity;
394 struct amdgpu_copy_mem src, dst;
395 struct dma_fence *fence = NULL;
396 int r;
397 u32 e;
398
399 src.bo = bo;
400 dst.bo = bo;
401 src.mem = old_mem;
402 dst.mem = new_mem;
403 src.offset = 0;
404 dst.offset = 0;
405
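	/* Pick a move entity round-robin so copy jobs are spread across all
	 * available buffer entities.
	 */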
406 e = atomic_inc_return(&adev->mman.next_move_entity) %
407 adev->mman.num_move_entities;
408 entity = &adev->mman.move_entities[e];
409
410 r = amdgpu_ttm_copy_mem_to_mem(adev,
411 entity,
412 &src, &dst,
413 new_mem->size,
414 amdgpu_bo_encrypted(abo),
415 bo->base.resv, &fence);
416 if (r)
417 goto error;
418
419 /* clear the space being freed */
420 if (old_mem->mem_type == TTM_PL_VRAM &&
421 (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
422 struct dma_fence *wipe_fence = NULL;
423 r = amdgpu_fill_buffer(entity, abo, 0, NULL, &wipe_fence,
424 AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
425 if (r) {
426 goto error;
427 } else if (wipe_fence) {
428 amdgpu_vram_mgr_set_cleared(bo->resource);
429 dma_fence_put(fence);
430 fence = wipe_fence;
431 }
432 }
433
434 /* Always block for VM page tables before committing the new location */
435 if (bo->type == ttm_bo_type_kernel)
436 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
437 else
438 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
439 dma_fence_put(fence);
440 return r;
441
442 error:
443 if (fence)
444 dma_fence_wait(fence, false);
445 dma_fence_put(fence);
446 return r;
447 }
448
449 /**
450 * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
451 * @adev: amdgpu device
452 * @res: the resource to check
453 *
454 * Returns: true if the full resource is CPU visible, false otherwise.
455 */
456 bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
457 struct ttm_resource *res)
458 {
459 struct amdgpu_res_cursor cursor;
460
461 if (!res)
462 return false;
463
464 if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
465 res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
466 res->mem_type == AMDGPU_PL_MMIO_REMAP)
467 return true;
468
469 if (res->mem_type != TTM_PL_VRAM)
470 return false;
471
472 amdgpu_res_first(res, 0, res->size, &cursor);
473 while (cursor.remaining) {
474 if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
475 return false;
476 amdgpu_res_next(&cursor, cursor.size);
477 }
478
479 return true;
480 }
481
482 /*
483 * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
484 *
485 * Called by amdgpu_bo_move()
486 */
487 static bool amdgpu_res_copyable(struct amdgpu_device *adev,
488 struct ttm_resource *mem)
489 {
490 if (!amdgpu_res_cpu_visible(adev, mem))
491 return false;
492
493 /* ttm_resource_ioremap only supports contiguous memory */
494 if (mem->mem_type == TTM_PL_VRAM &&
495 !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
496 return false;
497
498 return true;
499 }
500
501 /*
502 * amdgpu_bo_move - Move a buffer object to a new memory location
503 *
504 * Called by ttm_bo_handle_move_mem()
505 */
506 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
507 struct ttm_operation_ctx *ctx,
508 struct ttm_resource *new_mem,
509 struct ttm_place *hop)
510 {
511 struct amdgpu_device *adev;
512 struct amdgpu_bo *abo;
513 struct ttm_resource *old_mem = bo->resource;
514 int r;
515
516 if (new_mem->mem_type == TTM_PL_TT ||
517 new_mem->mem_type == AMDGPU_PL_PREEMPT) {
518 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
519 if (r)
520 return r;
521 }
522
523 abo = ttm_to_amdgpu_bo(bo);
524 adev = amdgpu_ttm_adev(bo->bdev);
525
526 if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
527 bo->ttm == NULL)) {
528 amdgpu_bo_move_notify(bo, evict, new_mem);
529 ttm_bo_move_null(bo, new_mem);
530 return 0;
531 }
532 if (old_mem->mem_type == TTM_PL_SYSTEM &&
533 (new_mem->mem_type == TTM_PL_TT ||
534 new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
535 amdgpu_bo_move_notify(bo, evict, new_mem);
536 ttm_bo_move_null(bo, new_mem);
537 return 0;
538 }
539 if ((old_mem->mem_type == TTM_PL_TT ||
540 old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
541 new_mem->mem_type == TTM_PL_SYSTEM) {
542 r = ttm_bo_wait_ctx(bo, ctx);
543 if (r)
544 return r;
545
546 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
547 amdgpu_bo_move_notify(bo, evict, new_mem);
548 ttm_resource_free(bo, &bo->resource);
549 ttm_bo_assign_mem(bo, new_mem);
550 return 0;
551 }
552
553 if (old_mem->mem_type == AMDGPU_PL_GDS ||
554 old_mem->mem_type == AMDGPU_PL_GWS ||
555 old_mem->mem_type == AMDGPU_PL_OA ||
556 old_mem->mem_type == AMDGPU_PL_DOORBELL ||
557 old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
558 new_mem->mem_type == AMDGPU_PL_GDS ||
559 new_mem->mem_type == AMDGPU_PL_GWS ||
560 new_mem->mem_type == AMDGPU_PL_OA ||
561 new_mem->mem_type == AMDGPU_PL_DOORBELL ||
562 new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
563 /* Nothing to save here */
564 amdgpu_bo_move_notify(bo, evict, new_mem);
565 ttm_bo_move_null(bo, new_mem);
566 return 0;
567 }
568
569 if (bo->type == ttm_bo_type_device &&
570 new_mem->mem_type == TTM_PL_VRAM &&
571 old_mem->mem_type != TTM_PL_VRAM) {
572 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
573 * accesses the BO after it's moved.
574 */
575 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
576 }
577
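	/* The copy engine can only reach GART-bound pages, so direct
	 * SYSTEM <-> VRAM moves have to bounce through a temporary GTT
	 * placement.
	 */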
578 if (adev->mman.buffer_funcs_enabled &&
579 ((old_mem->mem_type == TTM_PL_SYSTEM &&
580 new_mem->mem_type == TTM_PL_VRAM) ||
581 (old_mem->mem_type == TTM_PL_VRAM &&
582 new_mem->mem_type == TTM_PL_SYSTEM))) {
583 hop->fpfn = 0;
584 hop->lpfn = 0;
585 hop->mem_type = TTM_PL_TT;
586 hop->flags = TTM_PL_FLAG_TEMPORARY;
587 return -EMULTIHOP;
588 }
589
590 amdgpu_bo_move_notify(bo, evict, new_mem);
591 if (adev->mman.buffer_funcs_enabled)
592 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
593 else
594 r = -ENODEV;
595
596 if (r) {
597 /* Check that all memory is CPU accessible */
598 if (!amdgpu_res_copyable(adev, old_mem) ||
599 !amdgpu_res_copyable(adev, new_mem)) {
600 pr_err("Move buffer fallback to memcpy unavailable\n");
601 return r;
602 }
603
604 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
605 if (r)
606 return r;
607 }
608
609 /* update statistics after the move */
610 if (evict)
611 atomic64_inc(&adev->num_evictions);
612 atomic64_add(bo->base.size, &adev->num_bytes_moved);
613 return 0;
614 }
615
616 /*
617 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
618 *
619 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
620 */
621 static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
622 struct ttm_resource *mem)
623 {
624 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
625
626 switch (mem->mem_type) {
627 case TTM_PL_SYSTEM:
628 /* system memory */
629 return 0;
630 case TTM_PL_TT:
631 case AMDGPU_PL_PREEMPT:
632 break;
633 case TTM_PL_VRAM:
634 mem->bus.offset = mem->start << PAGE_SHIFT;
635
636 if (adev->mman.aper_base_kaddr &&
637 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
638 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
639 mem->bus.offset;
640
641 mem->bus.offset += adev->gmc.aper_base;
642 mem->bus.is_iomem = true;
643 break;
644 case AMDGPU_PL_DOORBELL:
645 mem->bus.offset = mem->start << PAGE_SHIFT;
646 mem->bus.offset += adev->doorbell.base;
647 mem->bus.is_iomem = true;
648 mem->bus.caching = ttm_uncached;
649 break;
650 case AMDGPU_PL_MMIO_REMAP:
651 mem->bus.offset = mem->start << PAGE_SHIFT;
652 mem->bus.offset += adev->rmmio_remap.bus_addr;
653 mem->bus.is_iomem = true;
654 mem->bus.caching = ttm_uncached;
655 break;
656 default:
657 return -EINVAL;
658 }
659 return 0;
660 }
661
662 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
663 unsigned long page_offset)
664 {
665 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
666 struct amdgpu_res_cursor cursor;
667
668 amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
669 &cursor);
670
671 if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
672 return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
673 else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
674 return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
675
676 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
677 }
678
679 /**
680 * amdgpu_ttm_domain_start - Returns GPU start address
681 * @adev: amdgpu device object
682 * @type: type of the memory
683 *
684 * Returns:
685 * GPU start address of a memory domain
686 */
687
688 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
689 {
690 switch (type) {
691 case TTM_PL_TT:
692 return adev->gmc.gart_start;
693 case TTM_PL_VRAM:
694 return adev->gmc.vram_start;
695 }
696
697 return 0;
698 }
699
700 /*
701 * TTM backend functions.
702 */
703 struct amdgpu_ttm_tt {
704 struct ttm_tt ttm;
705 struct drm_gem_object *gobj;
706 u64 offset;
707 uint64_t userptr;
708 struct task_struct *usertask;
709 uint32_t userflags;
710 bool bound;
711 int32_t pool_id;
712 };
713
714 #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
715
716 #ifdef CONFIG_DRM_AMDGPU_USERPTR
717 /*
718 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
719 * memory and start HMM tracking CPU page table update
720 *
721 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
722 * once afterwards to stop HMM tracking. It is the caller's responsibility to
723 * ensure that the range points to valid memory and that it is eventually freed.
724 */
725 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
726 struct amdgpu_hmm_range *range)
727 {
728 struct ttm_tt *ttm = bo->tbo.ttm;
729 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
730 unsigned long start = gtt->userptr;
731 struct vm_area_struct *vma;
732 struct mm_struct *mm;
733 bool readonly;
734 int r = 0;
735
736 mm = bo->notifier.mm;
737 if (unlikely(!mm)) {
738 DRM_DEBUG_DRIVER("BO is not registered?\n");
739 return -EFAULT;
740 }
741
742 if (!mmget_not_zero(mm)) /* Happens during process shutdown */
743 return -ESRCH;
744
745 mmap_read_lock(mm);
746 vma = vma_lookup(mm, start);
747 if (unlikely(!vma)) {
748 r = -EFAULT;
749 goto out_unlock;
750 }
751 if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
752 vma->vm_file)) {
753 r = -EPERM;
754 goto out_unlock;
755 }
756
757 readonly = amdgpu_ttm_tt_is_readonly(ttm);
758 r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
759 readonly, NULL, range);
760 out_unlock:
761 mmap_read_unlock(mm);
762 if (r)
763 pr_debug("failed %d to get user pages 0x%lx\n", r, start);
764
765 mmput(mm);
766
767 return r;
768 }
769
770 #endif
771
772 /*
773 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
774 *
775 * Called by amdgpu_cs_list_validate(). This creates the page list
776 * that backs user memory and will ultimately be mapped into the device
777 * address space.
778 */
779 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
780 {
781 unsigned long i;
782
783 for (i = 0; i < ttm->num_pages; ++i)
784 ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
785 }
786
787 /*
788 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
789 *
790 * Called by amdgpu_ttm_backend_bind()
791 **/
792 static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
793 struct ttm_tt *ttm)
794 {
795 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
796 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
797 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
798 enum dma_data_direction direction = write ?
799 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
800 int r;
801
802 /* Allocate an SG array and squash pages into it */
803 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
804 (u64)ttm->num_pages << PAGE_SHIFT,
805 GFP_KERNEL);
806 if (r)
807 goto release_sg;
808
809 /* Map SG to device */
810 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
811 if (r)
812 goto release_sg_table;
813
814 /* convert SG to linear array of pages and dma addresses */
815 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
816 ttm->num_pages);
817
818 return 0;
819
820 release_sg_table:
821 sg_free_table(ttm->sg);
822 release_sg:
823 kfree(ttm->sg);
824 ttm->sg = NULL;
825 return r;
826 }
827
828 /*
829 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
830 */
831 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
832 struct ttm_tt *ttm)
833 {
834 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
835 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
836 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
837 enum dma_data_direction direction = write ?
838 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
839
840 /* double check that we don't free the table twice */
841 if (!ttm->sg || !ttm->sg->sgl)
842 return;
843
844 /* unmap the pages mapped to the device */
845 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
846 sg_free_table(ttm->sg);
847 }
848
849 /*
850 * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
851 * MQDn+CtrlStackn where n is the number of XCCs per partition.
852 * pages_per_xcc is the size of one MQD+CtrlStack. The first page is MQD
853 * and uses memory type default, UC. The rest of pages_per_xcc are
854 * Ctrl stack and modify their memory type to NC.
855 */
856 static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
857 struct ttm_tt *ttm, uint64_t flags)
858 {
859 struct amdgpu_ttm_tt *gtt = (void *)ttm;
860 uint64_t total_pages = ttm->num_pages;
861 int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
862 uint64_t page_idx, pages_per_xcc;
863 int i;
864
865 pages_per_xcc = total_pages;
866 do_div(pages_per_xcc, num_xcc);
867
868 for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
869 amdgpu_gart_map_gfx9_mqd(adev,
870 gtt->offset + (page_idx << PAGE_SHIFT),
871 pages_per_xcc, &gtt->ttm.dma_address[page_idx],
872 flags);
873 }
874 }
875
876 static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
877 struct ttm_buffer_object *tbo,
878 uint64_t flags)
879 {
880 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
881 struct ttm_tt *ttm = tbo->ttm;
882 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
883
884 if (amdgpu_bo_encrypted(abo))
885 flags |= AMDGPU_PTE_TMZ;
886
887 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
888 amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
889 } else {
890 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
891 gtt->ttm.dma_address, flags);
892 }
893 gtt->bound = true;
894 }
895
896 /*
897 * amdgpu_ttm_backend_bind - Bind GTT memory
898 *
899 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
900 * This handles binding GTT memory to the device address space.
901 */
902 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
903 struct ttm_tt *ttm,
904 struct ttm_resource *bo_mem)
905 {
906 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
907 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
908 uint64_t flags;
909 int r;
910
911 if (!bo_mem)
912 return -EINVAL;
913
914 if (gtt->bound)
915 return 0;
916
917 if (gtt->userptr) {
918 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
919 if (r) {
920 dev_err(adev->dev, "failed to pin userptr\n");
921 return r;
922 }
923 } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
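		/* dma-buf import: map the attachment on first bind and reuse
		 * the resulting sg table for subsequent binds.
		 */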
924 if (!ttm->sg) {
925 struct dma_buf_attachment *attach;
926 struct sg_table *sgt;
927
928 attach = gtt->gobj->import_attach;
929 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
930 if (IS_ERR(sgt))
931 return PTR_ERR(sgt);
932
933 ttm->sg = sgt;
934 }
935
936 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
937 ttm->num_pages);
938 }
939
940 if (!ttm->num_pages) {
941 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
942 ttm->num_pages, bo_mem, ttm);
943 }
944
945 if (bo_mem->mem_type != TTM_PL_TT ||
946 !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
947 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
948 return 0;
949 }
950
951 /* compute PTE flags relevant to this BO memory */
952 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
953
954 /* bind pages into GART page tables */
955 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
956 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
957 gtt->ttm.dma_address, flags);
958 gtt->bound = true;
959 return 0;
960 }
961
962 /*
963 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
964 * through AGP or GART aperture.
965 *
966 * If bo is accessible through AGP aperture, then use AGP aperture
967 * to access bo; otherwise allocate logical space in GART aperture
968 * and map bo to GART aperture.
969 */
970 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
971 {
972 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
973 struct ttm_operation_ctx ctx = { false, false };
974 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
975 struct ttm_placement placement;
976 struct ttm_place placements;
977 struct ttm_resource *tmp;
978 uint64_t addr, flags;
979 int r;
980
981 if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
982 return 0;
983
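	/* BOs reachable through the AGP aperture can be accessed directly
	 * and don't need a GART mapping.
	 */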
984 addr = amdgpu_gmc_agp_addr(bo);
985 if (addr != AMDGPU_BO_INVALID_OFFSET)
986 return 0;
987
988 /* allocate GART space */
989 placement.num_placement = 1;
990 placement.placement = &placements;
991 placements.fpfn = 0;
992 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
993 placements.mem_type = TTM_PL_TT;
994 placements.flags = bo->resource->placement;
995
996 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
997 if (unlikely(r))
998 return r;
999
1000 /* compute PTE flags for this buffer object */
1001 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
1002
1003 /* Bind pages */
1004 gtt->offset = (u64)tmp->start << PAGE_SHIFT;
1005 amdgpu_ttm_gart_bind(adev, bo, flags);
1006 amdgpu_gart_invalidate_tlb(adev);
1007 ttm_resource_free(bo, &bo->resource);
1008 ttm_bo_assign_mem(bo, tmp);
1009
1010 return 0;
1011 }
1012
1013 /*
1014 * amdgpu_ttm_recover_gart - Rebind GTT pages
1015 *
1016 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1017 * rebind GTT pages during a GPU reset.
1018 */
1019 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1020 {
1021 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1022 uint64_t flags;
1023
1024 if (!tbo->ttm)
1025 return;
1026
1027 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
1028 amdgpu_ttm_gart_bind(adev, tbo, flags);
1029 }
1030
1031 /*
1032 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1033 *
1034 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1035 * ttm_tt_destroy().
1036 */
1037 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1038 struct ttm_tt *ttm)
1039 {
1040 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1041 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1042
1043 /* if the pages have userptr pinning then clear that first */
1044 if (gtt->userptr) {
1045 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1046 } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
1047 struct dma_buf_attachment *attach;
1048
1049 attach = gtt->gobj->import_attach;
1050 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1051 ttm->sg = NULL;
1052 }
1053
1054 if (!gtt->bound)
1055 return;
1056
1057 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1058 return;
1059
1060 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1061 amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1062 gtt->bound = false;
1063 }
1064
1065 static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1066 struct ttm_tt *ttm)
1067 {
1068 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1069
1070 if (gtt->usertask)
1071 put_task_struct(gtt->usertask);
1072
1073 ttm_tt_fini(&gtt->ttm);
1074 kfree(gtt);
1075 }
1076
1077 /**
1078 * amdgpu_ttm_mmio_remap_alloc_sgt - build an sg_table for MMIO_REMAP I/O aperture
1079 * @adev: amdgpu device providing the remap BAR base (adev->rmmio_remap.bus_addr)
1080 * @res: TTM resource of the BO to export; expected to live in AMDGPU_PL_MMIO_REMAP
1081 * @dev: importing device to map for (typically @attach->dev in dma-buf paths)
1082 * @dir: DMA data direction for the importer (passed to dma_map_resource())
1083 * @sgt: output; on success, set to a newly allocated sg_table describing the I/O span
1084 *
1085 * The HDP flush page (AMDGPU_PL_MMIO_REMAP) is a fixed hardware I/O window in a PCI
1086 * BAR; there are no struct pages to back it. Importers still need a DMA address list,
1087 * so we synthesize a minimal sg_table and populate it from dma_map_resource(), not
1088 * from pages. Using the common amdgpu_res_cursor walker keeps the offset/size math
1089 * consistent with other TTM/manager users.
1090 *
1091 * - @res is assumed to be a small, contiguous I/O region (typically a single 4 KiB
1092 * page) in AMDGPU_PL_MMIO_REMAP. Callers should validate placement before calling.
1093 * - The sg entry is created with sg_set_page(sg, NULL, …) to reflect I/O space.
1094 * - The mapping uses DMA_ATTR_SKIP_CPU_SYNC because this is MMIO, not cacheable RAM.
1095 * - Peer reachability / p2pdma policy checks must be done by the caller.
1096 *
1097 * Return:
1098 * * 0 on success, with *@sgt set to a valid table that must be freed via
1099 * amdgpu_ttm_mmio_remap_free_sgt().
1100 * * -ENOMEM if allocation of the sg_table fails.
1101 * * -EIO if dma_map_resource() fails.
1102 *
1103 */
1104 int amdgpu_ttm_mmio_remap_alloc_sgt(struct amdgpu_device *adev,
1105 struct ttm_resource *res,
1106 struct device *dev,
1107 enum dma_data_direction dir,
1108 struct sg_table **sgt)
1109 {
1110 struct amdgpu_res_cursor cur;
1111 dma_addr_t dma;
1112 resource_size_t phys;
1113 struct scatterlist *sg;
1114 int r;
1115
1116 /* Walk the resource once; MMIO_REMAP is expected to be contiguous+small. */
1117 amdgpu_res_first(res, 0, res->size, &cur);
1118
1119 /* Translate byte offset in the remap window into a host physical BAR address. */
1120 phys = adev->rmmio_remap.bus_addr + cur.start;
1121
1122 /* Build a single-entry sg_table mapped as I/O (no struct page backing). */
1123 *sgt = kzalloc_obj(**sgt);
1124 if (!*sgt)
1125 return -ENOMEM;
1126 r = sg_alloc_table(*sgt, 1, GFP_KERNEL);
1127 if (r) {
1128 kfree(*sgt);
1129 return r;
1130 }
1131 sg = (*sgt)->sgl;
1132 sg_set_page(sg, NULL, cur.size, 0); /* WHY: I/O space → no pages */
1133
1134 dma = dma_map_resource(dev, phys, cur.size, dir, DMA_ATTR_SKIP_CPU_SYNC);
1135 if (dma_mapping_error(dev, dma)) {
1136 sg_free_table(*sgt);
1137 kfree(*sgt);
1138 return -EIO;
1139 }
1140 sg_dma_address(sg) = dma;
1141 sg_dma_len(sg) = cur.size;
1142 return 0;
1143 }
1144
1145 void amdgpu_ttm_mmio_remap_free_sgt(struct device *dev,
1146 enum dma_data_direction dir,
1147 struct sg_table *sgt)
1148 {
1149 struct scatterlist *sg = sgt->sgl;
1150
1151 dma_unmap_resource(dev, sg_dma_address(sg), sg_dma_len(sg),
1152 dir, DMA_ATTR_SKIP_CPU_SYNC);
1153 sg_free_table(sgt);
1154 kfree(sgt);
1155 }
1156
1157 /**
1158 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1159 *
1160 * @bo: The buffer object to create a GTT ttm_tt object around
1161 * @page_flags: Page flags to be added to the ttm_tt object
1162 *
1163 * Called by ttm_tt_create().
1164 */
1165 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1166 uint32_t page_flags)
1167 {
1168 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1169 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1170 struct amdgpu_ttm_tt *gtt;
1171 enum ttm_caching caching;
1172
1173 gtt = kzalloc_obj(struct amdgpu_ttm_tt);
1174 if (!gtt)
1175 return NULL;
1176
1177 gtt->gobj = &bo->base;
1178 if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
1179 gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
1180 else
1181 gtt->pool_id = abo->xcp_id;
1182
1183 if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1184 caching = ttm_write_combined;
1185 else
1186 caching = ttm_cached;
1187
1188 /* allocate space for the uninitialized page entries */
1189 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1190 kfree(gtt);
1191 return NULL;
1192 }
1193 return &gtt->ttm;
1194 }
1195
1196 /*
1197 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1198 *
1199 * Map the pages of a ttm_tt object to an address space visible
1200 * to the underlying device.
1201 */
1202 static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1203 struct ttm_tt *ttm,
1204 struct ttm_operation_ctx *ctx)
1205 {
1206 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1207 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1208 struct ttm_pool *pool;
1209 pgoff_t i;
1210 int ret;
1211
1212 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1213 if (gtt->userptr) {
1214 ttm->sg = kzalloc_obj(struct sg_table);
1215 if (!ttm->sg)
1216 return -ENOMEM;
1217 return 0;
1218 }
1219
1220 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1221 return 0;
1222
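	/* Use the per-partition pool when one exists for this BO, otherwise
	 * fall back to the device-global pool.
	 */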
1223 if (adev->mman.ttm_pools && gtt->pool_id >= 0)
1224 pool = &adev->mman.ttm_pools[gtt->pool_id];
1225 else
1226 pool = &adev->mman.bdev.pool;
1227 ret = ttm_pool_alloc(pool, ttm, ctx);
1228 if (ret)
1229 return ret;
1230
1231 for (i = 0; i < ttm->num_pages; ++i)
1232 ttm->pages[i]->mapping = bdev->dev_mapping;
1233
1234 return 0;
1235 }
1236
1237 /*
1238 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1239 *
1240 * Unmaps pages of a ttm_tt object from the device address space and
1241 * unpopulates the page array backing it.
1242 */
1243 static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1244 struct ttm_tt *ttm)
1245 {
1246 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1247 struct amdgpu_device *adev;
1248 struct ttm_pool *pool;
1249 pgoff_t i;
1250
1251 amdgpu_ttm_backend_unbind(bdev, ttm);
1252
1253 if (gtt->userptr) {
1254 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1255 kfree(ttm->sg);
1256 ttm->sg = NULL;
1257 return;
1258 }
1259
1260 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1261 return;
1262
1263 for (i = 0; i < ttm->num_pages; ++i)
1264 ttm->pages[i]->mapping = NULL;
1265
1266 adev = amdgpu_ttm_adev(bdev);
1267
1268 if (adev->mman.ttm_pools && gtt->pool_id >= 0)
1269 pool = &adev->mman.ttm_pools[gtt->pool_id];
1270 else
1271 pool = &adev->mman.bdev.pool;
1272
1273 return ttm_pool_free(pool, ttm);
1274 }
1275
1276 /**
1277 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
1278 * task
1279 *
1280 * @tbo: The ttm_buffer_object that contains the userptr
1281 * @user_addr: The returned value
1282 */
1283 int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
1284 uint64_t *user_addr)
1285 {
1286 struct amdgpu_ttm_tt *gtt;
1287
1288 if (!tbo->ttm)
1289 return -EINVAL;
1290
1291 gtt = (void *)tbo->ttm;
1292 *user_addr = gtt->userptr;
1293 return 0;
1294 }
1295
1296 /**
1297 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1298 * task
1299 *
1300 * @bo: The ttm_buffer_object to bind this userptr to
1301 * @addr: The address in the current tasks VM space to use
1302 * @flags: Requirements of userptr object.
1303 *
1304 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
1305 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
1306 * initialize GPU VM for a KFD process.
1307 */
1308 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1309 uint64_t addr, uint32_t flags)
1310 {
1311 struct amdgpu_ttm_tt *gtt;
1312
1313 if (!bo->ttm) {
1314 /* TODO: We want a separate TTM object type for userptrs */
1315 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1316 if (bo->ttm == NULL)
1317 return -ENOMEM;
1318 }
1319
1320 /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
1321 bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
1322
1323 gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
1324 gtt->userptr = addr;
1325 gtt->userflags = flags;
1326
1327 if (gtt->usertask)
1328 put_task_struct(gtt->usertask);
1329 gtt->usertask = current->group_leader;
1330 get_task_struct(gtt->usertask);
1331
1332 return 0;
1333 }
1334
1335 /*
1336 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1337 */
1338 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1339 {
1340 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1341
1342 if (gtt == NULL)
1343 return NULL;
1344
1345 if (gtt->usertask == NULL)
1346 return NULL;
1347
1348 return gtt->usertask->mm;
1349 }
1350
1351 /*
1352 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies within an
1353 * address range for the current task.
1354 *
1355 */
1356 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1357 unsigned long end, unsigned long *userptr)
1358 {
1359 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1360 unsigned long size;
1361
1362 if (gtt == NULL || !gtt->userptr)
1363 return false;
1364
1365 /* Return false if no part of the ttm_tt object lies within
1366 * the range
1367 */
1368 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1369 if (gtt->userptr > end || gtt->userptr + size <= start)
1370 return false;
1371
1372 if (userptr)
1373 *userptr = gtt->userptr;
1374 return true;
1375 }
1376
1377 /*
1378 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1379 */
1380 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1381 {
1382 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1383
1384 if (gtt == NULL || !gtt->userptr)
1385 return false;
1386
1387 return true;
1388 }
1389
1390 /*
1391 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1392 */
1393 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1394 {
1395 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1396
1397 if (gtt == NULL)
1398 return false;
1399
1400 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1401 }
1402
1403 /**
1404 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1405 *
1406 * @ttm: The ttm_tt object to compute the flags for
1407 * @mem: The memory registry backing this ttm_tt object
1408 *
1409 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1410 */
1411 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1412 {
1413 uint64_t flags = 0;
1414
1415 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1416 flags |= AMDGPU_PTE_VALID;
1417
1418 if (mem && (mem->mem_type == TTM_PL_TT ||
1419 mem->mem_type == AMDGPU_PL_DOORBELL ||
1420 mem->mem_type == AMDGPU_PL_PREEMPT ||
1421 mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
1422 flags |= AMDGPU_PTE_SYSTEM;
1423
1424 if (ttm && ttm->caching == ttm_cached)
1425 flags |= AMDGPU_PTE_SNOOPED;
1426 }
1427
1428 if (mem && mem->mem_type == TTM_PL_VRAM &&
1429 mem->bus.caching == ttm_cached)
1430 flags |= AMDGPU_PTE_SNOOPED;
1431
1432 return flags;
1433 }
1434
1435 /**
1436 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1437 *
1438 * @adev: amdgpu_device pointer
1439 * @ttm: The ttm_tt object to compute the flags for
1440 * @mem: The memory registry backing this ttm_tt object
1441 *
1442 * Figure out the flags to use for a VM PTE (Page Table Entry).
1443 */
1444 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1445 struct ttm_resource *mem)
1446 {
1447 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1448
1449 flags |= adev->gart.gart_pte_flags;
1450 flags |= AMDGPU_PTE_READABLE;
1451
1452 if (!amdgpu_ttm_tt_is_readonly(ttm))
1453 flags |= AMDGPU_PTE_WRITEABLE;
1454
1455 return flags;
1456 }
1457
1458 /*
1459 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1460 * object.
1461 *
1462 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1463 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1464 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1465 * used to clean out a memory space.
1466 */
1467 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1468 const struct ttm_place *place)
1469 {
1470 struct dma_resv_iter resv_cursor;
1471 struct dma_fence *f;
1472
1473 if (!amdgpu_bo_is_amdgpu_bo(bo))
1474 return ttm_bo_eviction_valuable(bo, place);
1475
1476 /* Swapout? */
1477 if (bo->resource->mem_type == TTM_PL_SYSTEM)
1478 return true;
1479
1480 if (bo->type == ttm_bo_type_kernel &&
1481 !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1482 return false;
1483
1484 /* If bo is a KFD BO, check if the bo belongs to the current process.
1485 * If true, then return false as any KFD process needs all its BOs to
1486 * be resident to run successfully
1487 */
1488 dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
1489 DMA_RESV_USAGE_BOOKKEEP, f) {
1490 if (amdkfd_fence_check_mm(f, current->mm) &&
1491 !(place->flags & TTM_PL_FLAG_CONTIGUOUS))
1492 return false;
1493 }
1494
1495 /* Preemptible BOs don't own system resources managed by the
1496 * driver (pages, VRAM, GART space). They point to resources
1497 * owned by someone else (e.g. pageable memory in user mode
1498 * or a DMABuf). They are used in a preemptible context so we
1499 * can guarantee no deadlocks and good QoS in case of MMU
1500 * notifiers or DMABuf move notifiers from the resource owner.
1501 */
1502 if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
1503 return false;
1504
1505 if (bo->resource->mem_type == TTM_PL_TT &&
1506 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1507 return false;
1508
1509 return ttm_bo_eviction_valuable(bo, place);
1510 }
1511
1512 static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
1513 void *buf, size_t size, bool write)
1514 {
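	/* MM register access is 32-bit and dword aligned; unaligned head and
	 * tail bytes are handled with a read-modify-write of the containing
	 * dword.
	 */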
1515 while (size) {
1516 uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
1517 uint64_t bytes = 4 - (pos & 0x3);
1518 uint32_t shift = (pos & 0x3) * 8;
1519 uint32_t mask = 0xffffffff << shift;
1520 uint32_t value = 0;
1521
1522 if (size < bytes) {
1523 mask &= 0xffffffff >> (bytes - size) * 8;
1524 bytes = size;
1525 }
1526
1527 if (mask != 0xffffffff) {
1528 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
1529 if (write) {
1530 value &= ~mask;
1531 value |= (*(uint32_t *)buf << shift) & mask;
1532 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
1533 } else {
1534 value = (value & mask) >> shift;
1535 memcpy(buf, &value, bytes);
1536 }
1537 } else {
1538 amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
1539 }
1540
1541 pos += bytes;
1542 buf += bytes;
1543 size -= bytes;
1544 }
1545 }
1546
1547 static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
1548 unsigned long offset, void *buf,
1549 int len, int write)
1550 {
1551 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1552 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1553 struct amdgpu_res_cursor src_mm;
1554 struct amdgpu_job *job;
1555 struct dma_fence *fence;
1556 uint64_t src_addr, dst_addr;
1557 unsigned int num_dw;
1558 int r, idx;
1559
1560 if (len != PAGE_SIZE)
1561 return -EINVAL;
1562
1563 if (!adev->mman.sdma_access_ptr)
1564 return -EACCES;
1565
1566 if (!drm_dev_enter(adev_to_drm(adev), &idx))
1567 return -ENODEV;
1568
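	/* Writes are staged in the SDMA bounce buffer before the copy into
	 * VRAM; reads copy into the bounce buffer first and are copied out
	 * after the fence signals.
	 */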
1569 if (write)
1570 memcpy(adev->mman.sdma_access_ptr, buf, len);
1571
1572 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
1573 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.default_entity.base,
1574 AMDGPU_FENCE_OWNER_UNDEFINED,
1575 num_dw * 4, AMDGPU_IB_POOL_DELAYED,
1576 &job,
1577 AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
1578 if (r)
1579 goto out;
1580
1581 mutex_lock(&adev->mman.default_entity.lock);
1582 amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
1583 src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
1584 src_mm.start;
1585 dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
1586 if (write)
1587 swap(src_addr, dst_addr);
1588
1589 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
1590 PAGE_SIZE, 0);
1591
1592 fence = amdgpu_ttm_job_submit(adev, &adev->mman.default_entity, job, num_dw);
1593 mutex_unlock(&adev->mman.default_entity.lock);
1594
1595 if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
1596 r = -ETIMEDOUT;
1597 dma_fence_put(fence);
1598
1599 if (!(r || write))
1600 memcpy(buf, adev->mman.sdma_access_ptr, len);
1601 out:
1602 drm_dev_exit(idx);
1603 return r;
1604 }
1605
1606 /**
1607 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1608 *
1609 * @bo: The buffer object to read/write
1610 * @offset: Offset into buffer object
1611 * @buf: Secondary buffer to write/read from
1612 * @len: Length in bytes of access
1613 * @write: true if writing
1614 *
1615 * This is used to access VRAM that backs a buffer object via MMIO
1616 * access for debugging purposes.
1617 */
1618 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1619 unsigned long offset, void *buf, int len,
1620 int write)
1621 {
1622 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1623 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1624 struct amdgpu_res_cursor cursor;
1625 int ret = 0;
1626
1627 if (bo->resource->mem_type != TTM_PL_VRAM)
1628 return -EIO;
1629
1630 if (amdgpu_device_has_timeouts_enabled(adev) &&
1631 !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
1632 return len;
1633
1634 amdgpu_res_first(bo->resource, offset, len, &cursor);
1635 while (cursor.remaining) {
1636 size_t count, size = cursor.size;
1637 loff_t pos = cursor.start;
1638
1639 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
1640 size -= count;
1641 if (size) {
1642 /* use MM access for the rest of VRAM and to handle unaligned addresses */
1643 pos += count;
1644 buf += count;
1645 amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
1646 }
1647
1648 ret += cursor.size;
1649 buf += cursor.size;
1650 amdgpu_res_next(&cursor, cursor.size);
1651 }
1652
1653 return ret;
1654 }
1655
1656 static void
1657 amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1658 {
1659 amdgpu_bo_move_notify(bo, false, NULL);
1660 }
1661
1662 static struct ttm_device_funcs amdgpu_bo_driver = {
1663 .ttm_tt_create = &amdgpu_ttm_tt_create,
1664 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1665 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1666 .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1667 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1668 .evict_flags = &amdgpu_evict_flags,
1669 .move = &amdgpu_bo_move,
1670 .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1671 .release_notify = &amdgpu_bo_release_notify,
1672 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1673 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1674 .access_memory = &amdgpu_ttm_access_memory,
1675 };
1676
1677 void amdgpu_ttm_init_vram_resv(struct amdgpu_device *adev,
1678 enum amdgpu_resv_region_id id,
1679 uint64_t offset, uint64_t size,
1680 bool needs_cpu_map)
1681 {
1682 struct amdgpu_vram_resv *resv;
1683
1684 if (id >= AMDGPU_RESV_MAX)
1685 return;
1686
1687 resv = &adev->mman.resv_region[id];
1688 resv->offset = offset;
1689 resv->size = size;
1690 resv->needs_cpu_map = needs_cpu_map;
1691 }
1692
1693 static void amdgpu_ttm_init_fw_resv_region(struct amdgpu_device *adev)
1694 {
1695 uint32_t reserve_size = 0;
1696
1697 if (!adev->discovery.reserve_tmr)
1698 return;
1699
1700 /*
1701 * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all
1702 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1703 *
1704 * Otherwise, fall back to the legacy approach of checking and reserving the tmr block for ip
1705 * discovery data and G6 memory training data respectively
1706 */
1707 if (adev->bios)
1708 reserve_size =
1709 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1710
1711 if (!adev->bios &&
1712 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1713 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
1714 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
1715 reserve_size = max(reserve_size, (uint32_t)280 << 20);
1716 else if (!adev->bios &&
1717 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
1718 if (hweight32(adev->aid_mask) == 1)
1719 reserve_size = max(reserve_size, (uint32_t)128 << 20);
1720 else
1721 reserve_size = max(reserve_size, (uint32_t)144 << 20);
1722 } else if (!reserve_size)
1723 reserve_size = DISCOVERY_TMR_OFFSET;
1724
1725 amdgpu_ttm_init_vram_resv(adev, AMDGPU_RESV_FW,
1726 adev->gmc.real_vram_size - reserve_size,
1727 reserve_size, false);
1728 }
1729
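/*
 * amdgpu_ttm_init_mem_train_resv_region - record the memory training region
 *
 * When ATOM firmware reports memory training support, record the GDDR6
 * training data block placed just below the firmware reservation.
 */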
1730 static void amdgpu_ttm_init_mem_train_resv_region(struct amdgpu_device *adev)
1731 {
1732 uint64_t reserve_size;
1733 uint64_t offset;
1734
1735 if (!adev->discovery.reserve_tmr)
1736 return;
1737
1738 if (!adev->bios || amdgpu_sriov_vf(adev))
1739 return;
1740
1741 if (!amdgpu_atomfirmware_mem_training_supported(adev))
1742 return;
1743
1744 reserve_size = adev->mman.resv_region[AMDGPU_RESV_FW].size;
1745 offset = ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
1746 amdgpu_ttm_init_vram_resv(adev, AMDGPU_RESV_MEM_TRAIN,
1747 offset,
1748 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES,
1749 false);
1750 }
1751
1752 static void amdgpu_ttm_init_vram_resv_regions(struct amdgpu_device *adev)
1753 {
1754 uint64_t vram_size = adev->gmc.visible_vram_size;
1755
1756 /* Initialize memory reservations as required for VGA.
1757 * This is used for VGA emulation and pre-OS scanout buffers to
1758 * avoid display artifacts while transitioning between pre-OS
1759 * and driver.
1760 */
1761 amdgpu_gmc_init_vga_resv_regions(adev);
1762 amdgpu_ttm_init_fw_resv_region(adev);
1763 amdgpu_ttm_init_mem_train_resv_region(adev);
1764
1765 if (adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].size > vram_size)
1766 adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].size = 0;
1767
1768 if (adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].size > vram_size)
1769 adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].size = 0;
1770 }
1771
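/**
 * amdgpu_ttm_mark_vram_reserved - back a recorded reservation with a kernel BO
 *
 * @adev: amdgpu_device pointer
 * @id: reservation region identifier
 *
 * Creates a kernel BO at the recorded offset/size, optionally with a CPU
 * mapping. Regions with a zero size are skipped. On failure the recorded
 * region information is cleared.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */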
1772 int amdgpu_ttm_mark_vram_reserved(struct amdgpu_device *adev,
1773 enum amdgpu_resv_region_id id)
1774 {
1775 struct amdgpu_vram_resv *resv;
1776 int ret;
1777
1778 if (id >= AMDGPU_RESV_MAX)
1779 return -EINVAL;
1780
1781 resv = &adev->mman.resv_region[id];
1782 if (!resv->size)
1783 return 0;
1784
1785 ret = amdgpu_bo_create_kernel_at(adev, resv->offset, resv->size,
1786 &resv->bo,
1787 resv->needs_cpu_map ? &resv->cpu_ptr : NULL);
1788 if (ret) {
1789 dev_err(adev->dev,
1790 "reserve vram failed: id=%d offset=0x%llx size=0x%llx ret=%d\n",
1791 id, resv->offset, resv->size, ret);
1792 memset(resv, 0, sizeof(*resv));
1793 }
1794
1795 return ret;
1796 }
1797
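/**
 * amdgpu_ttm_unmark_vram_reserved - release a reserved VRAM region
 *
 * @adev: amdgpu_device pointer
 * @id: reservation region identifier
 *
 * Frees the kernel BO backing the region (if any) and clears the recorded
 * region information.
 */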
1798 void amdgpu_ttm_unmark_vram_reserved(struct amdgpu_device *adev,
1799 enum amdgpu_resv_region_id id)
1800 {
1801 struct amdgpu_vram_resv *resv;
1802
1803 if (id >= AMDGPU_RESV_MAX)
1804 return;
1805
1806 resv = &adev->mman.resv_region[id];
1807 if (!resv->bo)
1808 return;
1809
1810 amdgpu_bo_free_kernel(&resv->bo, NULL,
1811 resv->needs_cpu_map ? &resv->cpu_ptr : NULL);
1812 memset(resv, 0, sizeof(*resv));
1813 }
1814
1815 /*
1816 * Reserve all regions with non-zero size. Regions whose info is not
1817 * yet available (e.g., fw extended region) may still be reserved
1818 * during runtime.
1819 */
1820 static int amdgpu_ttm_alloc_vram_resv_regions(struct amdgpu_device *adev)
1821 {
1822 int i, r;
1823
1824 for (i = 0; i < AMDGPU_RESV_MAX; i++) {
1825 r = amdgpu_ttm_mark_vram_reserved(adev, i);
1826 if (r)
1827 return r;
1828 }
1829
1830 return 0;
1831 }
1832
1833 /*
1834 * Memory training reservation functions
1835 */
1836
1837 /**
1838 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1839 *
1840 * @adev: amdgpu_device pointer
1841 *
1842 * Free the memory training reserved VRAM if it has been reserved.
1843 */
1844 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1845 {
1846 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1847
1848 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1849 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_MEM_TRAIN);
1850
1851 return 0;
1852 }
1853
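/*
 * amdgpu_ttm_training_data_block_init - set up the PSP memory training context
 *
 * Derives the c2p/p2c training data offsets and size from the
 * AMDGPU_RESV_MEM_TRAIN reservation.
 */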
1854 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1855 {
1856 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1857 struct amdgpu_vram_resv *resv =
1858 &adev->mman.resv_region[AMDGPU_RESV_MEM_TRAIN];
1859
1860 memset(ctx, 0, sizeof(*ctx));
1861
1862 ctx->c2p_train_data_offset = resv->offset;
1863 ctx->p2c_train_data_offset =
1864 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1865 ctx->train_data_size = resv->size;
1866
1867 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1868 ctx->train_data_size,
1869 ctx->p2c_train_data_offset,
1870 ctx->c2p_train_data_offset);
1871 }
1872
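/*
 * amdgpu_ttm_pools_init - create per-partition TTM page pools
 *
 * On app APUs with memory partitions, one pool is created per partition so
 * that pages are allocated from the partition's NUMA node.
 */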
1873 static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
1874 {
1875 int i;
1876
1877 if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
1878 return 0;
1879
1880 adev->mman.ttm_pools = kzalloc_objs(*adev->mman.ttm_pools,
1881 adev->gmc.num_mem_partitions);
1882 if (!adev->mman.ttm_pools)
1883 return -ENOMEM;
1884
1885 for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
1886 ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
1887 adev->gmc.mem_partitions[i].numa.node,
1888 TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
1889 }
1890 return 0;
1891 }
1892
1893 static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
1894 {
1895 int i;
1896
1897 if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
1898 return;
1899
1900 for (i = 0; i < adev->gmc.num_mem_partitions; i++)
1901 ttm_pool_fini(&adev->mman.ttm_pools[i]);
1902
1903 kfree(adev->mman.ttm_pools);
1904 adev->mman.ttm_pools = NULL;
1905 }
1906
1907 /**
1908 * amdgpu_ttm_alloc_mmio_remap_bo - Allocate the singleton MMIO_REMAP BO
1909 * @adev: amdgpu device
1910 *
1911 * Allocates a global BO with backing AMDGPU_PL_MMIO_REMAP when the
1912 * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
1913 * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
1914 * GEM object (amdgpu_bo_create).
1915 *
1916 * Return:
1917 * * 0 on success or intentional skip (feature not present/unsupported)
1918 * * negative errno on allocation failure
1919 */
1920 static int amdgpu_ttm_alloc_mmio_remap_bo(struct amdgpu_device *adev)
1921 {
1922 struct ttm_operation_ctx ctx = { false, false };
1923 struct ttm_placement placement;
1924 struct ttm_buffer_object *tbo;
1925 struct ttm_place placements;
1926 struct amdgpu_bo_param bp;
1927 struct ttm_resource *tmp;
1928 int r;
1929
1930 /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
1931 if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
1932 return 0;
1933
1934 /*
1935 * Allocate a BO first and then move it to AMDGPU_PL_MMIO_REMAP.
1936 * The initial TTM resource assigned by amdgpu_bo_create() is
1937 * replaced below with a fixed MMIO_REMAP placement.
1938 */
1939 memset(&bp, 0, sizeof(bp));
1940 bp.type = ttm_bo_type_device;
1941 bp.size = AMDGPU_GPU_PAGE_SIZE;
1942 bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
1943 bp.domain = 0;
1944 bp.flags = 0;
1945 bp.resv = NULL;
1946 bp.bo_ptr_size = sizeof(struct amdgpu_bo);
1947 r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
1948 if (r)
1949 return r;
1950
1951 r = amdgpu_bo_reserve(adev->rmmio_remap.bo, true);
1952 if (r)
1953 goto err_unref;
1954
1955 tbo = &adev->rmmio_remap.bo->tbo;
1956
1957 /*
1958 * MMIO_REMAP is a fixed I/O placement (AMDGPU_PL_MMIO_REMAP).
1959 */
1960 placement.num_placement = 1;
1961 placement.placement = &placements;
1962 placements.fpfn = 0;
1963 placements.lpfn = 0;
1964 placements.mem_type = AMDGPU_PL_MMIO_REMAP;
1965 placements.flags = 0;
1966 /* Force the BO into the fixed MMIO_REMAP placement */
1967 r = ttm_bo_mem_space(tbo, &placement, &tmp, &ctx);
1968 if (unlikely(r))
1969 goto err_unlock;
1970
1971 ttm_resource_free(tbo, &tbo->resource);
1972 ttm_bo_assign_mem(tbo, tmp);
1973 ttm_bo_pin(tbo);
1974
1975 amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1976 return 0;
1977
1978 err_unlock:
1979 amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1980
1981 err_unref:
1982 amdgpu_bo_unref(&adev->rmmio_remap.bo);
1983 adev->rmmio_remap.bo = NULL;
1984 return r;
1985 }
1986
1987 /**
1988 * amdgpu_ttm_free_mmio_remap_bo - Free the singleton MMIO_REMAP BO
1989 * @adev: amdgpu device
1990 *
1991 * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
1992 * amdgpu_ttm_alloc_mmio_remap_bo().
1993 */
1994 static void amdgpu_ttm_free_mmio_remap_bo(struct amdgpu_device *adev)
1995 {
1996 if (!adev->rmmio_remap.bo)
1997 return;
1998
1999 if (!amdgpu_bo_reserve(adev->rmmio_remap.bo, true)) {
2000 ttm_bo_unpin(&adev->rmmio_remap.bo->tbo);
2001 amdgpu_bo_unreserve(adev->rmmio_remap.bo);
2002 }
2003
2004 /*
2005 * At this point we rely on normal DRM teardown ordering:
2006 * no new user ioctls can access the global MMIO_REMAP BO
2007 * once TTM teardown begins.
2008 */
2009 amdgpu_bo_unref(&adev->rmmio_remap.bo);
2010 adev->rmmio_remap.bo = NULL;
2011 }
2012
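/*
 * amdgpu_ttm_buffer_entity_init - set up a TTM buffer entity
 *
 * Initializes the scheduler entity and, when num_gart_windows is non-zero,
 * allocates a contiguous block of GART entries and records the byte offset
 * of each transfer window in entity->gart_window_offs[].
 */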
2013 static int amdgpu_ttm_buffer_entity_init(struct amdgpu_gtt_mgr *mgr,
2014 struct amdgpu_ttm_buffer_entity *entity,
2015 enum drm_sched_priority prio,
2016 struct drm_gpu_scheduler **scheds,
2017 int num_schedulers,
2018 u32 num_gart_windows)
2019 {
2020 int i, r, num_pages;
2021
2022 r = drm_sched_entity_init(&entity->base, prio, scheds, num_schedulers, NULL);
2023 if (r)
2024 return r;
2025
2026 mutex_init(&entity->lock);
2027
2028 if (ARRAY_SIZE(entity->gart_window_offs) < num_gart_windows)
2029 return -EINVAL;
2030 if (num_gart_windows == 0)
2031 return 0;
2032
2033 num_pages = num_gart_windows * AMDGPU_GTT_MAX_TRANSFER_SIZE;
2034 r = amdgpu_gtt_mgr_alloc_entries(mgr, &entity->gart_node, num_pages,
2035 DRM_MM_INSERT_BEST);
2036 if (r) {
2037 drm_sched_entity_destroy(&entity->base);
2038 return r;
2039 }
2040
2041 for (i = 0; i < num_gart_windows; i++) {
2042 entity->gart_window_offs[i] =
2043 amdgpu_gtt_node_to_byte_offset(&entity->gart_node) +
2044 i * AMDGPU_GTT_MAX_TRANSFER_SIZE * PAGE_SIZE;
2045 }
2046
2047 return 0;
2048 }
2049
2050 static void amdgpu_ttm_buffer_entity_fini(struct amdgpu_gtt_mgr *mgr,
2051 struct amdgpu_ttm_buffer_entity *entity)
2052 {
2053 amdgpu_gtt_mgr_free_entries(mgr, &entity->gart_node);
2054 drm_sched_entity_destroy(&entity->base);
2055 }
2056
2057 /*
2058 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
2059 * gtt/vram related fields.
2060 *
2061 * This initializes all of the memory space pools that the TTM layer
2062 * will need such as the GTT space (system memory mapped to the device),
2063 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
2064 * can be mapped per VMID.
2065 */
2066 int amdgpu_ttm_init(struct amdgpu_device *adev)
2067 {
2068 uint64_t gtt_size;
2069 int r;
2070
2071 dma_set_max_seg_size(adev->dev, UINT_MAX);
2072 /* No other users of the address space, so set it to 0 */
2073 r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
2074 adev_to_drm(adev)->anon_inode->i_mapping,
2075 adev_to_drm(adev)->vma_offset_manager,
2076 (adev->need_swiotlb ?
2077 TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
2078 (dma_addressing_limited(adev->dev) ?
2079 TTM_ALLOCATION_POOL_USE_DMA32 : 0) |
2080 TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
2081 if (r) {
2082 dev_err(adev->dev,
2083 "failed initializing buffer object driver(%d).\n", r);
2084 return r;
2085 }
2086
2087 r = amdgpu_ttm_pools_init(adev);
2088 if (r) {
2089 dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
2090 return r;
2091 }
2092 adev->mman.initialized = true;
2093
2094 if (!adev->gmc.is_app_apu) {
2095 /* Initialize VRAM pool with all of VRAM divided into pages */
2096 r = amdgpu_vram_mgr_init(adev);
2097 if (r) {
2098 dev_err(adev->dev, "Failed initializing VRAM heap.\n");
2099 return r;
2100 }
2101 }
2102
2103 /* Change the size here instead of the init above so only lpfn is affected */
2104 amdgpu_ttm_set_buffer_funcs_status(adev, false);
2105 #ifdef CONFIG_64BIT
2106 #ifdef CONFIG_X86
2107 if (adev->gmc.xgmi.connected_to_cpu)
2108 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
2109 adev->gmc.visible_vram_size);
2110
2111 else if (adev->gmc.is_app_apu)
2112 DRM_DEBUG_DRIVER(
2113 "No need to ioremap when real vram size is 0\n");
2114 else
2115 #endif
2116 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
2117 adev->gmc.visible_vram_size);
2118 #endif
2119
2120 amdgpu_ttm_init_vram_resv_regions(adev);
2121
2122 r = amdgpu_ttm_alloc_vram_resv_regions(adev);
2123 if (r)
2124 return r;
2125
2126 if (adev->mman.resv_region[AMDGPU_RESV_MEM_TRAIN].size) {
2127 struct psp_memory_training_context *ctx =
2128 &adev->psp.mem_train_ctx;
2129
2130 amdgpu_ttm_training_data_block_init(adev);
2131 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
2132 }
2133
2134 dev_info(adev->dev, " %uM of VRAM memory ready\n",
2135 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
2136
2137 /* Compute GTT size, either based on TTM limit
2138 * or whatever the user passed on module init.
2139 */
2140 gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
2141 if (amdgpu_gtt_size != -1) {
2142 uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;
2143
2144 drm_warn(&adev->ddev,
2145 "Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
2146 if (gtt_size != configured_size)
2147 drm_warn(&adev->ddev,
2148 "GTT size has been set as %llu but TTM size has been set as %llu, this is unusual\n",
2149 configured_size, gtt_size);
2150
2151 gtt_size = configured_size;
2152 }
2153
2154 /* Initialize GTT memory pool */
2155 r = amdgpu_gtt_mgr_init(adev, gtt_size);
2156 if (r) {
2157 dev_err(adev->dev, "Failed initializing GTT heap.\n");
2158 return r;
2159 }
2160 dev_info(adev->dev, " %uM of GTT memory ready.\n",
2161 (unsigned int)(gtt_size / (1024 * 1024)));
2162
2163 if (adev->flags & AMD_IS_APU) {
2164 if (adev->gmc.real_vram_size < gtt_size)
2165 adev->apu_prefer_gtt = true;
2166 }
2167
2168 /* Initialize doorbell pool on PCI BAR */
2169 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
2170 if (r) {
2171 dev_err(adev->dev, "Failed initializing doorbell heap.\n");
2172 return r;
2173 }
2174
2175 /* Create a doorbell page for kernel usage */
2176 r = amdgpu_doorbell_create_kernel_doorbells(adev);
2177 if (r) {
2178 dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
2179 return r;
2180 }
2181
2182 /* Initialize MMIO-remap pool (single page 4K) */
2183 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
2184 if (r) {
2185 dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
2186 return r;
2187 }
2188
2189 /* Allocate the singleton MMIO_REMAP BO if supported */
2190 r = amdgpu_ttm_alloc_mmio_remap_bo(adev);
2191 if (r)
2192 return r;
2193
2194 /* Initialize preemptible memory pool */
2195 r = amdgpu_preempt_mgr_init(adev);
2196 if (r) {
2197 dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
2198 return r;
2199 }
2200
2201 /* Initialize various on-chip memory pools */
2202 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
2203 if (r) {
2204 dev_err(adev->dev, "Failed initializing GDS heap.\n");
2205 return r;
2206 }
2207
2208 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
2209 if (r) {
2210 dev_err(adev->dev, "Failed initializing gws heap.\n");
2211 return r;
2212 }
2213
2214 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
2215 if (r) {
2216 dev_err(adev->dev, "Failed initializing oa heap.\n");
2217 return r;
2218 }
2219 if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
2220 AMDGPU_GEM_DOMAIN_GTT,
2221 &adev->mman.sdma_access_bo, NULL,
2222 &adev->mman.sdma_access_ptr))
2223 drm_warn(adev_to_drm(adev),
2224 "Debug VRAM access will use slowpath MM access\n");
2225
2226 return 0;
2227 }
2228
2229 /*
2230 * amdgpu_ttm_fini - De-initialize the TTM memory pools
2231 */
2232 void amdgpu_ttm_fini(struct amdgpu_device *adev)
2233 {
2234 int idx;
2235
2236 if (!adev->mman.initialized)
2237 return;
2238
2239 amdgpu_ttm_pools_fini(adev);
2240
2241 amdgpu_ttm_training_reserve_vram_fini(adev);
2242 /* return the stolen vga memory back to VRAM */
2243 if (!adev->gmc.is_app_apu) {
2244 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_STOLEN_VGA);
2245 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_STOLEN_EXTENDED);
2246 /* return the FW reserved memory back to VRAM */
2247 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_FW);
2248 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_FW_EXTEND);
2249 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_STOLEN_RESERVED);
2250 }
2251 amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
2252 &adev->mman.sdma_access_ptr);
2253
2254 amdgpu_ttm_free_mmio_remap_bo(adev);
2255 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_FW_VRAM_USAGE);
2256 amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_DRV_VRAM_USAGE);
2257
2258 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
2259
2260 if (adev->mman.aper_base_kaddr)
2261 iounmap(adev->mman.aper_base_kaddr);
2262 adev->mman.aper_base_kaddr = NULL;
2263
2264 drm_dev_exit(idx);
2265 }
2266
2267 if (!adev->gmc.is_app_apu)
2268 amdgpu_vram_mgr_fini(adev);
2269 amdgpu_gtt_mgr_fini(adev);
2270 amdgpu_preempt_mgr_fini(adev);
2271 amdgpu_doorbell_fini(adev);
2272
2273 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
2274 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
2275 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
2276 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
2277 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
2278 ttm_device_fini(&adev->mman.bdev);
2279 adev->mman.initialized = false;
2280 dev_info(adev->dev, " ttm finalized\n");
2281 }
2282
2283 /**
2284 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
2285 *
2286 * @adev: amdgpu_device pointer
2287 * @enable: true when we can use buffer functions.
2288 *
2289 * Enable/disable use of buffer functions during suspend/resume. This should
2290 * only be called at bootup or when userspace isn't running.
2291 */
2292 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2293 {
2294 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
2295 u32 num_clear_entities, num_move_entities;
2296 uint64_t size;
2297 int r, i, j;
2298
2299 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
2300 adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
2301 return;
2302
2303 if (enable) {
2304 struct amdgpu_ring *ring;
2305 struct drm_gpu_scheduler *sched;
2306
2307 if (!adev->mman.buffer_funcs_ring || !adev->mman.buffer_funcs_ring->sched.ready) {
2308 dev_warn(adev->dev, "Not enabling DMA transfers for in kernel use");
2309 return;
2310 }
2311
2312 num_clear_entities = 1;
2313 num_move_entities = 1;
2314 ring = adev->mman.buffer_funcs_ring;
2315 sched = &ring->sched;
2316 r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
2317 &adev->mman.default_entity,
2318 DRM_SCHED_PRIORITY_KERNEL,
2319 &sched, 1, 0);
2320 if (r < 0) {
2321 dev_err(adev->dev,
2322 "Failed setting up TTM entity (%d)\n", r);
2323 return;
2324 }
2325
2326 adev->mman.clear_entities = kcalloc(num_clear_entities,
2327 sizeof(struct amdgpu_ttm_buffer_entity),
2328 GFP_KERNEL);
2329 atomic_set(&adev->mman.next_clear_entity, 0);
2330 if (!adev->mman.clear_entities)
2331 goto error_free_default_entity;
2332
2333 adev->mman.num_clear_entities = num_clear_entities;
2334
2335 for (i = 0; i < num_clear_entities; i++) {
2336 r = amdgpu_ttm_buffer_entity_init(
2337 &adev->mman.gtt_mgr, &adev->mman.clear_entities[i],
2338 DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 1);
2339
2340 if (r < 0) {
2341 for (j = 0; j < i; j++)
2342 amdgpu_ttm_buffer_entity_fini(
2343 &adev->mman.gtt_mgr, &adev->mman.clear_entities[j]);
2344 kfree(adev->mman.clear_entities);
2345 adev->mman.num_clear_entities = 0;
2346 adev->mman.clear_entities = NULL;
2347 goto error_free_default_entity;
2348 }
2349 }
2350
2351 adev->mman.num_move_entities = num_move_entities;
2352 atomic_set(&adev->mman.next_move_entity, 0);
2353 for (i = 0; i < num_move_entities; i++) {
2354 r = amdgpu_ttm_buffer_entity_init(
2355 &adev->mman.gtt_mgr,
2356 &adev->mman.move_entities[i],
2357 DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 2);
2358
2359 if (r < 0) {
2360 for (j = 0; j < i; j++)
2361 amdgpu_ttm_buffer_entity_fini(
2362 &adev->mman.gtt_mgr, &adev->mman.move_entities[j]);
2363 adev->mman.num_move_entities = 0;
2364 goto error_free_clear_entities;
2365 }
2366 }
2367 } else {
2368 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2369 &adev->mman.default_entity);
2370 for (i = 0; i < adev->mman.num_clear_entities; i++)
2371 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2372 &adev->mman.clear_entities[i]);
2373 for (i = 0; i < adev->mman.num_move_entities; i++)
2374 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2375 &adev->mman.move_entities[i]);
2376 /* Drop all the old fences since re-creating the scheduler entities
2377 * will allocate new contexts.
2378 */
2379 ttm_resource_manager_cleanup(man);
2380 kfree(adev->mman.clear_entities);
2381 adev->mman.clear_entities = NULL;
2382 adev->mman.num_clear_entities = 0;
2383 adev->mman.num_move_entities = 0;
2384 }
2385
2386 /* This just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
2387 if (enable)
2388 size = adev->gmc.real_vram_size;
2389 else
2390 size = adev->gmc.visible_vram_size;
2391 man->size = size;
2392 adev->mman.buffer_funcs_enabled = enable;
2393
2394 return;
2395
2396 error_free_clear_entities:
2397 for (i = 0; i < adev->mman.num_clear_entities; i++)
2398 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2399 &adev->mman.clear_entities[i]);
2400 kfree(adev->mman.clear_entities);
2401 adev->mman.clear_entities = NULL;
2402 adev->mman.num_clear_entities = 0;
2403 error_free_default_entity:
2404 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2405 &adev->mman.default_entity);
2406 }
2407
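/*
 * amdgpu_ttm_prepare_job - allocate a buffer-function job with an IB
 *
 * Allocates a job of num_dw dwords on the given entity, optionally requests
 * a VM flush using the PDB0/GART page directory address and adds the fences
 * of resv as bookkeeping dependencies.
 */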
2408 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
2409 struct amdgpu_ttm_buffer_entity *entity,
2410 unsigned int num_dw,
2411 struct dma_resv *resv,
2412 bool vm_needs_flush,
2413 struct amdgpu_job **job,
2414 u64 k_job_id)
2415 {
2416 enum amdgpu_ib_pool_type pool = AMDGPU_IB_POOL_DELAYED;
2417 int r;
2418 r = amdgpu_job_alloc_with_ib(adev, &entity->base,
2419 AMDGPU_FENCE_OWNER_UNDEFINED,
2420 num_dw * 4, pool, job, k_job_id);
2421 if (r)
2422 return r;
2423
2424 if (vm_needs_flush) {
2425 (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
2426 adev->gmc.pdb0_bo :
2427 adev->gart.bo);
2428 (*job)->vm_needs_flush = true;
2429 }
2430 if (!resv)
2431 return 0;
2432
2433 return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
2434 DMA_RESV_USAGE_BOOKKEEP);
2435 }
2436
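/**
 * amdgpu_copy_buffer - schedule a copy between two GPU addresses
 *
 * @adev: amdgpu_device pointer
 * @entity: TTM buffer entity used for the submission
 * @src_offset: source GPU address
 * @dst_offset: destination GPU address
 * @byte_count: number of bytes to copy
 * @resv: optional reservation object whose fences are added as dependencies
 * @fence: returned fence of the copy job
 * @vm_needs_flush: whether a VM flush is required before the copy
 * @copy_flags: flags passed on to the copy packet emission
 *
 * Splits the copy into chunks of at most copy_max_bytes and submits them as
 * one job on the buffer functions ring.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */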
2437 int amdgpu_copy_buffer(struct amdgpu_device *adev,
2438 struct amdgpu_ttm_buffer_entity *entity,
2439 uint64_t src_offset,
2440 uint64_t dst_offset, uint32_t byte_count,
2441 struct dma_resv *resv,
2442 struct dma_fence **fence,
2443 bool vm_needs_flush, uint32_t copy_flags)
2444 {
2445 unsigned int num_loops, num_dw;
2446 struct amdgpu_ring *ring;
2447 struct amdgpu_job *job;
2448 uint32_t max_bytes;
2449 unsigned int i;
2450 int r;
2451
2452 ring = adev->mman.buffer_funcs_ring;
2453
2454 if (!ring->sched.ready) {
2455 dev_err(adev->dev,
2456 "Trying to move memory with ring turned off.\n");
2457 return -EINVAL;
2458 }
2459
2460 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2461 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2462 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2463 r = amdgpu_ttm_prepare_job(adev, entity, num_dw,
2464 resv, vm_needs_flush, &job,
2465 AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
2466 if (r)
2467 goto error_free;
2468
2469 for (i = 0; i < num_loops; i++) {
2470 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2471
2472 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2473 dst_offset, cur_size_in_bytes, copy_flags);
2474 src_offset += cur_size_in_bytes;
2475 dst_offset += cur_size_in_bytes;
2476 byte_count -= cur_size_in_bytes;
2477 }
2478
2479 *fence = amdgpu_ttm_job_submit(adev, entity, job, num_dw);
2480
2481 return 0;
2482
2483 error_free:
2484 amdgpu_job_free(job);
2485 dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
2486 return r;
2487 }
2488
2489 static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev,
2490 struct amdgpu_ttm_buffer_entity *entity,
2491 uint32_t src_data,
2492 uint64_t dst_addr, uint32_t byte_count,
2493 struct dma_resv *resv,
2494 struct dma_fence **fence,
2495 bool vm_needs_flush,
2496 u64 k_job_id)
2497 {
2498 unsigned int num_loops, num_dw;
2499 struct amdgpu_job *job;
2500 uint32_t max_bytes;
2501 unsigned int i;
2502 int r;
2503
2504 max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2505 num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2506 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2507 r = amdgpu_ttm_prepare_job(adev, entity, num_dw, resv,
2508 vm_needs_flush, &job, k_job_id);
2509 if (r)
2510 return r;
2511
2512 for (i = 0; i < num_loops; i++) {
2513 uint32_t cur_size = min(byte_count, max_bytes);
2514
2515 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2516 cur_size);
2517
2518 dst_addr += cur_size;
2519 byte_count -= cur_size;
2520 }
2521
2522 *fence = amdgpu_ttm_job_submit(adev, entity, job, num_dw);
2523 return 0;
2524 }
2525
2526 /**
2527 * amdgpu_ttm_clear_buffer - clear memory buffers
2528 * @bo: amdgpu buffer object
2529 * @resv: reservation object
2530 * @fence: dma_fence associated with the operation
2531 *
2532 * Clear the memory buffer resource.
2533 *
2534 * Returns:
2535 * 0 for success or a negative error code on failure.
2536 */
2537 int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
2538 struct dma_resv *resv,
2539 struct dma_fence **fence)
2540 {
2541 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2542 struct amdgpu_ttm_buffer_entity *entity;
2543 struct amdgpu_res_cursor cursor;
2544 u64 addr;
2545 int r = 0;
2546
2547 if (!adev->mman.buffer_funcs_enabled)
2548 return -EINVAL;
2549
2550 if (!fence)
2551 return -EINVAL;
2552 entity = &adev->mman.clear_entities[0];
2553 *fence = dma_fence_get_stub();
2554
2555 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
2556
2557 mutex_lock(&entity->lock);
2558 while (cursor.remaining) {
2559 struct dma_fence *next = NULL;
2560 u64 size;
2561
2562 if (amdgpu_res_cleared(&cursor)) {
2563 amdgpu_res_next(&cursor, cursor.size);
2564 continue;
2565 }
2566
2567 /* Never clear more than 256MiB at once to avoid timeouts */
2568 size = min(cursor.size, 256ULL << 20);
2569
2570 r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &cursor,
2571 0, false, &size, &addr);
2572 if (r)
2573 goto err;
2574
2575 r = amdgpu_ttm_fill_mem(adev, entity, 0, addr, size, resv,
2576 &next, true,
2577 AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
2578 if (r)
2579 goto err;
2580
2581 dma_fence_put(*fence);
2582 *fence = next;
2583
2584 amdgpu_res_next(&cursor, size);
2585 }
2586 err:
2587 mutex_unlock(&entity->lock);
2588
2589 return r;
2590 }
2591
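/**
 * amdgpu_fill_buffer - fill a buffer object with a 32-bit pattern
 *
 * @entity: TTM buffer entity used for the submission
 * @bo: amdgpu buffer object to fill
 * @src_data: 32-bit pattern written to the buffer
 * @resv: optional reservation object whose fences are added as dependencies
 * @f: optional pointer receiving the fence of the last fill job
 * @k_job_id: kernel job identifier passed on to the job allocation
 *
 * Walks the BO's backing resource, mapping each chunk as needed, and fills
 * at most 256MiB per submission to avoid timeouts.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */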
2592 int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
2593 struct amdgpu_bo *bo,
2594 uint32_t src_data,
2595 struct dma_resv *resv,
2596 struct dma_fence **f,
2597 u64 k_job_id)
2598 {
2599 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2600 struct dma_fence *fence = NULL;
2601 struct amdgpu_res_cursor dst;
2602 int r;
2603
2604 if (!entity)
2605 return -EINVAL;
2606
2607 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
2608
2609 mutex_lock(&entity->lock);
2610 while (dst.remaining) {
2611 struct dma_fence *next;
2612 uint64_t cur_size, to;
2613
2614 /* Never fill more than 256MiB at once to avoid timeouts */
2615 cur_size = min(dst.size, 256ULL << 20);
2616
2617 r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &dst,
2618 0, false, &cur_size, &to);
2619 if (r)
2620 goto error;
2621
2622 r = amdgpu_ttm_fill_mem(adev, entity,
2623 src_data, to, cur_size, resv,
2624 &next, true, k_job_id);
2625 if (r)
2626 goto error;
2627
2628 dma_fence_put(fence);
2629 fence = next;
2630
2631 amdgpu_res_next(&dst, cur_size);
2632 }
2633 error:
2634 mutex_unlock(&entity->lock);
2635 if (f)
2636 *f = dma_fence_get(fence);
2637 dma_fence_put(fence);
2638 return r;
2639 }
2640
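/**
 * amdgpu_ttm_next_clear_entity - pick the next clear entity round-robin
 *
 * @adev: amdgpu_device pointer
 *
 * Returns:
 * Pointer to the next clear entity, or NULL if none were created.
 */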
2641 struct amdgpu_ttm_buffer_entity *
2642 amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev)
2643 {
2644 struct amdgpu_mman *mman = &adev->mman;
2645 u32 i;
2646
2647 if (mman->num_clear_entities == 0)
2648 return NULL;
2649
2650 i = atomic_inc_return(&mman->next_clear_entity) %
2651 mman->num_clear_entities;
2652 return &mman->clear_entities[i];
2653 }
2654
2655 /**
2656 * amdgpu_ttm_evict_resources - evict memory buffers
2657 * @adev: amdgpu device object
2658 * @mem_type: evicted BO's memory type
2659 *
2660 * Evicts all @mem_type buffers on the lru list of the memory type.
2661 *
2662 * Returns:
2663 * 0 for success or a negative error code on failure.
2664 */
2665 int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2666 {
2667 struct ttm_resource_manager *man;
2668
2669 switch (mem_type) {
2670 case TTM_PL_VRAM:
2671 case TTM_PL_TT:
2672 case AMDGPU_PL_GWS:
2673 case AMDGPU_PL_GDS:
2674 case AMDGPU_PL_OA:
2675 man = ttm_manager_type(&adev->mman.bdev, mem_type);
2676 break;
2677 default:
2678 dev_err(adev->dev, "Trying to evict invalid memory type\n");
2679 return -EINVAL;
2680 }
2681
2682 return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
2683 }
2684
2685 #if defined(CONFIG_DEBUG_FS)
2686
2687 static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2688 {
2689 struct amdgpu_device *adev = m->private;
2690
2691 return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2692 }
2693
2694 DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2695
2696 /*
2697 * amdgpu_ttm_vram_read - Linear read access to VRAM
2698 *
2699 * Accesses VRAM via MMIO for debugging purposes.
2700 */
2701 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2702 size_t size, loff_t *pos)
2703 {
2704 struct amdgpu_device *adev = file_inode(f)->i_private;
2705 ssize_t result = 0;
2706
2707 if (size & 0x3 || *pos & 0x3)
2708 return -EINVAL;
2709
2710 if (*pos >= adev->gmc.mc_vram_size)
2711 return -ENXIO;
2712
2713 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2714 while (size) {
2715 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2716 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2717
2718 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2719 if (copy_to_user(buf, value, bytes))
2720 return -EFAULT;
2721
2722 result += bytes;
2723 buf += bytes;
2724 *pos += bytes;
2725 size -= bytes;
2726 }
2727
2728 return result;
2729 }
2730
2731 /*
2732 * amdgpu_ttm_vram_write - Linear write access to VRAM
2733 *
2734 * Accesses VRAM via MMIO for debugging purposes.
2735 */
2736 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2737 size_t size, loff_t *pos)
2738 {
2739 struct amdgpu_device *adev = file_inode(f)->i_private;
2740 ssize_t result = 0;
2741 int r;
2742
2743 if (size & 0x3 || *pos & 0x3)
2744 return -EINVAL;
2745
2746 if (*pos >= adev->gmc.mc_vram_size)
2747 return -ENXIO;
2748
2749 while (size) {
2750 uint32_t value;
2751
2752 if (*pos >= adev->gmc.mc_vram_size)
2753 return result;
2754
2755 r = get_user(value, (uint32_t *)buf);
2756 if (r)
2757 return r;
2758
2759 amdgpu_device_mm_access(adev, *pos, &value, 4, true);
2760
2761 result += 4;
2762 buf += 4;
2763 *pos += 4;
2764 size -= 4;
2765 }
2766
2767 return result;
2768 }
2769
2770 static const struct file_operations amdgpu_ttm_vram_fops = {
2771 .owner = THIS_MODULE,
2772 .read = amdgpu_ttm_vram_read,
2773 .write = amdgpu_ttm_vram_write,
2774 .llseek = default_llseek,
2775 };
2776
2777 /*
2778 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2779 *
2780 * This function is used to read memory that has been mapped to the
2781 * GPU and the known addresses are not physical addresses but instead
2782 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2783 */
2784 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2785 size_t size, loff_t *pos)
2786 {
2787 struct amdgpu_device *adev = file_inode(f)->i_private;
2788 struct iommu_domain *dom;
2789 ssize_t result = 0;
2790 int r;
2791
2792 /* retrieve the IOMMU domain if any for this device */
2793 dom = iommu_get_domain_for_dev(adev->dev);
2794
2795 while (size) {
2796 phys_addr_t addr = *pos & PAGE_MASK;
2797 loff_t off = *pos & ~PAGE_MASK;
2798 size_t bytes = PAGE_SIZE - off;
2799 unsigned long pfn;
2800 struct page *p;
2801 void *ptr;
2802
2803 bytes = min(bytes, size);
2804
2805 /* Translate the bus address to a physical address. If
2806 * the domain is NULL it means there is no IOMMU active
2807 * and the address translation is the identity
2808 */
2809 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2810
2811 pfn = addr >> PAGE_SHIFT;
2812 if (!pfn_valid(pfn))
2813 return -EPERM;
2814
2815 p = pfn_to_page(pfn);
2816 if (p->mapping != adev->mman.bdev.dev_mapping)
2817 return -EPERM;
2818
2819 ptr = kmap_local_page(p);
2820 r = copy_to_user(buf, ptr + off, bytes);
2821 kunmap_local(ptr);
2822 if (r)
2823 return -EFAULT;
2824
2825 size -= bytes;
2826 *pos += bytes;
2827 result += bytes;
2828 }
2829
2830 return result;
2831 }
2832
2833 /*
2834 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2835 *
2836 * This function is used to write memory that has been mapped to the
2837 * GPU and the known addresses are not physical addresses but instead
2838 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2839 */
2840 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2841 size_t size, loff_t *pos)
2842 {
2843 struct amdgpu_device *adev = file_inode(f)->i_private;
2844 struct iommu_domain *dom;
2845 ssize_t result = 0;
2846 int r;
2847
2848 dom = iommu_get_domain_for_dev(adev->dev);
2849
2850 while (size) {
2851 phys_addr_t addr = *pos & PAGE_MASK;
2852 loff_t off = *pos & ~PAGE_MASK;
2853 size_t bytes = PAGE_SIZE - off;
2854 unsigned long pfn;
2855 struct page *p;
2856 void *ptr;
2857
2858 bytes = min(bytes, size);
2859
2860 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2861
2862 pfn = addr >> PAGE_SHIFT;
2863 if (!pfn_valid(pfn))
2864 return -EPERM;
2865
2866 p = pfn_to_page(pfn);
2867 if (p->mapping != adev->mman.bdev.dev_mapping)
2868 return -EPERM;
2869
2870 ptr = kmap_local_page(p);
2871 r = copy_from_user(ptr + off, buf, bytes);
2872 kunmap_local(ptr);
2873 if (r)
2874 return -EFAULT;
2875
2876 size -= bytes;
2877 *pos += bytes;
2878 result += bytes;
2879 }
2880
2881 return result;
2882 }
2883
2884 static const struct file_operations amdgpu_ttm_iomem_fops = {
2885 .owner = THIS_MODULE,
2886 .read = amdgpu_iomem_read,
2887 .write = amdgpu_iomem_write,
2888 .llseek = default_llseek
2889 };
2890
2891 #endif
2892
2893 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2894 {
2895 #if defined(CONFIG_DEBUG_FS)
2896 struct drm_minor *minor = adev_to_drm(adev)->primary;
2897 struct dentry *root = minor->debugfs_root;
2898
2899 debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2900 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2901 debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2902 &amdgpu_ttm_iomem_fops);
2903 debugfs_create_file("ttm_page_pool", 0444, root, adev,
2904 &amdgpu_ttm_page_pool_fops);
2905 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2906 TTM_PL_VRAM),
2907 root, "amdgpu_vram_mm");
2908 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2909 TTM_PL_TT),
2910 root, "amdgpu_gtt_mm");
2911 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2912 AMDGPU_PL_GDS),
2913 root, "amdgpu_gds_mm");
2914 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2915 AMDGPU_PL_GWS),
2916 root, "amdgpu_gws_mm");
2917 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2918 AMDGPU_PL_OA),
2919 root, "amdgpu_oa_mm");
2920
2921 #endif
2922 }
2923