// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

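	/*
	 * The IB holds num_dw dwords of copy packets followed by the GART
	 * page-table payload: one 8-byte PTE per page. The copy emitted
	 * below moves that payload from the IB into the GART table, so
	 * mapping and copying share a single job on the same ring.
	 */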
	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, 0);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	fence = amdgpu_job_submit(job);
	dma_fence_put(fence);

	return r;
}

/**
 * svm_migrate_copy_memory_gart - use sdma to copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: system DMA addresses of the ram pages
 * @vram: vram addresses of the vram pages
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, fence of the last sdma operation
 *
 * The ram side is accessed through contiguous GART table entries mapping the
 * ram pages, the vram side through the direct mapping of the vram pages,
 * which must be npages contiguous pages.
 * GART updates and the copy share the same buffer-copy ring. The copy is
 * split into transfers of at most GTT_MAX_PAGES pages and all sdma operations
 * are serialized, so waiting for the returned fence of the last sdma
 * operation is enough to know the whole copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */

static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

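	/*
	 * Map up to GTT_MAX_PAGES system pages through GART window 0 per
	 * iteration and issue one sdma copy for them. Only the fence of the
	 * most recent copy is kept, since the ring serializes the operations.
	 */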
	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, 0);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last one.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0 - success
 * otherwise - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
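	/*
	 * The device page keeps a reference on the range's svm_bo in
	 * zone_device_data; svm_migrate_page_free() drops it when the page
	 * is released.
	 */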
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	zone_device_page_init(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

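/*
 * Count source pages that were collected for migration but did not get the
 * MIGRATE_PFN_MIGRATE flag, i.e. pages that could not be migrated.
 */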
static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}

static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch, uint64_t ttm_res_offset)
{
	uint64_t npages = migrate->cpages;
	struct amdgpu_device *adev = node->adev;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
		 prange->last, ttm_res_offset);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
			 npages << PAGE_SHIFT, &cursor);
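	/*
	 * Walk the collected source pages: dma map each system page and
	 * assign it the next vram address from the ttm resource cursor.
	 * Contiguous runs of j pages are flushed with one GART-mapped sdma
	 * copy whenever the cursor segment ends or a page that cannot be
	 * migrated is hit; the remainder is copied after the loop.
	 */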
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		dst[i] = cursor.start + (j << PAGE_SHIFT);
		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
		svm_migrate_get_vram_page(prange, migrate->dst[i]);
		migrate->dst[i] = migrate_pfn(migrate->dst[i]);

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_BIDIRECTIONAL);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif

	return r;
}

static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct amdgpu_device *adev = node->adev;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	unsigned long cpages = 0;
	unsigned long mpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

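	/*
	 * One allocation backs the migrate.src and migrate.dst pfn arrays
	 * plus the scratch area used by the copy: per page, a dma address
	 * for the system page and a 64-bit vram address.
	 */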
	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(node, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      0, node->id, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages collected\n", cpages);

	r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
	migrate_vma_pages(&migrate);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 mpages, cpages, migrate.npages);

	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
	kfd_smi_event_migration_end(node, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    0, node->id, trigger, r);
out:
	if (!r && mpages) {
		pdd = svm_range_get_pdd_by_node(prange, node);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);

		return mpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @start_mgr: first page to migrate
 * @last_mgr: last page to migrate
 * @mm: the process mm structure
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			unsigned long start_mgr, unsigned long last_mgr,
			struct mm_struct *mm, uint32_t trigger)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	uint64_t ttm_res_offset;
	struct kfd_node *node;
	unsigned long mpages = 0;
	long r = 0;

	if (start_mgr < prange->start || last_mgr > prange->last) {
		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
			 start_mgr, last_mgr, prange->start, prange->last);
		return -EFAULT;
	}

	node = svm_range_get_node_by_id(prange, best_loc);
	if (!node) {
		pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
		 prange->svms, start_mgr, last_mgr, prange->start, prange->last,
		 best_loc);

	start = start_mgr << PAGE_SHIFT;
	end = (last_mgr + 1) << PAGE_SHIFT;

	r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
					prange->npages * PAGE_SIZE,
					KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
					node->xcp ? node->xcp->id : 0);
	if (r) {
		dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
		return -ENOSPC;
	}

	r = svm_range_vram_node_new(node, prange, true);
	if (r) {
		dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
		goto out;
	}
	ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			mpages += r;
		}
		ttm_res_offset += next - addr;
		addr = next;
	}

	if (mpages) {
		prange->actual_loc = best_loc;
		prange->vram_pages += mpages;
	} else if (!prange->actual_loc) {
		/* if no pages were migrated and all pages of the prange are
		 * still in sys ram, drop the svm_bo obtained from
		 * svm_range_vram_node_new
		 */
		svm_range_vram_node_free(prange);
	}

out:
	amdgpu_amdkfd_unreserve_mem_limit(node->adev,
					prange->npages * PAGE_SIZE,
					KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
					node->xcp ? node->xcp->id : 0);
	return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}

static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = migrate->start;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

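	/*
	 * Walk the device pages collected for migration: each vram source
	 * address is paired with a freshly allocated, dma-mapped system page.
	 * Contiguous vram runs of j pages are copied with one GART-mapped
	 * sdma copy; a run is flushed when a non-device page is hit or the
	 * vram addresses stop being contiguous, the remainder after the loop.
	 */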
	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @node: kfd node device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end) belongs to
 * @start: range start virtual address
 * @end: range end virtual address (exclusive)
 * @trigger: reason of migration
 * @fault_page: page from vmf->page if called from the svm_migrate_to_ram()
 *              CPU page fault callback, NULL otherwise
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 *   negative values - indicate error
 *   positive values or zero - number of pages migrated
 */
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
		       uint32_t trigger, struct page *fault_page)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	unsigned long mpages = 0;
	struct amdgpu_device *adev = node->adev;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
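	/*
	 * GPUs that are XGMI-connected to the CPU expose vram as coherent
	 * device pages, so select the matching migrate source type here.
	 */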
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	migrate.fault_page = fault_page;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(node, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      node->id, 0, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages collected\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
	kfd_smi_event_migration_end(node, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    node->id, 0, trigger, r);
out:
	if (!r && cpages) {
		mpages = cpages - upages;
		pdd = svm_range_get_pdd_by_node(prange, node);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
	}

	return r ? r : mpages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @start_mgr: first page to migrate to sys ram
 * @last_mgr: last page to migrate to sys ram
 * @trigger: reason of migration
 * @fault_page: page from vmf->page if called from the svm_migrate_to_ram()
 *              CPU page fault callback, NULL otherwise
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    unsigned long start_mgr, unsigned long last_mgr,
			    uint32_t trigger, struct page *fault_page)
{
	struct kfd_node *node;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long mpages = 0;
	long r = 0;

	/* this prange has no vram pages to migrate to sys ram */
	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	if (start_mgr < prange->start || last_mgr > prange->last) {
		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
			 start_mgr, last_mgr, prange->start, prange->last);
		return -EFAULT;
	}

	node = svm_range_get_node_by_id(prange, prange->actual_loc);
	if (!node) {
		pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
		return -ENODEV;
	}
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, start_mgr, last_mgr,
		 prange->actual_loc);

	start = start_mgr << PAGE_SHIFT;
	end = (last_mgr + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger,
					   fault_page);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			mpages += r;
		}
		addr = next;
	}

	if (r >= 0) {
		prange->vram_pages -= mpages;

		/* if the prange has no vram pages left, set its actual_loc to
		 * system memory and drop its svm_bo ref
		 */
		if (prange->vram_pages == 0 && prange->ttm_res) {
			prange->actual_loc = 0;
			svm_range_vram_node_free(prange);
		}
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @start: first page to migrate to the new device
 * @last: last page to migrate to the new device
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Migrate all vram pages in prange to sys ram, then migrate
 * [start, last] pages from sys ram to gpu node best_loc.
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 unsigned long start, unsigned long last,
			 struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	do {
		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
					    trigger, NULL);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
}

int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    unsigned long start, unsigned long last,
		    struct mm_struct *mm, uint32_t trigger)
{
	if (!prange->actual_loc || prange->actual_loc == best_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, start, last,
					       mm, trigger);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, start, last,
						mm, trigger);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application with a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long start, last, size;
	unsigned long addr = vmf->address;
	struct svm_range_bo *svm_bo;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	svm_bo = vmf->page->zone_device_data;
	if (!svm_bo) {
		pr_debug("failed get device page at addr 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
		pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
		return VM_FAULT_SIGBUS;
	}

	mm = svm_bo->eviction_fence->mm;
	if (mm != vmf->vma->vm_mm)
		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);

	p = kfd_lookup_process_by_mm(mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		r = VM_FAULT_SIGBUS;
		goto out_mmput;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		r = 0;
		goto out_unref_process;
	}

	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
	addr >>= PAGE_SHIFT;

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, NULL);
	if (!prange) {
		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
		r = -EFAULT;
		goto out_unlock_svms;
	}

	mutex_lock(&prange->migrate_mutex);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	/* Align migration range start and size to granularity size */
	size = 1UL << prange->granularity;
	start = max(ALIGN_DOWN(addr, size), prange->start);
	last = min(ALIGN(addr + 1, size) - 1, prange->last);

	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
	if (r)
		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
			 r, prange->svms, prange, start, last);

out_unlock_prange:
	mutex_unlock(&prange->migrate_mutex);
out_unlock_svms:
	mutex_unlock(&p->svms.lock);
out_unref_process:
	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
	kfd_unref_process(p);
out_mmput:
	mmput(mm);
	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free		= svm_migrate_page_free,
	.migrate_to_ram		= svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
	struct amdgpu_kfd_dev *kfddev = &adev->kfd;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on gfx9 or newer */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
		return -EINVAL;

	if (adev->flags & AMD_IS_APU)
		return 0;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
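	/*
	 * With a CPU-coherent XGMI link the vram aperture itself is exposed
	 * as MEMORY_DEVICE_COHERENT pages; otherwise claim an unused region
	 * of the physical address space for MEMORY_DEVICE_PRIVATE pages.
	 */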
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return PTR_ERR(res);
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;
	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start, resource_size(res));
		/* Disable SVM support capability */
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}
