xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2020-2021 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 #include <linux/types.h>
24 #include <linux/hmm.h>
25 #include <linux/dma-direction.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/migrate.h>
28 #include "amdgpu_sync.h"
29 #include "amdgpu_object.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_res_cursor.h"
32 #include "kfd_priv.h"
33 #include "kfd_svm.h"
34 #include "kfd_migrate.h"
35 #include "kfd_smi_events.h"
36 
37 #ifdef dev_fmt
38 #undef dev_fmt
39 #endif
40 #define dev_fmt(fmt) "kfd_migrate: " fmt
41 
42 static u64
43 svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
44 {
45 	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
46 }
47 
48 static int
49 svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,
50 		     dma_addr_t *addr, u64 *gart_addr, u64 flags)
51 {
52 	struct amdgpu_device *adev = ring->adev;
53 	struct amdgpu_job *job;
54 	unsigned int num_dw, num_bytes;
55 	struct dma_fence *fence;
56 	u64 src_addr, dst_addr;
57 	u64 pte_flags;
58 	void *cpu_addr;
59 	int r;
60 
61 	/* use gart window 0 */
62 	*gart_addr = adev->gmc.gart_start;
63 
64 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
65 	num_bytes = npages * 8;
66 
67 	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
68 				     AMDGPU_FENCE_OWNER_UNDEFINED,
69 				     num_dw * 4 + num_bytes,
70 				     AMDGPU_IB_POOL_DELAYED,
71 				     &job,
72 				     AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
73 	if (r)
74 		return r;
75 
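	/*
	 * IB layout: the first num_dw dwords hold the sdma copy packet, the
	 * npages * 8 bytes after it hold the PTEs built by amdgpu_gart_map()
	 * below. The copy packet then writes those PTEs from the IB (src_addr)
	 * into the GART table bo (dst_addr), updating window 0 in one submit.
	 */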
76 	src_addr = num_dw * 4;
77 	src_addr += job->ibs[0].gpu_addr;
78 
79 	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
80 	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
81 				dst_addr, num_bytes, 0);
82 
83 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
84 	WARN_ON(job->ibs[0].length_dw > num_dw);
85 
86 	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
87 	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
88 	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
89 		pte_flags |= AMDGPU_PTE_WRITEABLE;
90 	pte_flags |= adev->gart.gart_pte_flags;
91 
92 	cpu_addr = &job->ibs[0].ptr[num_dw];
93 
94 	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
95 	fence = amdgpu_job_submit(job);
96 	dma_fence_put(fence);
97 
98 	return r;
99 }
100 
101 /**
102  * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
103  *
104  * @adev: amdgpu device the sdma ring runs on
105  * @sys: system memory DMA addresses to copy from/to
106  * @vram: vram addresses to copy to/from
107  * @npages: number of pages to copy
108  * @direction: enum MIGRATION_COPY_DIR
109  * @mfence: output, sdma fence signaled when the last sdma copy is done
110  *
111  * The ram address uses contiguous GART table entries mapping the ram pages;
112  * the vram address uses a direct mapping of the vram pages, which must
113  * therefore be npages of contiguous memory.
114  * The GART update and the copy use the same buffer-funcs ring. The copy is
115  * split into multiple GTT_MAX_PAGES transfers, all serialized, so waiting on
116  * the last sdma fence, which is returned, is enough to know the copy is done.
117  *
118  * Context: Process context, takes and releases gtt_window_lock
119  *
120  * Return:
121  * 0 - OK, otherwise error code
122  */
123 
124 static int
125 svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
126 			     u64 *vram, u64 npages,
127 			     enum MIGRATION_COPY_DIR direction,
128 			     struct dma_fence **mfence)
129 {
130 	const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
131 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
132 	u64 gart_s, gart_d;
133 	struct dma_fence *next;
134 	u64 size;
135 	int r;
136 
137 	mutex_lock(&adev->mman.gtt_window_lock);
138 
139 	while (npages) {
140 		size = min(GTT_MAX_PAGES, npages);
141 
142 		if (direction == FROM_VRAM_TO_RAM) {
143 			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
144 			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);
145 
146 		} else if (direction == FROM_RAM_TO_VRAM) {
147 			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
148 						 KFD_IOCTL_SVM_FLAG_GPU_RO);
149 			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
150 		}
151 		if (r) {
152 			dev_err(adev->dev, "fail %d create gart mapping\n", r);
153 			goto out_unlock;
154 		}
155 
156 		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
157 				       NULL, &next, false, true, 0);
158 		if (r) {
159 			dev_err(adev->dev, "fail %d to copy memory\n", r);
160 			goto out_unlock;
161 		}
162 
163 		dma_fence_put(*mfence);
164 		*mfence = next;
165 		npages -= size;
166 		if (npages) {
167 			sys += size;
168 			vram += size;
169 		}
170 	}
171 
172 out_unlock:
173 	mutex_unlock(&adev->mman.gtt_window_lock);
174 
175 	return r;
176 }
177 
178 /**
179  * svm_migrate_copy_done - wait for the sdma memory copy to finish
180  *
181  * @adev: amdgpu device the sdma memory copy is executing on
182  * @mfence: migrate fence
183  *
184  * Wait for the dma fence to be signaled; if the copy was split into multiple
185  * sdma operations, this is the fence of the last one.
186  *
187  * Context: called after svm_migrate_copy_to_vram or svm_migrate_copy_to_ram
188  *
189  * Return:
190  * 0		- success
191  * otherwise	- error code from dma fence signal
192  */
193 static int
194 svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
195 {
196 	int r = 0;
197 
198 	if (mfence) {
199 		r = dma_fence_wait(mfence, false);
200 		dma_fence_put(mfence);
201 		pr_debug("sdma copy memory fence done\n");
202 	}
203 
204 	return r;
205 }
206 
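/*
 * Addresses handled below are offsets from the start of VRAM; adding
 * kfd.pgmap.range.start turns such an offset into the pfn of the device page
 * registered in kgd2kfd_init_zone_device(). svm_migrate_addr() is the inverse
 * translation, from a device page back to the VRAM offset.
 */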
207 unsigned long
208 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
209 {
210 	return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
211 }
212 
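/*
 * Each migrating device page stashes a reference to the range's svm_bo in its
 * zone_device_data; the reference is dropped in svm_migrate_page_free() when
 * the device page is released, keeping the VRAM BO alive while pages use it.
 */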
213 static void
214 svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
215 {
216 	struct page *page;
217 
218 	page = pfn_to_page(pfn);
219 	svm_range_bo_ref(prange->svm_bo);
220 	page->zone_device_data = prange->svm_bo;
221 	zone_device_page_init(page);
222 }
223 
224 static void
225 svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
226 {
227 	struct page *page;
228 
229 	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
230 	unlock_page(page);
231 	put_page(page);
232 }
233 
234 static unsigned long
235 svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
236 {
237 	unsigned long addr;
238 
239 	addr = page_to_pfn(page) << PAGE_SHIFT;
240 	return (addr - adev->kfd.pgmap.range.start);
241 }
242 
243 static struct page *
244 svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
245 {
246 	struct page *page;
247 
248 	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
249 	if (page)
250 		lock_page(page);
251 
252 	return page;
253 }
254 
255 static void svm_migrate_put_sys_page(unsigned long addr)
256 {
257 	struct page *page;
258 
259 	page = pfn_to_page(addr >> PAGE_SHIFT);
260 	unlock_page(page);
261 	put_page(page);
262 }
263 
264 static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
265 {
266 	unsigned long mpages = 0;
267 	unsigned long i;
268 
269 	for (i = 0; i < migrate->npages; i++) {
270 		if (migrate->dst[i] & MIGRATE_PFN_VALID &&
271 		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
272 			mpages++;
273 	}
274 	return mpages;
275 }
276 
277 static int
278 svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
279 			 struct migrate_vma *migrate, struct dma_fence **mfence,
280 			 dma_addr_t *scratch, u64 ttm_res_offset)
281 {
282 	u64 npages = migrate->npages;
283 	struct amdgpu_device *adev = node->adev;
284 	struct device *dev = adev->dev;
285 	struct amdgpu_res_cursor cursor;
286 	u64 mpages = 0;
287 	dma_addr_t *src;
288 	u64 *dst;
289 	u64 i, j;
290 	int r;
291 
292 	pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
293 		 prange->last, ttm_res_offset);
294 
295 	src = scratch;
296 	dst = (u64 *)(scratch + npages);
297 
298 	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
299 			 npages << PAGE_SHIFT, &cursor);
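	/*
	 * Walk the source pages: each page marked MIGRATE_PFN_MIGRATE gets a
	 * VRAM destination from the resource cursor and a dma mapping of its
	 * system page; j counts the contiguous run batched into one
	 * svm_migrate_copy_memory_gart() call. A skipped page or the end of
	 * the current VRAM cursor chunk flushes the pending batch.
	 */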
300 	for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
301 		struct page *spage;
302 
303 		if (migrate->src[i] & MIGRATE_PFN_MIGRATE) {
304 			dst[i] = cursor.start + (j << PAGE_SHIFT);
305 			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
306 			svm_migrate_get_vram_page(prange, migrate->dst[i]);
307 			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
308 			mpages++;
309 		}
310 		spage = migrate_pfn_to_page(migrate->src[i]);
311 		if (spage && !is_zone_device_page(spage)) {
312 			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
313 					      DMA_BIDIRECTIONAL);
314 			r = dma_mapping_error(dev, src[i]);
315 			if (r) {
316 				dev_err(dev, "%s: fail %d dma_map_page\n",
317 					__func__, r);
318 				goto out_free_vram_pages;
319 			}
320 		} else {
321 			if (j) {
322 				r = svm_migrate_copy_memory_gart(
323 						adev, src + i - j,
324 						dst + i - j, j,
325 						FROM_RAM_TO_VRAM,
326 						mfence);
327 				if (r)
328 					goto out_free_vram_pages;
329 				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
330 				j = 0;
331 			} else {
332 				amdgpu_res_next(&cursor, PAGE_SIZE);
333 			}
334 			continue;
335 		}
336 
337 		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
338 				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));
339 
340 		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
341 			r = svm_migrate_copy_memory_gart(adev, src + i - j,
342 							 dst + i - j, j + 1,
343 							 FROM_RAM_TO_VRAM,
344 							 mfence);
345 			if (r)
346 				goto out_free_vram_pages;
347 			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
348 			j = 0;
349 		} else {
350 			j++;
351 		}
352 	}
353 
354 	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
355 					 FROM_RAM_TO_VRAM, mfence);
356 
357 out_free_vram_pages:
358 	if (r) {
359 		pr_debug("failed %d to copy memory to vram\n", r);
360 		for (i = 0; i < npages && mpages; i++) {
361 			if (!dst[i])
362 				continue;
363 			svm_migrate_put_vram_page(adev, dst[i]);
364 			migrate->dst[i] = 0;
365 			mpages--;
366 		}
367 	}
368 
369 #ifdef DEBUG_FORCE_MIXED_DOMAINS
370 	for (i = 0, j = 0; i < npages; i += 4, j++) {
371 		if (j & 1)
372 			continue;
373 		svm_migrate_put_vram_page(adev, dst[i]);
374 		migrate->dst[i] = 0;
375 		svm_migrate_put_vram_page(adev, dst[i + 1]);
376 		migrate->dst[i + 1] = 0;
377 		svm_migrate_put_vram_page(adev, dst[i + 2]);
378 		migrate->dst[i + 2] = 0;
379 		svm_migrate_put_vram_page(adev, dst[i + 3]);
380 		migrate->dst[i + 3] = 0;
381 	}
382 #endif
383 
384 	return r;
385 }
386 
387 static long
388 svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
389 			struct vm_area_struct *vma, u64 start,
390 			u64 end, uint32_t trigger, u64 ttm_res_offset)
391 {
392 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
393 	u64 npages = (end - start) >> PAGE_SHIFT;
394 	struct amdgpu_device *adev = node->adev;
395 	struct kfd_process_device *pdd;
396 	struct dma_fence *mfence = NULL;
397 	struct migrate_vma migrate = { 0 };
398 	unsigned long cpages = 0;
399 	unsigned long mpages = 0;
400 	dma_addr_t *scratch;
401 	void *buf;
402 	int r = -ENOMEM;
403 
404 	memset(&migrate, 0, sizeof(migrate));
405 	migrate.vma = vma;
406 	migrate.start = start;
407 	migrate.end = end;
408 	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
409 	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
410 
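	/*
	 * One allocation carved into four per-page arrays: the migrate.src and
	 * migrate.dst pfn arrays, followed by a scratch area that
	 * svm_migrate_copy_to_vram() splits into dma_addr_t source addresses
	 * and u64 VRAM destination addresses.
	 */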
411 	buf = kvcalloc(npages,
412 		       2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
413 		       GFP_KERNEL);
414 	if (!buf)
415 		goto out;
416 
417 	migrate.src = buf;
418 	migrate.dst = migrate.src + npages;
419 	scratch = (dma_addr_t *)(migrate.dst + npages);
420 
421 	kfd_smi_event_migration_start(node, p->lead_thread->pid,
422 				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
423 				      0, node->id, prange->prefetch_loc,
424 				      prange->preferred_loc, trigger);
425 
426 	r = migrate_vma_setup(&migrate);
427 	if (r) {
428 		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
429 			__func__, r, prange->start, prange->last);
430 		goto out_free;
431 	}
432 
433 	cpages = migrate.cpages;
434 	if (!cpages) {
435 		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
436 			 prange->start, prange->last);
437 		goto out_free;
438 	}
439 	if (cpages != npages)
440 		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
441 			 cpages, npages);
442 	else
443 		pr_debug("0x%lx pages collected\n", cpages);
444 
445 	r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
446 	migrate_vma_pages(&migrate);
447 
448 	svm_migrate_copy_done(adev, mfence);
449 	migrate_vma_finalize(&migrate);
450 
451 	mpages = svm_migrate_successful_pages(&migrate);
452 	pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
453 		 mpages, cpages, migrate.npages);
454 
455 	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
456 
457 out_free:
458 	kvfree(buf);
459 	kfd_smi_event_migration_end(node, p->lead_thread->pid,
460 				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
461 				    0, node->id, trigger, r);
462 out:
463 	if (!r && mpages) {
464 		pdd = svm_range_get_pdd_by_node(prange, node);
465 		if (pdd)
466 			WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
467 
468 		return mpages;
469 	}
470 	return r;
471 }
472 
473 /**
474  * svm_migrate_ram_to_vram - migrate svm range from system to device
475  * @prange: range structure
476  * @best_loc: the device to migrate to
477  * @start_mgr: start page to migrate
478  * @last_mgr: last page to migrate
479  * @mm: the process mm structure
480  * @trigger: reason of migration
481  *
482  * Context: Process context, caller holds the mmap read lock, svms lock and prange lock
483  *
484  * Return:
485  * 0 - OK, otherwise error code
486  */
487 static int
488 svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
489 			unsigned long start_mgr, unsigned long last_mgr,
490 			struct mm_struct *mm, uint32_t trigger)
491 {
492 	unsigned long addr, start, end;
493 	struct vm_area_struct *vma;
494 	u64 ttm_res_offset;
495 	struct kfd_node *node;
496 	unsigned long mpages = 0;
497 	long r = 0;
498 
499 	if (start_mgr < prange->start || last_mgr > prange->last) {
500 		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
501 			 start_mgr, last_mgr, prange->start, prange->last);
502 		return -EFAULT;
503 	}
504 
505 	node = svm_range_get_node_by_id(prange, best_loc);
506 	if (!node) {
507 		pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
508 		return -ENODEV;
509 	}
510 
511 	pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
512 		prange->svms, start_mgr, last_mgr, prange->start, prange->last,
513 		best_loc);
514 
515 	start = start_mgr << PAGE_SHIFT;
516 	end = (last_mgr + 1) << PAGE_SHIFT;
517 
518 	r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
519 					prange->npages * PAGE_SIZE,
520 					KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
521 					node->xcp ? node->xcp->id : 0);
522 	if (r) {
523 		dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
524 		return -ENOSPC;
525 	}
526 
527 	r = svm_range_vram_node_new(node, prange, true);
528 	if (r) {
529 		dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
530 		goto out;
531 	}
532 	ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
533 
534 	for (addr = start; addr < end;) {
535 		unsigned long next;
536 
537 		vma = vma_lookup(mm, addr);
538 		if (!vma)
539 			break;
540 
541 		next = min(vma->vm_end, end);
542 		r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset);
543 		if (r < 0) {
544 			pr_debug("failed %ld to migrate\n", r);
545 			break;
546 		} else {
547 			mpages += r;
548 		}
549 		ttm_res_offset += next - addr;
550 		addr = next;
551 	}
552 
553 	if (mpages) {
554 		prange->actual_loc = best_loc;
555 		prange->vram_pages += mpages;
556 	} else if (!prange->actual_loc) {
557 		/* if no page migrated and all pages of the prange are still in
558 		 * sys ram, drop the svm_bo reference taken in svm_range_vram_node_new
559 		 */
560 		svm_range_vram_node_free(prange);
561 	}
562 
563 out:
564 	amdgpu_amdkfd_unreserve_mem_limit(node->adev,
565 					prange->npages * PAGE_SIZE,
566 					KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
567 					node->xcp ? node->xcp->id : 0);
568 	return r < 0 ? r : 0;
569 }
570 
571 static void svm_migrate_page_free(struct page *page)
572 {
573 	struct svm_range_bo *svm_bo = page->zone_device_data;
574 
575 	if (svm_bo) {
576 		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
577 		svm_range_bo_unref_async(svm_bo);
578 	}
579 }
580 
581 static int
582 svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
583 			struct migrate_vma *migrate, struct dma_fence **mfence,
584 			dma_addr_t *scratch, u64 npages)
585 {
586 	struct device *dev = adev->dev;
587 	u64 *src;
588 	dma_addr_t *dst;
589 	struct page *dpage;
590 	u64 i = 0, j;
591 	u64 addr;
592 	int r = 0;
593 
594 	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
595 		 prange->last);
596 
597 	addr = migrate->start;
598 
599 	src = (u64 *)(scratch + npages);
600 	dst = scratch;
601 
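	/*
	 * Collect the VRAM source address and allocate/dma-map a system page
	 * for each migrating device page; j counts the run of physically
	 * contiguous VRAM pages, which is flushed into one sdma copy whenever
	 * contiguity breaks or a page is skipped.
	 */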
602 	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
603 		struct page *spage;
604 
605 		spage = migrate_pfn_to_page(migrate->src[i]);
606 		if (!spage || !is_zone_device_page(spage)) {
607 			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
608 				 prange->svms, prange->start, prange->last);
609 			if (j) {
610 				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
611 								 src + i - j, j,
612 								 FROM_VRAM_TO_RAM,
613 								 mfence);
614 				if (r)
615 					goto out_oom;
616 				j = 0;
617 			}
618 			continue;
619 		}
620 		src[i] = svm_migrate_addr(adev, spage);
621 		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
622 			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
623 							 src + i - j, j,
624 							 FROM_VRAM_TO_RAM,
625 							 mfence);
626 			if (r)
627 				goto out_oom;
628 			j = 0;
629 		}
630 
631 		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
632 		if (!dpage) {
633 			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
634 				 prange->svms, prange->start, prange->last);
635 			r = -ENOMEM;
636 			goto out_oom;
637 		}
638 
639 		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
640 		r = dma_mapping_error(dev, dst[i]);
641 		if (r) {
642 			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
643 			goto out_oom;
644 		}
645 
646 		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
647 				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
648 
649 		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
650 		j++;
651 	}
652 
653 	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
654 					 FROM_VRAM_TO_RAM, mfence);
655 
656 out_oom:
657 	if (r) {
658 		pr_debug("failed %d copy to ram\n", r);
659 		while (i--) {
660 			svm_migrate_put_sys_page(dst[i]);
661 			migrate->dst[i] = 0;
662 		}
663 	}
664 
665 	return r;
666 }
667 
668 /**
669  * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
670  *
671  * @prange: svm range structure
672  * @vma: vm_area_struct that range [start, end] belongs to
673  * @start: page-aligned virtual address of the range start
674  * @end: page-aligned virtual address of the range end (exclusive)
675  * @node: kfd node device to migrate from
676  * @trigger: reason of migration
677  * @fault_page: the faulting page (vmf->page) when called from the CPU fault handler svm_migrate_to_ram(), NULL otherwise
678  *
679  * Context: Process context, caller holds the mmap read lock and prange->migrate_mutex
680  *
681  * Return:
682  *   negative value - error code
683  *   zero or positive value - number of pages migrated
684  */
685 static long
686 svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
687 		       struct vm_area_struct *vma, u64 start, u64 end,
688 		       uint32_t trigger, struct page *fault_page)
689 {
690 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
691 	u64 npages = (end - start) >> PAGE_SHIFT;
692 	unsigned long cpages = 0;
693 	unsigned long mpages = 0;
694 	struct amdgpu_device *adev = node->adev;
695 	struct kfd_process_device *pdd;
696 	struct dma_fence *mfence = NULL;
697 	struct migrate_vma migrate = { 0 };
698 	dma_addr_t *scratch;
699 	void *buf;
700 	int r = -ENOMEM;
701 
702 	memset(&migrate, 0, sizeof(migrate));
703 	migrate.vma = vma;
704 	migrate.start = start;
705 	migrate.end = end;
706 	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
707 	if (adev->gmc.xgmi.connected_to_cpu)
708 		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
709 	else
710 		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
711 
712 	buf = kvcalloc(npages,
713 		       2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
714 		       GFP_KERNEL);
715 	if (!buf)
716 		goto out;
717 
718 	migrate.src = buf;
719 	migrate.dst = migrate.src + npages;
720 	migrate.fault_page = fault_page;
721 	scratch = (dma_addr_t *)(migrate.dst + npages);
722 
723 	kfd_smi_event_migration_start(node, p->lead_thread->pid,
724 				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
725 				      node->id, 0, prange->prefetch_loc,
726 				      prange->preferred_loc, trigger);
727 
728 	r = migrate_vma_setup(&migrate);
729 	if (r) {
730 		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
731 			__func__, r, prange->start, prange->last);
732 		goto out_free;
733 	}
734 
735 	cpages = migrate.cpages;
736 	if (!cpages) {
737 		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
738 			 prange->start, prange->last);
739 		goto out_free;
740 	}
741 	if (cpages != npages)
742 		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
743 			 cpages, npages);
744 	else
745 		pr_debug("0x%lx pages collected\n", cpages);
746 
747 	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
748 				    scratch, npages);
749 	migrate_vma_pages(&migrate);
750 
751 	mpages = svm_migrate_successful_pages(&migrate);
752 	pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
753 		 mpages, cpages, migrate.npages);
754 
755 	svm_migrate_copy_done(adev, mfence);
756 	migrate_vma_finalize(&migrate);
757 
758 	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
759 
760 out_free:
761 	kvfree(buf);
762 	kfd_smi_event_migration_end(node, p->lead_thread->pid,
763 				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
764 				    node->id, 0, trigger, r);
765 out:
766 	if (!r && mpages) {
767 		pdd = svm_range_get_pdd_by_node(prange, node);
768 		if (pdd)
769 			WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
770 	}
771 
772 	return r ? r : mpages;
773 }
774 
775 /**
776  * svm_migrate_vram_to_ram - migrate svm range from device to system
777  * @prange: range structure
778  * @mm: process mm, use current->mm if NULL
779  * @start_mgr: first page that needs to be migrated to sys ram
780  * @last_mgr: last page that needs to be migrated to sys ram
781  * @trigger: reason of migration
782  * @fault_page: the faulting page (vmf->page) when called from the CPU fault handler svm_migrate_to_ram(), NULL otherwise
783  *
784  * Context: Process context, caller holds the mmap read lock and prange->migrate_mutex
785  *
786  * Return:
787  * 0 - OK, otherwise error code
788  */
789 int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
790 			    unsigned long start_mgr, unsigned long last_mgr,
791 			    uint32_t trigger, struct page *fault_page)
792 {
793 	struct kfd_node *node;
794 	struct vm_area_struct *vma;
795 	unsigned long addr;
796 	unsigned long start;
797 	unsigned long end;
798 	unsigned long mpages = 0;
799 	long r = 0;
800 
801 	/* this prange has no vram pages to migrate to sys ram */
802 	if (!prange->actual_loc) {
803 		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
804 			 prange->start, prange->last);
805 		return 0;
806 	}
807 
808 	if (start_mgr < prange->start || last_mgr > prange->last) {
809 		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
810 			 start_mgr, last_mgr, prange->start, prange->last);
811 		return -EFAULT;
812 	}
813 
814 	node = svm_range_get_node_by_id(prange, prange->actual_loc);
815 	if (!node) {
816 		pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
817 		return -ENODEV;
818 	}
819 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
820 		 prange->svms, prange, start_mgr, last_mgr,
821 		 prange->actual_loc);
822 
823 	start = start_mgr << PAGE_SHIFT;
824 	end = (last_mgr + 1) << PAGE_SHIFT;
825 
826 	for (addr = start; addr < end;) {
827 		unsigned long next;
828 
829 		vma = vma_lookup(mm, addr);
830 		if (!vma) {
831 			pr_debug("failed to find vma for prange %p\n", prange);
832 			r = -EFAULT;
833 			break;
834 		}
835 
836 		next = min(vma->vm_end, end);
837 		r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger,
838 			fault_page);
839 		if (r < 0) {
840 			pr_debug("failed %ld to migrate prange %p\n", r, prange);
841 			break;
842 		} else {
843 			mpages += r;
844 		}
845 		addr = next;
846 	}
847 
848 	if (r >= 0) {
849 		WARN_ONCE(prange->vram_pages < mpages,
850 			  "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
851 			  prange->vram_pages, mpages);
852 		prange->vram_pages -= mpages;
853 
854 		/* if the prange has no vram pages left, set its actual_loc to
855 		 * system memory and drop its svm_bo ref
856 		 */
857 		if (prange->vram_pages == 0 && prange->ttm_res) {
858 			prange->actual_loc = 0;
859 			svm_range_vram_node_free(prange);
860 		}
861 	}
862 
863 	return r < 0 ? r : 0;
864 }
865 
866 /**
867  * svm_migrate_vram_to_vram - migrate svm range from device to device
868  * @prange: range structure
869  * @best_loc: the device to migrate to
870  * @start: first page to migrate to the destination gpu node
871  * @last: last page to migrate to the destination gpu node
872  * @mm: process mm, use current->mm if NULL
873  * @trigger: reason of migration
874  *
875  * Context: Process context, caller holds the mmap read lock, svms lock and prange lock
876  *
877  * migrate all vram pages in prange to sys ram, then migrate
878  * [start, last] pages from sys ram to gpu node best_loc.
879  *
880  * Return:
881  * 0 - OK, otherwise error code
882  */
883 static int
884 svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
885 			unsigned long start, unsigned long last,
886 			struct mm_struct *mm, uint32_t trigger)
887 {
888 	int r, retries = 3;
889 
890 	/*
891 	 * TODO: when both devices have a PCIe large BAR or are on the same xgmi
892 	 * hive, skip using system memory as the migration bridge
893 	 */
894 
895 	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
896 
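	/*
	 * First drain the whole prange back to system memory, retrying a few
	 * times in case some device pages could not be collected and migrated
	 * on a pass; only a fully drained prange clears actual_loc.
	 */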
897 	do {
898 		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
899 					    trigger, NULL);
900 		if (r)
901 			return r;
902 	} while (prange->actual_loc && --retries);
903 
904 	if (prange->actual_loc)
905 		return -EDEADLK;
906 
907 	return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
908 }
909 
910 int
911 svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
912 		    unsigned long start, unsigned long last,
913 		    struct mm_struct *mm, uint32_t trigger)
914 {
915 	if (!prange->actual_loc || prange->actual_loc == best_loc)
916 		return svm_migrate_ram_to_vram(prange, best_loc, start, last,
917 					       mm, trigger);
918 
919 	else
920 		return svm_migrate_vram_to_vram(prange, best_loc, start, last,
921 						mm, trigger);
922 
923 }
924 
925 /**
926  * svm_migrate_to_ram - CPU page fault handler
927  * @vmf: CPU vm fault information (vma, address)
928  *
929  * Context: vm fault handler, caller holds the mmap read lock
930  *
931  * Return:
932  * 0 - OK
933  * VM_FAULT_SIGBUS - deliver a SIGBUS page fault to the application
934  */
935 static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
936 {
937 	unsigned long start, last, size;
938 	unsigned long addr = vmf->address;
939 	struct svm_range_bo *svm_bo;
940 	struct svm_range *prange;
941 	struct kfd_process *p;
942 	struct mm_struct *mm;
943 	int r = 0;
944 
945 	svm_bo = vmf->page->zone_device_data;
946 	if (!svm_bo) {
947 		pr_debug("failed get device page at addr 0x%lx\n", addr);
948 		return VM_FAULT_SIGBUS;
949 	}
950 	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
951 		pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
952 		return VM_FAULT_SIGBUS;
953 	}
954 
955 	mm = svm_bo->eviction_fence->mm;
956 	if (mm != vmf->vma->vm_mm)
957 		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);
958 
959 	p = kfd_lookup_process_by_mm(mm);
960 	if (!p) {
961 		pr_debug("failed find process at fault address 0x%lx\n", addr);
962 		r = VM_FAULT_SIGBUS;
963 		goto out_mmput;
964 	}
965 	if (READ_ONCE(p->svms.faulting_task) == current) {
966 		pr_debug("skipping ram migration\n");
967 		r = 0;
968 		goto out_unref_process;
969 	}
970 
971 	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
972 	addr >>= PAGE_SHIFT;
973 
974 	mutex_lock(&p->svms.lock);
975 
976 	prange = svm_range_from_addr(&p->svms, addr, NULL);
977 	if (!prange) {
978 		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
979 		r = -EFAULT;
980 		goto out_unlock_svms;
981 	}
982 
983 	mutex_lock(&prange->migrate_mutex);
984 
985 	if (!prange->actual_loc)
986 		goto out_unlock_prange;
987 
988 	/* Align migration range start and size to granularity size */
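	/* e.g. with the default granularity of 9 (512 pages, 2MB with 4K
	 * pages), one CPU fault migrates the whole aligned 2MB block, clamped
	 * to the prange boundaries.
	 */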
989 	size = 1UL << prange->granularity;
990 	start = max(ALIGN_DOWN(addr, size), prange->start);
991 	last = min(ALIGN(addr + 1, size) - 1, prange->last);
992 
993 	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
994 				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
995 	if (r)
996 		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
997 			r, prange->svms, prange, start, last);
998 
999 out_unlock_prange:
1000 	mutex_unlock(&prange->migrate_mutex);
1001 out_unlock_svms:
1002 	mutex_unlock(&p->svms.lock);
1003 out_unref_process:
1004 	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
1005 	kfd_unref_process(p);
1006 out_mmput:
1007 	mmput(mm);
1008 	return r ? VM_FAULT_SIGBUS : 0;
1009 }
1010 
1011 static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
1012 	.page_free		= svm_migrate_page_free,
1013 	.migrate_to_ram		= svm_migrate_to_ram,
1014 };
1015 
1016 /* Each VRAM page uses sizeof(struct page) of system memory */
1017 #define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))
1018 
1019 int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
1020 {
1021 	struct amdgpu_kfd_dev *kfddev = &adev->kfd;
1022 	struct dev_pagemap *pgmap;
1023 	struct resource *res = NULL;
1024 	unsigned long size;
1025 	void *r;
1026 
1027 	/* Page migration works on gfx9 or newer */
1028 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
1029 		return -EINVAL;
1030 
1031 	if (adev->apu_prefer_gtt)
1032 		return 0;
1033 
1034 	pgmap = &kfddev->pgmap;
1035 	memset(pgmap, 0, sizeof(*pgmap));
1036 
1037 	/* TODO: register all vram with HMM for now.
1038 	 * The reserved size should be excluded.
1039 	 */
1040 	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
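	/*
	 * xGMI-connected devices expose VRAM coherently through the CPU
	 * aperture, so it is registered as MEMORY_DEVICE_COHERENT at its CPU
	 * aperture address; otherwise a free physical address range is
	 * reserved and the VRAM is registered as MEMORY_DEVICE_PRIVATE, which
	 * is not CPU addressable.
	 */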
1041 	if (adev->gmc.xgmi.connected_to_cpu) {
1042 		pgmap->range.start = adev->gmc.aper_base;
1043 		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
1044 		pgmap->type = MEMORY_DEVICE_COHERENT;
1045 	} else {
1046 		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
1047 		if (IS_ERR(res))
1048 			return PTR_ERR(res);
1049 		pgmap->range.start = res->start;
1050 		pgmap->range.end = res->end;
1051 		pgmap->type = MEMORY_DEVICE_PRIVATE;
1052 	}
1053 
1054 	pgmap->nr_range = 1;
1055 	pgmap->ops = &svm_migrate_pgmap_ops;
1056 	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
1057 	pgmap->flags = 0;
1058 	/* The device manager releases the device-specific resources, memory
1059 	 * region and pgmap when the driver disconnects from the device.
1060 	 */
1061 	r = devm_memremap_pages(adev->dev, pgmap);
1062 	if (IS_ERR(r)) {
1063 		pr_err("failed to register HMM device memory\n");
1064 		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
1065 			devm_release_mem_region(adev->dev, res->start, resource_size(res));
1066 		/* Disable SVM support capability */
1067 		pgmap->type = 0;
1068 		return PTR_ERR(r);
1069 	}
1070 
1071 	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
1072 		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);
1073 
1074 	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));
1075 
1076 	pr_info("HMM registered %ldMB device memory\n", size >> 20);
1077 
1078 	return 0;
1079 }
1080