// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

static u64
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

static int
svm_migrate_gart_map(struct amdgpu_ring *ring,
		     struct amdgpu_ttm_buffer_entity *entity,
		     u64 npages,
		     dma_addr_t *addr, u64 *gart_addr, u64 flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	u64 src_addr, dst_addr;
	u64 pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

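	/* The IB holds the copy command dwords followed by the GART PTE
	 * payload, one 8-byte PTE per page to map.
	 */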
	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, &entity->base,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED,
				     &job,
				     AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
	if (r)
		return r;

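	/* Source is the PTE payload staged in the IB right after the copy
	 * command; destination is the GART table itself, so the SDMA copy
	 * writes the new entries for window 0.
	 */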
	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, 0);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	fence = amdgpu_job_submit(job);
	dma_fence_put(fence);

	return r;
}

/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: system DMA pointer to be copied
 * @vram: vram destination DMA pointer
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence signaled after the last sdma is done
 *
 * The ram address uses contiguous GART table entries mapped to the ram pages,
 * the vram address uses the direct mapping of vram pages, which must be
 * npages contiguous pages.
 * The GART update and the sdma copy share the same buffer copy ring, so all
 * sdma operations are serialized. The copy is split into transfers of at most
 * GTT_MAX_PAGES pages each; waiting for the finish fence of the last sdma
 * operation, which is returned via @mfence, confirms the whole copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     u64 *vram, u64 npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_ttm_buffer_entity *entity;
	u64 gart_s, gart_d;
	struct dma_fence *next;
	u64 size;
	int r;

	entity = &adev->mman.default_entity;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
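		/* Copy at most one GART window's worth of pages per
		 * iteration; each chunk needs its own GART mapping job.
		 */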
		size = min(GTT_MAX_PAGES, npages);

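		/* The system pages are mapped into GART window 0; the vram
		 * side is addressed through its direct mapping. The ram side
		 * is mapped read-only when it is the copy source.
		 */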
		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, entity, size, sys, &gart_d, 0);
		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, entity, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(adev, entity,
				       gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, true, 0);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last sdma operation.
 *
 * Context: called after svm_migrate_copy_memory_gart
 *
 * Return:
 * 0		- success
 * otherwise	- error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	zone_device_page_init(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

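/* Count pages that received a destination page and were selected for
 * migration; only those pages actually moved.
 */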
static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long mpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->dst[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			mpages++;
	}
	return mpages;
}

static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch, u64 ttm_res_offset)
{
	u64 npages = migrate->npages;
	struct amdgpu_device *adev = node->adev;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	u64 mpages = 0;
	dma_addr_t *src;
	u64 *dst;
	u64 i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
		 prange->last, ttm_res_offset);

	src = scratch;
	dst = (u64 *)(scratch + npages);

	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
			 npages << PAGE_SHIFT, &cursor);
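	/* Batch contiguous vram destinations: j counts the pages accumulated
	 * in the current run, which is flushed when a source page is missing
	 * or the run reaches the end of the current vram segment.
	 */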
	for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
		struct page *spage;

		if (migrate->src[i] & MIGRATE_PFN_MIGRATE) {
			dst[i] = cursor.start + (j << PAGE_SHIFT);
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
			mpages++;
		}
		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_BIDIRECTIONAL);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
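			/* No system source page (or it is already a device
			 * page): flush the pending run, then skip one page of
			 * the vram cursor.
			 */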
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

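		/* Flush the run when it reaches the end of the current
		 * contiguous vram segment of the cursor.
		 */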
		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		for (i = 0; i < npages && mpages; i++) {
			if (!dst[i])
				continue;
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
			mpages--;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif

	return r;
}

static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
			struct vm_area_struct *vma, u64 start,
			u64 end, uint32_t trigger, u64 ttm_res_offset)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	u64 npages = (end - start) >> PAGE_SHIFT;
	struct amdgpu_device *adev = node->adev;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	unsigned long cpages = 0;
	unsigned long mpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
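
	/* One allocation backs migrate.src, migrate.dst and the scratch
	 * space: per page, two migrate PFN slots plus a u64 vram address and
	 * a dma_addr_t for the DMA-mapped system page.
	 */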
	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(node, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      0, node->id, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages collected\n", cpages);

	r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
	migrate_vma_pages(&migrate);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	mpages = svm_migrate_successful_pages(&migrate);
	pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
		 mpages, cpages, migrate.npages);

	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
	kfd_smi_event_migration_end(node, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    0, node->id, trigger, r);
out:
	if (!r && mpages) {
		pdd = svm_range_get_pdd_by_node(prange, node);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);

		return mpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @start_mgr: start page to migrate
 * @last_mgr: last page to migrate
 * @mm: the process mm structure
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			unsigned long start_mgr, unsigned long last_mgr,
			struct mm_struct *mm, uint32_t trigger)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	u64 ttm_res_offset;
	struct kfd_node *node;
	unsigned long mpages = 0;
	long r = 0;

	if (start_mgr < prange->start || last_mgr > prange->last) {
		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
			 start_mgr, last_mgr, prange->start, prange->last);
		return -EFAULT;
	}

	node = svm_range_get_node_by_id(prange, best_loc);
	if (!node) {
		pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
		prange->svms, start_mgr, last_mgr, prange->start, prange->last,
		best_loc);

	start = start_mgr << PAGE_SHIFT;
	end = (last_mgr + 1) << PAGE_SHIFT;

	r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
					prange->npages * PAGE_SIZE,
					KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
					node->xcp ? node->xcp->id : 0);
	if (r) {
		dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
		return -ENOSPC;
	}

	r = svm_range_vram_node_new(node, prange, true);
	if (r) {
		dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
		goto out;
	}
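
	/* Byte offset into the TTM resource where the first migrated page of
	 * this range lands.
	 */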
	ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			mpages += r;
		}
		ttm_res_offset += next - addr;
		addr = next;
	}

	if (mpages) {
		prange->actual_loc = best_loc;
		prange->vram_pages += mpages;
	} else if (!prange->actual_loc) {
		/* if no page migrated and all pages of the prange remain in
		 * system ram, drop the svm_bo obtained from
		 * svm_range_vram_node_new
		 */
		svm_range_vram_node_free(prange);
	}

out:
	amdgpu_amdkfd_unreserve_mem_limit(node->adev,
					prange->npages * PAGE_SIZE,
					KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
					node->xcp ? node->xcp->id : 0);
	return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}

static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, u64 npages)
{
	struct device *dev = adev->dev;
	u64 *src;
	dma_addr_t *dst;
	struct page *dpage;
	u64 i = 0, j;
	u64 addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = migrate->start;

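	/* Roles are swapped relative to the ram-to-vram path: scratch holds
	 * the DMA addresses of the newly allocated system pages (dst),
	 * followed by the vram source addresses (src).
	 */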
	src = (u64 *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
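		/* The vram side of the GART copy must be contiguous; flush
		 * the pending run when contiguity breaks.
		 */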
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @node: kfd node device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address in pages
 * @end: range end virtual address in pages
 * @trigger: reason of migration
 * @fault_page: from vmf->page when called from svm_migrate_to_ram(), the CPU
 *              page fault callback
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 *   negative values - indicate error
 *   positive values or zero - number of pages migrated
 */
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
		       struct vm_area_struct *vma, u64 start, u64 end,
		       uint32_t trigger, struct page *fault_page)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	u64 npages = (end - start) >> PAGE_SHIFT;
	unsigned long cpages = 0;
	unsigned long mpages = 0;
	struct amdgpu_device *adev = node->adev;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
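	/* VRAM exposed over xgmi was registered as device-coherent memory,
	 * otherwise as device-private; select the matching source type.
	 */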
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	migrate.fault_page = fault_page;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(node, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      node->id, 0, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages collected\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	mpages = svm_migrate_successful_pages(&migrate);
	pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
		 mpages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
	kfd_smi_event_migration_end(node, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    node->id, 0, trigger, r);
out:
	if (!r && mpages) {
		pdd = svm_range_get_pdd_by_node(prange, node);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
	}

	return r ? r : mpages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @start_mgr: first page that needs to be migrated to sys ram
 * @last_mgr: last page that needs to be migrated to sys ram
 * @trigger: reason of migration
 * @fault_page: from vmf->page when called from svm_migrate_to_ram(), the CPU
 *              page fault callback
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    unsigned long start_mgr, unsigned long last_mgr,
			    uint32_t trigger, struct page *fault_page)
{
	struct kfd_node *node;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long mpages = 0;
	long r = 0;

	/* this prange has no vram pages to migrate to sys ram */
	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	if (start_mgr < prange->start || last_mgr > prange->last) {
		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
			 start_mgr, last_mgr, prange->start, prange->last);
		return -EFAULT;
	}

	node = svm_range_get_node_by_id(prange, prange->actual_loc);
	if (!node) {
		pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
		return -ENODEV;
	}
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, start_mgr, last_mgr,
		 prange->actual_loc);

	start = start_mgr << PAGE_SHIFT;
	end = (last_mgr + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger,
			fault_page);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			mpages += r;
		}
		addr = next;
	}

	if (r >= 0) {
		WARN_ONCE(prange->vram_pages < mpages,
			  "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
			  prange->vram_pages, mpages);
		prange->vram_pages -= mpages;

		/* if the prange has no vram pages left, set its actual_loc
		 * to system memory and drop its svm_bo ref
		 */
		if (prange->vram_pages == 0 && prange->ttm_res) {
			prange->actual_loc = 0;
			svm_range_vram_node_free(prange);
		}
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @start: first page to migrate to @best_loc
 * @last: last page to migrate to @best_loc
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * migrate all vram pages in prange to sys ram, then migrate
 * [start, last] pages from sys ram to gpu node best_loc.
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			unsigned long start, unsigned long last,
			struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/*
	 * TODO: when both devices have a large PCIe BAR or are on the same
	 * xgmi hive, skip system memory as the migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

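	/* All pages must be in system memory before the second hop; retry a
	 * few times in case the range was only partially migrated.
	 */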
	do {
		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
					    trigger, NULL);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
}

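/* Migrate [start, last] to best_loc, bouncing through system memory first
 * when the range currently resides on a different GPU.
 */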
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    unsigned long start, unsigned long last,
		    struct mm_struct *mm, uint32_t trigger)
{
	if (!prange->actual_loc || prange->actual_loc == best_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, start, last,
					       mm, trigger);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, start, last,
						mm, trigger);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault; carries the faulting vma and address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application with a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long start, last, size;
	unsigned long addr = vmf->address;
	struct svm_range_bo *svm_bo;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	svm_bo = vmf->page->zone_device_data;
	if (!svm_bo) {
		pr_debug("failed get device page at addr 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
		pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
		return VM_FAULT_SIGBUS;
	}

	mm = svm_bo->eviction_fence->mm;
	if (mm != vmf->vma->vm_mm)
		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);

	p = kfd_lookup_process_by_mm(mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		r = VM_FAULT_SIGBUS;
		goto out_mmput;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		r = 0;
		goto out_unref_process;
	}

	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
	addr >>= PAGE_SHIFT;

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, NULL);
	if (!prange) {
		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
		r = -EFAULT;
		goto out_unlock_svms;
	}

	mutex_lock(&prange->migrate_mutex);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	/* Align migration range start and size to granularity size */
	size = 1UL << prange->granularity;
	start = max(ALIGN_DOWN(addr, size), prange->start);
	last = min(ALIGN(addr + 1, size) - 1, prange->last);

	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
	if (r)
		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
			r, prange->svms, prange, start, last);

out_unlock_prange:
	mutex_unlock(&prange->migrate_mutex);
out_unlock_svms:
	mutex_unlock(&p->svms.lock);
out_unref_process:
	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
	kfd_unref_process(p);
out_mmput:
	mmput(mm);
	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free		= svm_migrate_page_free,
	.migrate_to_ram		= svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
	struct amdgpu_kfd_dev *kfddev = &adev->kfd;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on gfx9 or newer */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
		return -EINVAL;

	if (adev->apu_prefer_gtt)
		return 0;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * the reserved size should be excluded
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
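	/* CPU-coherent vram on xgmi-connected GPUs is CPU addressable, so the
	 * real aperture backs device-coherent pages; otherwise borrow a free
	 * physical address range for device-private pages.
	 */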
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return PTR_ERR(res);
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;
	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start, resource_size(res));
		/* Disable SVM support capability */
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}
1085