xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c (revision ae22b2f1c8ccd9a3e5f19e3ebb4ef6ec1e8655e0)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

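/* Translate a VRAM offset into the GPU address of the direct VRAM mapping,
 * by adding the start of the TTM_PL_VRAM domain aperture.
 */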
static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

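/* Map @npages system pages, given as DMA addresses in @addr, into GART
 * window 0 so the sdma engine can access them. The GART PTEs are staged in
 * the job's IB and copied into the GART table by the same sdma ring that
 * later performs the buffer copy. The GPU address of the window is returned
 * in @gart_addr; KFD_IOCTL_SVM_FLAG_GPU_RO in @flags makes the mapping
 * read-only. Called with adev->mman.gtt_window_lock held.
 */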
static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, 0);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	fence = amdgpu_job_submit(job);
	dma_fence_put(fence);

	return r;
}

/**
 * svm_migrate_copy_memory_gart - use sdma to copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: DMA addresses of the system memory pages
 * @vram: VRAM addresses of the device pages
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR, which side is the source
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * The ram side is accessed through contiguous GART table entries mapping the
 * ram pages; the vram side uses the direct mapping of vram pages, so the vram
 * pages must be physically contiguous (npages contiguous pages).
 * The GART update and the sdma copy use the same buffer-funcs ring. The copy
 * is split into chunks of at most GTT_MAX_PAGES pages; all sdma operations
 * are serialized, so waiting for the last sdma finish fence, which is
 * returned in @mfence, is enough to know the whole copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, 0);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last one.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0		- success
 * otherwise	- error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

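/* Convert a VRAM offset relative to the registered pgmap range into the
 * page frame number of the corresponding ZONE_DEVICE page.
 */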
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
}

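/* Initialize the ZONE_DEVICE page backing a newly allocated VRAM page and
 * point it at prange's svm_bo, taking an extra svm_bo reference that is
 * dropped again in svm_migrate_page_free().
 */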
static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	zone_device_page_init(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

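/* Inverse of svm_migrate_addr_to_pfn(): return the VRAM offset of a device
 * page relative to the start of the registered pgmap range.
 */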
static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.pgmap.range.start);
}

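/* Allocate and lock a system memory page to serve as the destination of a
 * VRAM to RAM migration. Returns NULL if the allocation fails.
 */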
static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

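/* Count the pages that migrate_vma_setup() collected but that could not be
 * migrated: their src entry is valid but MIGRATE_PFN_MIGRATE is clear.
 */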
static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}

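/* Copy the collected system pages of @migrate into the VRAM backing of
 * @prange at @ttm_res_offset. Source pages are dma-mapped into @scratch and
 * batched into one sdma copy per run of contiguous VRAM; pages that were not
 * collected, or that are already device pages, flush the current batch and
 * are skipped. On error, all VRAM pages allocated so far are released.
 */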
static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch, uint64_t ttm_res_offset)
{
	uint64_t npages = migrate->cpages;
	struct amdgpu_device *adev = node->adev;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
		 prange->last, ttm_res_offset);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
			 npages << PAGE_SHIFT, &cursor);
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		dst[i] = cursor.start + (j << PAGE_SHIFT);
		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
		svm_migrate_get_vram_page(prange, migrate->dst[i]);
		migrate->dst[i] = migrate_pfn(migrate->dst[i]);

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif

	return r;
}

static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct amdgpu_device *adev = node->adev;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	unsigned long cpages = 0;
	unsigned long mpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(node, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      0, node->id, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages collected\n", cpages);

	r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
	migrate_vma_pages(&migrate);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
			 mpages, cpages, migrate.npages);

	kfd_smi_event_migration_end(node, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    0, node->id, trigger);

	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && mpages) {
		pdd = svm_range_get_pdd_by_node(prange, node);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);

		return mpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @start_mgr: start page to migrate
 * @last_mgr: last page to migrate
 * @mm: the process mm structure
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			unsigned long start_mgr, unsigned long last_mgr,
			struct mm_struct *mm, uint32_t trigger)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	uint64_t ttm_res_offset;
	struct kfd_node *node;
	unsigned long mpages = 0;
	long r = 0;

	if (start_mgr < prange->start || last_mgr > prange->last) {
		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
			 start_mgr, last_mgr, prange->start, prange->last);
		return -EFAULT;
	}

	node = svm_range_get_node_by_id(prange, best_loc);
	if (!node) {
		pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
		prange->svms, start_mgr, last_mgr, prange->start, prange->last,
		best_loc);

	start = start_mgr << PAGE_SHIFT;
	end = (last_mgr + 1) << PAGE_SHIFT;

	r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
					prange->npages * PAGE_SIZE,
					KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
					node->xcp ? node->xcp->id : 0);
	if (r) {
		dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
		return -ENOSPC;
	}

	r = svm_range_vram_node_new(node, prange, true);
	if (r) {
		dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
		goto out;
	}
	ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			mpages += r;
		}
		ttm_res_offset += next - addr;
		addr = next;
	}

	if (mpages) {
		prange->actual_loc = best_loc;
		prange->vram_pages += mpages;
	} else if (!prange->actual_loc) {
		/* If no pages were migrated and all of prange's pages are in
		 * system ram, drop the svm_bo reference taken in
		 * svm_range_vram_node_new.
		 */
		svm_range_vram_node_free(prange);
	}

out:
	amdgpu_amdkfd_unreserve_mem_limit(node->adev,
					prange->npages * PAGE_SIZE,
					KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
					node->xcp ? node->xcp->id : 0);
	return r < 0 ? r : 0;
}

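/* dev_pagemap page_free callback: when the last reference to a device page
 * is dropped, release the svm_bo reference taken for it in
 * svm_migrate_get_vram_page(). The unref is deferred to a worker thread.
 */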
static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}

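/* Copy the collected device pages of @migrate back to freshly allocated
 * system pages. Destination pages are dma-mapped into @scratch; runs of
 * VRAM-contiguous source pages are batched into single sdma copies, and
 * pages that are not device pages (already resident in system ram) flush
 * the current batch and are skipped. On error, the system pages allocated
 * so far are released.
 */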
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = migrate->start;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address in pages
 * @end: range end virtual address in pages
 * @node: kfd node device to migrate from
 * @trigger: reason of migration
 * @fault_page: from vmf->page when called from the CPU page fault handler
 *              svm_migrate_to_ram(), otherwise NULL
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 *   negative values - indicate error
 *   positive values or zero - number of pages migrated
 */
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
		       uint32_t trigger, struct page *fault_page)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	unsigned long mpages = 0;
	struct amdgpu_device *adev = node->adev;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	migrate.fault_page = fault_page;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(node, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      node->id, 0, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages collected\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(node, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    node->id, 0, trigger);

	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		mpages = cpages - upages;
		pdd = svm_range_get_pdd_by_node(prange, node);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
	}

	return r ? r : mpages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @start_mgr: first page that needs to be migrated to system ram
 * @last_mgr: last page that needs to be migrated to system ram
 * @trigger: reason of migration
 * @fault_page: from vmf->page when called from the CPU page fault handler
 *              svm_migrate_to_ram(), otherwise NULL
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    unsigned long start_mgr, unsigned long last_mgr,
			    uint32_t trigger, struct page *fault_page)
{
	struct kfd_node *node;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long mpages = 0;
	long r = 0;

	/* this prange has no vram pages to migrate to system ram */
	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	if (start_mgr < prange->start || last_mgr > prange->last) {
		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
			 start_mgr, last_mgr, prange->start, prange->last);
		return -EFAULT;
	}

	node = svm_range_get_node_by_id(prange, prange->actual_loc);
	if (!node) {
		pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
		return -ENODEV;
	}
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, start_mgr, last_mgr,
		 prange->actual_loc);

	start = start_mgr << PAGE_SHIFT;
	end = (last_mgr + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger,
			fault_page);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			mpages += r;
		}
		addr = next;
	}

	if (r >= 0) {
		prange->vram_pages -= mpages;

		/* If prange has no vram pages left, set its actual_loc to
		 * system memory and drop its svm_bo reference.
		 */
		if (prange->vram_pages == 0 && prange->ttm_res) {
			prange->actual_loc = 0;
			svm_range_vram_node_free(prange);
		}
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @start: start page to migrate to @best_loc once the range is in system ram
 * @last: last page to migrate to @best_loc once the range is in system ram
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * migrate all vram pages in prange to sys ram, then migrate
 * [start, last] pages from sys ram to gpu node best_loc.
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			unsigned long start, unsigned long last,
			struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	do {
		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
					    trigger, NULL);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
}

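/* Migrate pages [start, last] of prange to GPU node best_loc: directly from
 * system ram if the range has no VRAM pages yet or already lives on
 * best_loc, otherwise via system ram from its current VRAM location.
 */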
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    unsigned long start, unsigned long last,
		    struct mm_struct *mm, uint32_t trigger)
{
	if (!prange->actual_loc || prange->actual_loc == best_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, start, last,
					       mm, trigger);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, start, last,
						mm, trigger);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application with a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long start, last, size;
	unsigned long addr = vmf->address;
	struct svm_range_bo *svm_bo;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	svm_bo = vmf->page->zone_device_data;
	if (!svm_bo) {
		pr_debug("failed get device page at addr 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
		pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
		return VM_FAULT_SIGBUS;
	}

	mm = svm_bo->eviction_fence->mm;
	if (mm != vmf->vma->vm_mm)
		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);

	p = kfd_lookup_process_by_mm(mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		r = VM_FAULT_SIGBUS;
		goto out_mmput;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		r = 0;
		goto out_unref_process;
	}

	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
	addr >>= PAGE_SHIFT;

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, NULL);
	if (!prange) {
		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
		r = -EFAULT;
		goto out_unlock_svms;
	}

	mutex_lock(&prange->migrate_mutex);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	/* Align migration range start and size to granularity size */
	size = 1UL << prange->granularity;
	start = max(ALIGN_DOWN(addr, size), prange->start);
	last = min(ALIGN(addr + 1, size) - 1, prange->last);

	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
	if (r)
		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
			r, prange->svms, prange, start, last);

out_unlock_prange:
	mutex_unlock(&prange->migrate_mutex);
out_unlock_svms:
	mutex_unlock(&p->svms.lock);
out_unref_process:
	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
	kfd_unref_process(p);
out_mmput:
	mmput(mm);
	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free		= svm_migrate_page_free,
	.migrate_to_ram		= svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory, e.g. assuming a
 * 64-byte struct page, 16 GB of VRAM in 4 KB pages needs
 * 16 GB / 4 KB * 64 B = 256 MB of system memory for its page structs.
 */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

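/* Register the device VRAM with HMM as ZONE_DEVICE memory so that SVM
 * ranges can migrate between system ram and VRAM. On systems where VRAM is
 * coherently connected to the CPU, the existing aperture is registered as
 * MEMORY_DEVICE_COHERENT; otherwise a free physical address region is
 * reserved and registered as MEMORY_DEVICE_PRIVATE.
 */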
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
	struct amdgpu_kfd_dev *kfddev = &adev->kfd;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on gfx9 or newer */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
		return -EINVAL;

	if (adev->gmc.is_app_apu)
		return 0;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now; the reserved size should
	 * be excluded.
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return PTR_ERR(res);
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;
	/* The device manager releases the device-specific resources, memory
	 * region and pgmap when the driver disconnects from the device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start, resource_size(res));
		/* Disable SVM support capability */
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}