xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_svm.c (revision b6c0783ff278671e38fed978fefb732101ac8836)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2020-2021 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include <linux/dynamic_debug.h>
27 #include <drm/ttm/ttm_tt.h>
28 #include <drm/drm_exec.h>
29 
30 #include "amdgpu_sync.h"
31 #include "amdgpu_object.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_hmm.h"
34 #include "amdgpu.h"
35 #include "amdgpu_xgmi.h"
36 #include "amdgpu_reset.h"
37 #include "kfd_priv.h"
38 #include "kfd_svm.h"
39 #include "kfd_migrate.h"
40 #include "kfd_smi_events.h"
41 
42 #ifdef dev_fmt
43 #undef dev_fmt
44 #endif
45 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
46 
47 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
48 
49 /* Long enough to ensure no retry fault comes after svm range is restored and
50  * page table is updated.
51  */
52 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	(2UL * NSEC_PER_MSEC)
53 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
54 #define dynamic_svm_range_dump(svms) \
55 	_dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
56 #else
57 #define dynamic_svm_range_dump(svms) \
58 	do { if (0) svm_range_debug_dump(svms); } while (0)
59 #endif
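/* With CONFIG_DYNAMIC_DEBUG the dump can be enabled at run time through
 * /sys/kernel/debug/dynamic_debug/control by matching this "svm_range_dump"
 * callsite; without it the call compiles away to nothing.
 */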
60 
61 /* A giant svm range is split into smaller ranges based on this value. It is
62  * the minimum over all dGPUs/APUs of 1/32 of the VRAM size, clamped between
63  * 2MB and 1GB and rounded down to a power of two.
64  */
65 static uint64_t max_svm_range_pages;
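/* Worked example (illustrative numbers only): a 64GB VRAM node gives
 * 64GB / 32 = 2GB, clamped to the 1GB upper bound; a 16GB node gives 512MB,
 * already a power of two, so ranges larger than that are split. The value
 * itself is derived per device elsewhere in this file.
 */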
66 
67 struct criu_svm_metadata {
68 	struct list_head list;
69 	struct kfd_criu_svm_range_priv_data data;
70 };
71 
72 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
73 static bool
74 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
75 				    const struct mmu_notifier_range *range,
76 				    unsigned long cur_seq);
77 static int
78 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
79 		   uint64_t *bo_s, uint64_t *bo_l);
80 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
81 	.invalidate = svm_range_cpu_invalidate_pagetables,
82 };
83 
84 /**
85  * svm_range_unlink - unlink svm_range from lists and interval tree
86  * @prange: svm range structure to be removed
87  *
88  * Remove the svm_range from the svms and svm_bo lists and the svms
89  * interval tree.
90  *
91  * Context: The caller must hold svms->lock
92  */
93 static void svm_range_unlink(struct svm_range *prange)
94 {
95 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
96 		 prange, prange->start, prange->last);
97 
98 	if (prange->svm_bo) {
99 		spin_lock(&prange->svm_bo->list_lock);
100 		list_del(&prange->svm_bo_list);
101 		spin_unlock(&prange->svm_bo->list_lock);
102 	}
103 
104 	list_del(&prange->list);
105 	if (prange->it_node.start != 0 && prange->it_node.last != 0)
106 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
107 }
108 
109 static void
110 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
111 {
112 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
113 		 prange, prange->start, prange->last);
114 
115 	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
116 				     prange->start << PAGE_SHIFT,
117 				     prange->npages << PAGE_SHIFT,
118 				     &svm_range_mn_ops);
119 }
120 
121 /**
122  * svm_range_add_to_svms - add svm range to svms
123  * @prange: svm range structure to be added
124  *
125  * Add the svm range to svms interval tree and link list
126  *
127  * Context: The caller must hold svms->lock
128  */
129 static void svm_range_add_to_svms(struct svm_range *prange)
130 {
131 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
132 		 prange, prange->start, prange->last);
133 
134 	list_move_tail(&prange->list, &prange->svms->list);
135 	prange->it_node.start = prange->start;
136 	prange->it_node.last = prange->last;
137 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
138 }
139 
140 static void svm_range_remove_notifier(struct svm_range *prange)
141 {
142 	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
143 		 prange->svms, prange,
144 		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
145 		 prange->notifier.interval_tree.last >> PAGE_SHIFT);
146 
147 	if (prange->notifier.interval_tree.start != 0 &&
148 	    prange->notifier.interval_tree.last != 0)
149 		mmu_interval_notifier_remove(&prange->notifier);
150 }
151 
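/* Note: prange->dma_addr[] entries hold either a real DMA mapping for a
 * system page or a VRAM address tagged with SVM_RANGE_VRAM_DOMAIN; only the
 * former are DMA mappings that must eventually be dma_unmap_page()'d.
 */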
152 static bool
153 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
154 {
155 	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
156 	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
157 }
158 
159 static int
160 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
161 		      unsigned long offset, unsigned long npages,
162 		      unsigned long *hmm_pfns, uint32_t gpuidx)
163 {
164 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
165 	dma_addr_t *addr = prange->dma_addr[gpuidx];
166 	struct device *dev = adev->dev;
167 	struct page *page;
168 	int i, r;
169 
170 	if (!addr) {
171 		addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
172 		if (!addr)
173 			return -ENOMEM;
174 		prange->dma_addr[gpuidx] = addr;
175 	}
176 
177 	addr += offset;
178 	for (i = 0; i < npages; i++) {
179 		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
180 			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
181 
182 		page = hmm_pfn_to_page(hmm_pfns[i]);
183 		if (is_zone_device_page(page)) {
184 			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
185 
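			/* Device-private (VRAM) page: store the GPU physical
			 * address (device pfn relative to the pgmap range plus
			 * the VRAM base offset) and tag it with
			 * SVM_RANGE_VRAM_DOMAIN so map/unmap code can tell it
			 * apart from DMA-mapped system pages.
			 */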
186 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
187 				   bo_adev->vm_manager.vram_base_offset -
188 				   bo_adev->kfd.pgmap.range.start;
189 			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
190 			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
191 			continue;
192 		}
193 		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
194 		r = dma_mapping_error(dev, addr[i]);
195 		if (r) {
196 			dev_err(dev, "failed %d dma_map_page\n", r);
197 			return r;
198 		}
199 		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
200 				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
201 	}
202 
203 	return 0;
204 }
205 
206 static int
207 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
208 		  unsigned long offset, unsigned long npages,
209 		  unsigned long *hmm_pfns)
210 {
211 	struct kfd_process *p;
212 	uint32_t gpuidx;
213 	int r;
214 
215 	p = container_of(prange->svms, struct kfd_process, svms);
216 
217 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
218 		struct kfd_process_device *pdd;
219 
220 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
221 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
222 		if (!pdd) {
223 			pr_debug("failed to find device idx %d\n", gpuidx);
224 			return -EINVAL;
225 		}
226 
227 		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
228 					  hmm_pfns, gpuidx);
229 		if (r)
230 			break;
231 	}
232 
233 	return r;
234 }
235 
236 void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
237 			 unsigned long offset, unsigned long npages)
238 {
239 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
240 	int i;
241 
242 	if (!dma_addr)
243 		return;
244 
245 	for (i = offset; i < offset + npages; i++) {
246 		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
247 			continue;
248 		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
249 		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
250 		dma_addr[i] = 0;
251 	}
252 }
253 
254 void svm_range_dma_unmap(struct svm_range *prange)
255 {
256 	struct kfd_process_device *pdd;
257 	dma_addr_t *dma_addr;
258 	struct device *dev;
259 	struct kfd_process *p;
260 	uint32_t gpuidx;
261 
262 	p = container_of(prange->svms, struct kfd_process, svms);
263 
264 	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
265 		dma_addr = prange->dma_addr[gpuidx];
266 		if (!dma_addr)
267 			continue;
268 
269 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
270 		if (!pdd) {
271 			pr_debug("failed to find device idx %d\n", gpuidx);
272 			continue;
273 		}
274 		dev = &pdd->dev->adev->pdev->dev;
275 
276 		svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
277 	}
278 }
279 
280 static void svm_range_free(struct svm_range *prange, bool do_unmap)
281 {
282 	uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
283 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
284 	uint32_t gpuidx;
285 
286 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
287 		 prange->start, prange->last);
288 
289 	svm_range_vram_node_free(prange);
290 	if (do_unmap)
291 		svm_range_dma_unmap(prange);
292 
293 	if (do_unmap && !p->xnack_enabled) {
294 		pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
295 		amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
296 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
297 	}
298 
299 	/* free dma_addr array for each gpu */
300 	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
301 		if (prange->dma_addr[gpuidx]) {
302 			kvfree(prange->dma_addr[gpuidx]);
303 			prange->dma_addr[gpuidx] = NULL;
304 		}
305 	}
306 
307 	mutex_destroy(&prange->lock);
308 	mutex_destroy(&prange->migrate_mutex);
309 	kfree(prange);
310 }
311 
312 static void
313 svm_range_set_default_attributes(struct svm_range_list *svms, int32_t *location,
314 				 int32_t *prefetch_loc, uint8_t *granularity,
315 				 uint32_t *flags)
316 {
317 	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
318 	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
319 	*granularity = svms->default_granularity;
320 	*flags =
321 		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
322 }
323 
324 static struct
325 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
326 			 uint64_t last, bool update_mem_usage)
327 {
328 	uint64_t size = last - start + 1;
329 	struct svm_range *prange;
330 	struct kfd_process *p;
331 
332 	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
333 	if (!prange)
334 		return NULL;
335 
336 	p = container_of(svms, struct kfd_process, svms);
337 	if (!p->xnack_enabled && update_mem_usage &&
338 	    amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
339 				    KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
340 		pr_info("SVM mapping failed, exceeds resident system memory limit\n");
341 		kfree(prange);
342 		return NULL;
343 	}
344 	prange->npages = size;
345 	prange->svms = svms;
346 	prange->start = start;
347 	prange->last = last;
348 	INIT_LIST_HEAD(&prange->list);
349 	INIT_LIST_HEAD(&prange->update_list);
350 	INIT_LIST_HEAD(&prange->svm_bo_list);
351 	INIT_LIST_HEAD(&prange->deferred_list);
352 	INIT_LIST_HEAD(&prange->child_list);
353 	atomic_set(&prange->invalid, 0);
354 	prange->validate_timestamp = 0;
355 	prange->vram_pages = 0;
356 	mutex_init(&prange->migrate_mutex);
357 	mutex_init(&prange->lock);
358 
359 	if (p->xnack_enabled)
360 		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
361 			    MAX_GPU_INSTANCE);
362 
363 	svm_range_set_default_attributes(svms, &prange->preferred_loc,
364 					 &prange->prefetch_loc,
365 					 &prange->granularity, &prange->flags);
366 
367 	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
368 
369 	return prange;
370 }
371 
372 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
373 {
374 	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
375 		return false;
376 
377 	return true;
378 }
379 
380 static void svm_range_bo_release(struct kref *kref)
381 {
382 	struct svm_range_bo *svm_bo;
383 
384 	svm_bo = container_of(kref, struct svm_range_bo, kref);
385 	pr_debug("svm_bo 0x%p\n", svm_bo);
386 
387 	spin_lock(&svm_bo->list_lock);
388 	while (!list_empty(&svm_bo->range_list)) {
389 		struct svm_range *prange =
390 				list_first_entry(&svm_bo->range_list,
391 						struct svm_range, svm_bo_list);
392 		/* list_del_init tells a concurrent svm_range_vram_node_new when
393 		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
394 		 */
395 		list_del_init(&prange->svm_bo_list);
396 		spin_unlock(&svm_bo->list_lock);
397 
398 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
399 			 prange->start, prange->last);
400 		mutex_lock(&prange->lock);
401 		prange->svm_bo = NULL;
402 		/* prange should not hold vram page now */
403 		WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
404 		mutex_unlock(&prange->lock);
405 
406 		spin_lock(&svm_bo->list_lock);
407 	}
408 	spin_unlock(&svm_bo->list_lock);
409 
410 	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
411 		struct kfd_process_device *pdd;
412 		struct kfd_process *p;
413 		struct mm_struct *mm;
414 
415 		mm = svm_bo->eviction_fence->mm;
416 		/*
417 		 * A forked child process takes a reference on the svm_bo device pages,
418 		 * so the svm_bo may be released after the parent process is already gone.
419 		 */
420 		p = kfd_lookup_process_by_mm(mm);
421 		if (p) {
422 			pdd = kfd_get_process_device_data(svm_bo->node, p);
423 			if (pdd)
424 				atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
425 			kfd_unref_process(p);
426 		}
427 		mmput(mm);
428 	}
429 
430 	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
431 		/* We're not in the eviction worker. Signal the fence. */
432 		dma_fence_signal(&svm_bo->eviction_fence->base);
433 	dma_fence_put(&svm_bo->eviction_fence->base);
434 	amdgpu_bo_unref(&svm_bo->bo);
435 	kfree(svm_bo);
436 }
437 
438 static void svm_range_bo_wq_release(struct work_struct *work)
439 {
440 	struct svm_range_bo *svm_bo;
441 
442 	svm_bo = container_of(work, struct svm_range_bo, release_work);
443 	svm_range_bo_release(&svm_bo->kref);
444 }
445 
446 static void svm_range_bo_release_async(struct kref *kref)
447 {
448 	struct svm_range_bo *svm_bo;
449 
450 	svm_bo = container_of(kref, struct svm_range_bo, kref);
451 	pr_debug("svm_bo 0x%p\n", svm_bo);
452 	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
453 	schedule_work(&svm_bo->release_work);
454 }
455 
456 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
457 {
458 	kref_put(&svm_bo->kref, svm_range_bo_release_async);
459 }
460 
461 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
462 {
463 	if (svm_bo)
464 		kref_put(&svm_bo->kref, svm_range_bo_release);
465 }
466 
467 static bool
468 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
469 {
470 	mutex_lock(&prange->lock);
471 	if (!prange->svm_bo) {
472 		mutex_unlock(&prange->lock);
473 		return false;
474 	}
475 	if (prange->ttm_res) {
476 		/* We still have a reference, all is well */
477 		mutex_unlock(&prange->lock);
478 		return true;
479 	}
480 	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
481 		/*
482 		 * Migrate from GPU to GPU, remove range from source svm_bo->node
483 		 * range list, and return false to allocate svm_bo from destination
484 		 * node.
485 		 */
486 		if (prange->svm_bo->node != node) {
487 			mutex_unlock(&prange->lock);
488 
489 			spin_lock(&prange->svm_bo->list_lock);
490 			list_del_init(&prange->svm_bo_list);
491 			spin_unlock(&prange->svm_bo->list_lock);
492 
493 			svm_range_bo_unref(prange->svm_bo);
494 			return false;
495 		}
496 		if (READ_ONCE(prange->svm_bo->evicting)) {
497 			struct dma_fence *f;
498 			struct svm_range_bo *svm_bo;
499 			/* The BO is getting evicted,
500 			 * we need to get a new one
501 			 */
502 			mutex_unlock(&prange->lock);
503 			svm_bo = prange->svm_bo;
504 			f = dma_fence_get(&svm_bo->eviction_fence->base);
505 			svm_range_bo_unref(prange->svm_bo);
506 			/* wait for the fence to avoid long spin-loop
507 			 * at list_empty_careful
508 			 */
509 			dma_fence_wait(f, false);
510 			dma_fence_put(f);
511 		} else {
512 			/* The BO was still around and we got
513 			 * a new reference to it
514 			 */
515 			mutex_unlock(&prange->lock);
516 			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
517 				 prange->svms, prange->start, prange->last);
518 
519 			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
520 			return true;
521 		}
522 
523 	} else {
524 		mutex_unlock(&prange->lock);
525 	}
526 
527 	/* We need a new svm_bo. Spin-loop to wait for concurrent
528 	 * svm_range_bo_release to finish removing this range from
529 	 * its range list and set prange->svm_bo to null. After this,
530 	 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
531 	 */
532 	while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
533 		cond_resched();
534 
535 	return false;
536 }
537 
538 static struct svm_range_bo *svm_range_bo_new(void)
539 {
540 	struct svm_range_bo *svm_bo;
541 
542 	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
543 	if (!svm_bo)
544 		return NULL;
545 
546 	kref_init(&svm_bo->kref);
547 	INIT_LIST_HEAD(&svm_bo->range_list);
548 	spin_lock_init(&svm_bo->list_lock);
549 
550 	return svm_bo;
551 }
552 
553 int
554 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
555 			bool clear)
556 {
557 	struct kfd_process_device *pdd;
558 	struct amdgpu_bo_param bp;
559 	struct svm_range_bo *svm_bo;
560 	struct amdgpu_bo_user *ubo;
561 	struct amdgpu_bo *bo;
562 	struct kfd_process *p;
563 	struct mm_struct *mm;
564 	int r;
565 
566 	p = container_of(prange->svms, struct kfd_process, svms);
567 	pr_debug("process pid: %d svms 0x%p [0x%lx 0x%lx]\n",
568 		 p->lead_thread->pid, prange->svms,
569 		 prange->start, prange->last);
570 
571 	if (svm_range_validate_svm_bo(node, prange))
572 		return 0;
573 
574 	svm_bo = svm_range_bo_new();
575 	if (!svm_bo) {
576 		pr_debug("failed to alloc svm bo\n");
577 		return -ENOMEM;
578 	}
579 	mm = get_task_mm(p->lead_thread);
580 	if (!mm) {
581 		pr_debug("failed to get mm\n");
582 		kfree(svm_bo);
583 		return -ESRCH;
584 	}
585 	svm_bo->node = node;
586 	svm_bo->eviction_fence =
587 		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
588 					   mm,
589 					   svm_bo);
590 	mmput(mm);
591 	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
592 	svm_bo->evicting = 0;
593 	memset(&bp, 0, sizeof(bp));
594 	bp.size = prange->npages * PAGE_SIZE;
595 	bp.byte_align = PAGE_SIZE;
596 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
597 	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
598 	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
599 	bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
600 	bp.type = ttm_bo_type_device;
601 	bp.resv = NULL;
602 	if (node->xcp)
603 		bp.xcp_id_plus1 = node->xcp->id + 1;
604 
605 	r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
606 	if (r) {
607 		pr_debug("failed %d to create bo\n", r);
608 		goto create_bo_failed;
609 	}
610 	bo = &ubo->bo;
611 
612 	pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
613 		 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
614 		 bp.xcp_id_plus1 - 1);
615 
616 	r = amdgpu_bo_reserve(bo, true);
617 	if (r) {
618 		pr_debug("failed %d to reserve bo\n", r);
619 		goto reserve_bo_failed;
620 	}
621 
622 	if (clear) {
623 		r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
624 		if (r) {
625 			pr_debug("failed %d to sync bo\n", r);
626 			amdgpu_bo_unreserve(bo);
627 			goto reserve_bo_failed;
628 		}
629 	}
630 
631 	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
632 	if (r) {
633 		pr_debug("failed %d to reserve bo\n", r);
634 		amdgpu_bo_unreserve(bo);
635 		goto reserve_bo_failed;
636 	}
637 	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
638 
639 	amdgpu_bo_unreserve(bo);
640 
641 	svm_bo->bo = bo;
642 	prange->svm_bo = svm_bo;
643 	prange->ttm_res = bo->tbo.resource;
644 	prange->offset = 0;
645 
646 	spin_lock(&svm_bo->list_lock);
647 	list_add(&prange->svm_bo_list, &svm_bo->range_list);
648 	spin_unlock(&svm_bo->list_lock);
649 
650 	pdd = svm_range_get_pdd_by_node(prange, node);
651 	if (pdd)
652 		atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
653 
654 	return 0;
655 
656 reserve_bo_failed:
657 	amdgpu_bo_unref(&bo);
658 create_bo_failed:
659 	dma_fence_put(&svm_bo->eviction_fence->base);
660 	kfree(svm_bo);
661 	prange->ttm_res = NULL;
662 
663 	return r;
664 }
665 
666 void svm_range_vram_node_free(struct svm_range *prange)
667 {
668 	/* serialize prange->svm_bo unref */
669 	mutex_lock(&prange->lock);
670 	/* prange->svm_bo has not been unref */
671 	if (prange->ttm_res) {
672 		prange->ttm_res = NULL;
673 		mutex_unlock(&prange->lock);
674 		svm_range_bo_unref(prange->svm_bo);
675 	} else
676 		mutex_unlock(&prange->lock);
677 }
678 
679 struct kfd_node *
680 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
681 {
682 	struct kfd_process *p;
683 	struct kfd_process_device *pdd;
684 
685 	p = container_of(prange->svms, struct kfd_process, svms);
686 	pdd = kfd_process_device_data_by_id(p, gpu_id);
687 	if (!pdd) {
688 		pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
689 		return NULL;
690 	}
691 
692 	return pdd->dev;
693 }
694 
695 struct kfd_process_device *
696 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
697 {
698 	struct kfd_process *p;
699 
700 	p = container_of(prange->svms, struct kfd_process, svms);
701 
702 	return kfd_get_process_device_data(node, p);
703 }
704 
705 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
706 {
707 	struct ttm_operation_ctx ctx = { false, false };
708 
709 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
710 
711 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
712 }
713 
714 static int
715 svm_range_check_attr(struct kfd_process *p,
716 		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
717 {
718 	uint32_t i;
719 
720 	for (i = 0; i < nattr; i++) {
721 		uint32_t val = attrs[i].value;
722 		int gpuidx = MAX_GPU_INSTANCE;
723 
724 		switch (attrs[i].type) {
725 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
726 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
727 			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
728 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
729 			break;
730 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
731 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
732 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
733 			break;
734 		case KFD_IOCTL_SVM_ATTR_ACCESS:
735 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
736 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
737 			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
738 			break;
739 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
740 			break;
741 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
742 			break;
743 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
744 			break;
745 		default:
746 			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
747 			return -EINVAL;
748 		}
749 
750 		if (gpuidx < 0) {
751 			pr_debug("no GPU 0x%x found\n", val);
752 			return -EINVAL;
753 		} else if (gpuidx < MAX_GPU_INSTANCE &&
754 			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
755 			pr_debug("GPU 0x%x not supported\n", val);
756 			return -EINVAL;
757 		}
758 	}
759 
760 	return 0;
761 }
762 
763 static void
764 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
765 		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
766 		      bool *update_mapping)
767 {
768 	uint32_t i;
769 	int gpuidx;
770 
771 	for (i = 0; i < nattr; i++) {
772 		switch (attrs[i].type) {
773 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
774 			prange->preferred_loc = attrs[i].value;
775 			break;
776 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
777 			prange->prefetch_loc = attrs[i].value;
778 			break;
779 		case KFD_IOCTL_SVM_ATTR_ACCESS:
780 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
781 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
782 			if (!p->xnack_enabled)
783 				*update_mapping = true;
784 
785 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
786 							       attrs[i].value);
787 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
788 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
789 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
790 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
791 				bitmap_set(prange->bitmap_access, gpuidx, 1);
792 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
793 			} else {
794 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
795 				bitmap_set(prange->bitmap_aip, gpuidx, 1);
796 			}
797 			break;
798 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
799 			*update_mapping = true;
800 			prange->flags |= attrs[i].value;
801 			break;
802 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
803 			*update_mapping = true;
804 			prange->flags &= ~attrs[i].value;
805 			break;
806 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
807 			prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
808 			break;
809 		default:
810 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
811 		}
812 	}
813 }
814 
815 static bool
816 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
817 			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
818 {
819 	uint32_t i;
820 	int gpuidx;
821 
822 	for (i = 0; i < nattr; i++) {
823 		switch (attrs[i].type) {
824 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
825 			if (prange->preferred_loc != attrs[i].value)
826 				return false;
827 			break;
828 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
829 			/* Prefetch should always trigger a migration even
830 			 * if the value of the attribute didn't change.
831 			 */
832 			return false;
833 		case KFD_IOCTL_SVM_ATTR_ACCESS:
834 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
835 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
836 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
837 							       attrs[i].value);
838 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
839 				if (test_bit(gpuidx, prange->bitmap_access) ||
840 				    test_bit(gpuidx, prange->bitmap_aip))
841 					return false;
842 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
843 				if (!test_bit(gpuidx, prange->bitmap_access))
844 					return false;
845 			} else {
846 				if (!test_bit(gpuidx, prange->bitmap_aip))
847 					return false;
848 			}
849 			break;
850 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
851 			if ((prange->flags & attrs[i].value) != attrs[i].value)
852 				return false;
853 			break;
854 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
855 			if ((prange->flags & attrs[i].value) != 0)
856 				return false;
857 			break;
858 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
859 			if (prange->granularity != attrs[i].value)
860 				return false;
861 			break;
862 		default:
863 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
864 		}
865 	}
866 
867 	return true;
868 }
869 
870 /**
871  * svm_range_debug_dump - print all range information from svms
872  * @svms: svm range list header
873  *
874  * debug output svm range start, end, prefetch location from svms
875  * interval tree and link list
876  *
877  * Context: The caller must hold svms->lock
878  */
879 static void svm_range_debug_dump(struct svm_range_list *svms)
880 {
881 	struct interval_tree_node *node;
882 	struct svm_range *prange;
883 
884 	pr_debug("dump svms 0x%p list\n", svms);
885 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
886 
887 	list_for_each_entry(prange, &svms->list, list) {
888 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
889 			 prange, prange->start, prange->npages,
890 			 prange->start + prange->npages - 1,
891 			 prange->actual_loc);
892 	}
893 
894 	pr_debug("dump svms 0x%p interval tree\n", svms);
895 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
896 	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
897 	while (node) {
898 		prange = container_of(node, struct svm_range, it_node);
899 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
900 			 prange, prange->start, prange->npages,
901 			 prange->start + prange->npages - 1,
902 			 prange->actual_loc);
903 		node = interval_tree_iter_next(node, 0, ~0ULL);
904 	}
905 }
906 
907 static void *
908 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
909 		     uint64_t offset, uint64_t *vram_pages)
910 {
911 	unsigned char *src = (unsigned char *)psrc + offset;
912 	unsigned char *dst;
913 	uint64_t i;
914 
915 	dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
916 	if (!dst)
917 		return NULL;
918 
919 	if (!vram_pages) {
920 		memcpy(dst, src, num_elements * size);
921 		return (void *)dst;
922 	}
923 
924 	*vram_pages = 0;
925 	for (i = 0; i < num_elements; i++) {
926 		dma_addr_t *temp;
927 		temp = (dma_addr_t *)dst + i;
928 		*temp = *((dma_addr_t *)src + i);
929 		if (*temp & SVM_RANGE_VRAM_DOMAIN)
930 			(*vram_pages)++;
931 	}
932 
933 	return (void *)dst;
934 }
935 
936 static int
937 svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
938 {
939 	int i;
940 
941 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
942 		if (!src->dma_addr[i])
943 			continue;
944 		dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
945 					sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
946 		if (!dst->dma_addr[i])
947 			return -ENOMEM;
948 	}
949 
950 	return 0;
951 }
952 
953 static int
954 svm_range_split_array(void *ppnew, void *ppold, size_t size,
955 		      uint64_t old_start, uint64_t old_n,
956 		      uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
957 {
958 	unsigned char *new, *old, *pold;
959 	uint64_t d;
960 
961 	if (!ppold)
962 		return 0;
963 	pold = *(unsigned char **)ppold;
964 	if (!pold)
965 		return 0;
966 
967 	d = (new_start - old_start) * size;
968 	/* get dma addr array for new range and calculate its vram page number */
969 	new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
970 	if (!new)
971 		return -ENOMEM;
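	/* The retained "old" copy keeps the elements that stay in the old range:
	 * if the new range was carved off the front (new_start == old_start),
	 * the old data now starts new_n elements in; otherwise the old range
	 * keeps the front of the array.
	 */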
972 	d = (new_start == old_start) ? new_n * size : 0;
973 	old = svm_range_copy_array(pold, size, old_n, d, NULL);
974 	if (!old) {
975 		kvfree(new);
976 		return -ENOMEM;
977 	}
978 	kvfree(pold);
979 	*(void **)ppold = old;
980 	*(void **)ppnew = new;
981 
982 	return 0;
983 }
984 
985 static int
986 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
987 		      uint64_t start, uint64_t last)
988 {
989 	uint64_t npages = last - start + 1;
990 	int i, r;
991 
992 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
993 		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
994 					  sizeof(*old->dma_addr[i]), old->start,
995 					  npages, new->start, new->npages,
996 					  old->actual_loc ? &new->vram_pages : NULL);
997 		if (r)
998 			return r;
999 	}
1000 	if (old->actual_loc)
1001 		old->vram_pages -= new->vram_pages;
1002 
1003 	return 0;
1004 }
1005 
1006 static int
1007 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
1008 		      uint64_t start, uint64_t last)
1009 {
1010 	uint64_t npages = last - start + 1;
1011 
1012 	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
1013 		 new->svms, new, new->start, start, last);
1014 
1015 	if (new->start == old->start) {
1016 		new->offset = old->offset;
1017 		old->offset += new->npages;
1018 	} else {
1019 		new->offset = old->offset + npages;
1020 	}
1021 
1022 	new->svm_bo = svm_range_bo_ref(old->svm_bo);
1023 	new->ttm_res = old->ttm_res;
1024 
1025 	spin_lock(&new->svm_bo->list_lock);
1026 	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1027 	spin_unlock(&new->svm_bo->list_lock);
1028 
1029 	return 0;
1030 }
1031 
1032 /**
1033  * svm_range_split_adjust - split range and adjust
1034  *
1035  * @new: new range
1036  * @old: the old range
1037  * @start: the old range adjust to start address in pages
1038  * @last: the old range adjust to last address in pages
1039  *
1040  * Copy system memory dma_addr or vram ttm_res from the old range to the new
1041  * range, covering new->npages pages starting at new->start; the remaining old
1042  * range is adjusted to [start, last].
1043  *
1044  * Return:
1045  * 0 - OK, -ENOMEM - out of memory
1046  */
1047 static int
1048 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
1049 		      uint64_t start, uint64_t last)
1050 {
1051 	int r;
1052 
1053 	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1054 		 new->svms, new->start, old->start, old->last, start, last);
1055 
1056 	if (new->start < old->start ||
1057 	    new->last > old->last) {
1058 		WARN_ONCE(1, "invalid new range start or last\n");
1059 		return -EINVAL;
1060 	}
1061 
1062 	r = svm_range_split_pages(new, old, start, last);
1063 	if (r)
1064 		return r;
1065 
1066 	if (old->actual_loc && old->ttm_res) {
1067 		r = svm_range_split_nodes(new, old, start, last);
1068 		if (r)
1069 			return r;
1070 	}
1071 
1072 	old->npages = last - start + 1;
1073 	old->start = start;
1074 	old->last = last;
1075 	new->flags = old->flags;
1076 	new->preferred_loc = old->preferred_loc;
1077 	new->prefetch_loc = old->prefetch_loc;
1078 	new->actual_loc = old->actual_loc;
1079 	new->granularity = old->granularity;
1080 	new->mapped_to_gpu = old->mapped_to_gpu;
1081 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1082 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1083 	atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
1084 
1085 	return 0;
1086 }
1087 
1088 /**
1089  * svm_range_split - split a range in 2 ranges
1090  *
1091  * @prange: the svm range to split
1092  * @start: the remaining range start address in pages
1093  * @last: the remaining range last address in pages
1094  * @new: the result new range generated
1095  *
1096  * Two cases only:
1097  * case 1: if start == prange->start
1098  *         prange ==> prange[start, last]
1099  *         new range [last + 1, prange->last]
1100  *
1101  * case 2: if last == prange->last
1102  *         prange ==> prange[start, last]
1103  *         new range [prange->start, start - 1]
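 *
 * Example (hypothetical page numbers, for illustration only): splitting
 * prange [0x1000 0x1fff] with start 0x1000 and last 0x17ff keeps prange as
 * [0x1000 0x17ff] and returns the new range [0x1800 0x1fff] (case 1).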
1104  *
1105  * Return:
1106  * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1107  */
1108 static int
1109 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1110 		struct svm_range **new)
1111 {
1112 	uint64_t old_start = prange->start;
1113 	uint64_t old_last = prange->last;
1114 	struct svm_range_list *svms;
1115 	int r = 0;
1116 
1117 	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1118 		 old_start, old_last, start, last);
1119 
1120 	if (old_start != start && old_last != last)
1121 		return -EINVAL;
1122 	if (start < old_start || last > old_last)
1123 		return -EINVAL;
1124 
1125 	svms = prange->svms;
1126 	if (old_start == start)
1127 		*new = svm_range_new(svms, last + 1, old_last, false);
1128 	else
1129 		*new = svm_range_new(svms, old_start, start - 1, false);
1130 	if (!*new)
1131 		return -ENOMEM;
1132 
1133 	r = svm_range_split_adjust(*new, prange, start, last);
1134 	if (r) {
1135 		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1136 			 r, old_start, old_last, start, last);
1137 		svm_range_free(*new, false);
1138 		*new = NULL;
1139 	}
1140 
1141 	return r;
1142 }
1143 
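/* Note on remap_list (assuming 4K base pages, so 512 pages equal one 2MB huge
 * page): when a split boundary falls inside a region that may be mapped with
 * 2MB huge pages and the boundary itself is not 2MB aligned, the surviving
 * piece is queued on remap_list so that its GPU mapping can be rebuilt after
 * the split.
 */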
1144 static int
1145 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
1146 		     struct list_head *insert_list, struct list_head *remap_list)
1147 {
1148 	unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
1149 	unsigned long start_align = ALIGN(prange->start, 512);
1150 	bool huge_page_mapping = last_align_down > start_align;
1151 	struct svm_range *tail = NULL;
1152 	int r;
1153 
1154 	r = svm_range_split(prange, prange->start, new_last, &tail);
1155 
1156 	if (r)
1157 		return r;
1158 
1159 	list_add(&tail->list, insert_list);
1160 
1161 	if (huge_page_mapping && tail->start > start_align &&
1162 	    tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512)))
1163 		list_add(&tail->update_list, remap_list);
1164 
1165 	return 0;
1166 }
1167 
1168 static int
1169 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
1170 		     struct list_head *insert_list, struct list_head *remap_list)
1171 {
1172 	unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
1173 	unsigned long start_align = ALIGN(prange->start, 512);
1174 	bool huge_page_mapping = last_align_down > start_align;
1175 	struct svm_range *head = NULL;
1176 	int r;
1177 
1178 	r = svm_range_split(prange, new_start, prange->last, &head);
1179 
1180 	if (r)
1181 		return r;
1182 
1183 	list_add(&head->list, insert_list);
1184 
1185 	if (huge_page_mapping && head->last + 1 > start_align &&
1186 	    head->last + 1 < last_align_down && (!IS_ALIGNED(head->last + 1, 512)))
1187 		list_add(&head->update_list, remap_list);
1188 
1189 	return 0;
1190 }
1191 
1192 static void
1193 svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op)
1194 {
1195 	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1196 		 pchild, pchild->start, pchild->last, prange, op);
1197 
1198 	pchild->work_item.mm = NULL;
1199 	pchild->work_item.op = op;
1200 	list_add_tail(&pchild->child_list, &prange->child_list);
1201 }
1202 
1203 static bool
1204 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1205 {
1206 	return (node_a->adev == node_b->adev ||
1207 		amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
1208 }
1209 
1210 static uint64_t
1211 svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
1212 			struct svm_range *prange, int domain)
1213 {
1214 	struct kfd_node *bo_node;
1215 	uint32_t flags = prange->flags;
1216 	uint32_t mapping_flags = 0;
1217 	uint32_t gc_ip_version = KFD_GC_VERSION(node);
1218 	uint64_t pte_flags;
1219 	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1220 	bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
1221 	bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
1222 	unsigned int mtype_local, mtype_remote;
1223 	bool is_aid_a1, is_local;
1224 
1225 	if (domain == SVM_RANGE_VRAM_DOMAIN)
1226 		bo_node = prange->svm_bo->node;
1227 
1228 	switch (gc_ip_version) {
1229 	case IP_VERSION(9, 4, 1):
1230 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1231 			if (bo_node == node) {
1232 				mapping_flags |= coherent ?
1233 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1234 			} else {
1235 				mapping_flags |= coherent ?
1236 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1237 				if (svm_nodes_in_same_hive(node, bo_node))
1238 					snoop = true;
1239 			}
1240 		} else {
1241 			mapping_flags |= coherent ?
1242 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1243 		}
1244 		break;
1245 	case IP_VERSION(9, 4, 2):
1246 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1247 			if (bo_node == node) {
1248 				mapping_flags |= coherent ?
1249 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1250 				if (node->adev->gmc.xgmi.connected_to_cpu)
1251 					snoop = true;
1252 			} else {
1253 				mapping_flags |= coherent ?
1254 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1255 				if (svm_nodes_in_same_hive(node, bo_node))
1256 					snoop = true;
1257 			}
1258 		} else {
1259 			mapping_flags |= coherent ?
1260 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1261 		}
1262 		break;
1263 	case IP_VERSION(9, 4, 3):
1264 	case IP_VERSION(9, 4, 4):
1265 	case IP_VERSION(9, 5, 0):
1266 		if (ext_coherent)
1267 			mtype_local = AMDGPU_VM_MTYPE_CC;
1268 		else
1269 			mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1270 				amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1271 		snoop = true;
1272 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1273 			/* local HBM region close to partition */
1274 			if (bo_node->adev == node->adev &&
1275 			    (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
1276 				mapping_flags |= mtype_local;
1277 			/* local HBM region far from partition or remote XGMI GPU
1278 			 * with regular system scope coherence
1279 			 */
1280 			else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
1281 				mapping_flags |= AMDGPU_VM_MTYPE_NC;
1282 			/* PCIe P2P on GPUs pre-9.5.0 */
1283 			else if (gc_ip_version < IP_VERSION(9, 5, 0) &&
1284 				 !svm_nodes_in_same_hive(bo_node, node))
1285 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
1286 			/* Other remote memory */
1287 			else
1288 				mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1289 		/* system memory accessed by the APU */
1290 		} else if (node->adev->flags & AMD_IS_APU) {
1291 			/* On NUMA systems, locality is determined per-page
1292 			 * in amdgpu_gmc_override_vm_pte_flags
1293 			 */
1294 			if (num_possible_nodes() <= 1)
1295 				mapping_flags |= mtype_local;
1296 			else
1297 				mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1298 		/* system memory accessed by the dGPU */
1299 		} else {
1300 			if (gc_ip_version < IP_VERSION(9, 5, 0) || ext_coherent)
1301 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
1302 			else
1303 				mapping_flags |= AMDGPU_VM_MTYPE_NC;
1304 		}
1305 		break;
1306 	case IP_VERSION(12, 0, 0):
1307 	case IP_VERSION(12, 0, 1):
1308 		mapping_flags |= AMDGPU_VM_MTYPE_NC;
1309 		break;
1310 	case IP_VERSION(12, 1, 0):
1311 		is_aid_a1 = (node->adev->rev_id & 0x10);
1312 		is_local = (domain == SVM_RANGE_VRAM_DOMAIN) &&
1313 				(bo_node->adev == node->adev);
1314 
1315 		mtype_local = amdgpu_mtype_local == 0 ? AMDGPU_VM_MTYPE_RW :
1316 				amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1317 				is_aid_a1 ? AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_NC;
1318 		mtype_remote = is_aid_a1 ? AMDGPU_VM_MTYPE_NC : AMDGPU_VM_MTYPE_UC;
1319 		snoop = true;
1320 
1321 		if (is_local) /* local HBM  */ {
1322 			mapping_flags |= mtype_local;
1323 		} else if (ext_coherent) {
1324 			mapping_flags |= AMDGPU_VM_MTYPE_UC;
1325 		} else {
1326 			/* system memory or remote VRAM */
1327 			mapping_flags |= mtype_remote;
1328 		}
1329 		break;
1330 	default:
1331 		mapping_flags |= coherent ?
1332 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1333 	}
1334 
1335 	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1336 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1337 
1338 	pte_flags = AMDGPU_PTE_VALID;
1339 	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1340 	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1341 	if (gc_ip_version >= IP_VERSION(12, 0, 0))
1342 		pte_flags |= AMDGPU_PTE_IS_PTE;
1343 
1344 	amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
1345 	pte_flags |= AMDGPU_PTE_READABLE;
1346 	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
1347 		pte_flags |= AMDGPU_PTE_WRITEABLE;
1348 
1349 	if ((gc_ip_version == IP_VERSION(12, 1, 0)) &&
1350 	    node->adev->have_atomics_support)
1351 		pte_flags |= AMDGPU_PTE_BUS_ATOMICS;
1352 
1353 	return pte_flags;
1354 }
1355 
1356 static int
1357 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1358 			 uint64_t start, uint64_t last,
1359 			 struct dma_fence **fence)
1360 {
1361 	uint64_t init_pte_value = adev->gmc.init_pte_flags;
1362 	uint64_t gpu_start, gpu_end;
1363 
1364 	/* Convert CPU page range to GPU page range */
1365 	gpu_start = start * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1366 	gpu_end = (last + 1) * AMDGPU_GPU_PAGES_IN_CPU_PAGE - 1;
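	/* For example, with 4K CPU pages and 4K GPU pages the factor is 1 and
	 * both ranges are identical; with 16K CPU pages each CPU page covers
	 * four GPU pages.
	 */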
1367 
1368 	pr_debug("CPU[0x%llx 0x%llx] -> GPU[0x%llx 0x%llx]\n", start, last,
1369 		gpu_start, gpu_end);
1370 	return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, gpu_start,
1371 				      gpu_end, init_pte_value, 0, 0, NULL, NULL,
1372 				      fence);
1373 }
1374 
1375 static int
1376 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1377 			  unsigned long last, uint32_t trigger)
1378 {
1379 	struct kfd_process_device *pdd;
1380 	struct dma_fence *fence = NULL;
1381 	struct kfd_process *p;
1382 	uint32_t gpuidx;
1383 	int r = 0;
1384 
1385 	if (!prange->mapped_to_gpu) {
1386 		pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1387 			 prange, prange->start, prange->last);
1388 		return 0;
1389 	}
1390 
1391 	if (prange->start == start && prange->last == last) {
1392 		pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1393 		prange->mapped_to_gpu = false;
1394 	}
1395 
1396 	p = container_of(prange->svms, struct kfd_process, svms);
1397 
1398 	for_each_or_bit(gpuidx, prange->bitmap_access, prange->bitmap_aip, MAX_GPU_INSTANCE) {
1399 		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1400 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1401 		if (!pdd) {
1402 			pr_debug("failed to find device idx %d\n", gpuidx);
1403 			return -EINVAL;
1404 		}
1405 
1406 		kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1407 					     start, last, trigger);
1408 
1409 		r = svm_range_unmap_from_gpu(pdd->dev->adev,
1410 					     drm_priv_to_vm(pdd->drm_priv),
1411 					     start, last, &fence);
1412 		if (r)
1413 			break;
1414 
1415 		if (fence) {
1416 			r = dma_fence_wait(fence, false);
1417 			dma_fence_put(fence);
1418 			fence = NULL;
1419 			if (r)
1420 				break;
1421 		}
1422 		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
1423 	}
1424 
1425 	return r;
1426 }
1427 
1428 static int
1429 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1430 		     unsigned long offset, unsigned long npages, bool readonly,
1431 		     dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1432 		     struct dma_fence **fence, bool flush_tlb)
1433 {
1434 	struct amdgpu_device *adev = pdd->dev->adev;
1435 	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1436 	uint64_t pte_flags;
1437 	unsigned long last_start;
1438 	int last_domain;
1439 	int r = 0;
1440 	int64_t i, j;
1441 
1442 	last_start = prange->start + offset;
1443 
1444 	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1445 		 last_start, last_start + npages - 1, readonly);
1446 
1447 	for (i = offset; i < offset + npages; i++) {
1448 		uint64_t gpu_start;
1449 		uint64_t gpu_end;
1450 
1451 		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1452 		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1453 
1454 		/* Collect all pages in the same address range and memory domain
1455 		 * that can be mapped with a single call to update mapping.
1456 		 */
1457 		if (i < offset + npages - 1 &&
1458 		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1459 			continue;
1460 
1461 		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1462 			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1463 
1464 		pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
1465 		if (readonly)
1466 			pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1467 
1468 
1469 		/* For dGPU mode, the same vm_manager allocates VRAM for different
1470 		 * memory partitions based on fpfn/lpfn, so use the same
1471 		 * vm_manager.vram_base_offset regardless of the memory partition.
1472 		 */
1473 		gpu_start = last_start * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1474 		gpu_end = (prange->start + i + 1) * AMDGPU_GPU_PAGES_IN_CPU_PAGE - 1;
1475 
1476 		pr_debug("svms 0x%p map CPU[0x%lx 0x%llx] GPU[0x%llx 0x%llx] vram %d PTE 0x%llx\n",
1477 			 prange->svms, last_start, prange->start + i,
1478 			 gpu_start, gpu_end,
1479 			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1480 			 pte_flags);
1481 
1482 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
1483 					   NULL, gpu_start, gpu_end,
1484 					   pte_flags,
1485 					   (last_start - prange->start) << PAGE_SHIFT,
1486 					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1487 					   NULL, dma_addr, &vm->last_update);
1488 
1489 		for (j = last_start - prange->start; j <= i; j++)
1490 			dma_addr[j] |= last_domain;
1491 
1492 		if (r) {
1493 			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1494 			goto out;
1495 		}
1496 		last_start = prange->start + i + 1;
1497 	}
1498 
1499 	r = amdgpu_vm_update_pdes(adev, vm, false);
1500 	if (r) {
1501 		pr_debug("failed %d to update directories 0x%lx\n", r,
1502 			 prange->start);
1503 		goto out;
1504 	}
1505 
1506 	if (fence)
1507 		*fence = dma_fence_get(vm->last_update);
1508 
1509 out:
1510 	return r;
1511 }
1512 
1513 static int
1514 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1515 		      unsigned long npages, bool readonly,
1516 		      unsigned long *bitmap, bool wait, bool flush_tlb)
1517 {
1518 	struct kfd_process_device *pdd;
1519 	struct amdgpu_device *bo_adev = NULL;
1520 	struct kfd_process *p;
1521 	struct dma_fence *fence = NULL;
1522 	uint32_t gpuidx;
1523 	int r = 0;
1524 
1525 	if (prange->svm_bo && prange->ttm_res)
1526 		bo_adev = prange->svm_bo->node->adev;
1527 
1528 	p = container_of(prange->svms, struct kfd_process, svms);
1529 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1530 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1531 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1532 		if (!pdd) {
1533 			pr_debug("failed to find device idx %d\n", gpuidx);
1534 			return -EINVAL;
1535 		}
1536 
1537 		pdd = kfd_bind_process_to_device(pdd->dev, p);
1538 		if (IS_ERR(pdd))
1539 			return -EINVAL;
1540 
1541 		if (bo_adev && pdd->dev->adev != bo_adev &&
1542 		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1543 			pr_debug("cannot map to device idx %d\n", gpuidx);
1544 			continue;
1545 		}
1546 
1547 		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1548 					 prange->dma_addr[gpuidx],
1549 					 bo_adev, wait ? &fence : NULL,
1550 					 flush_tlb);
1551 		if (r)
1552 			break;
1553 
1554 		if (fence) {
1555 			r = dma_fence_wait(fence, false);
1556 			dma_fence_put(fence);
1557 			fence = NULL;
1558 			if (r) {
1559 				pr_debug("failed %d to dma fence wait\n", r);
1560 				break;
1561 			}
1562 		}
1563 
1564 		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1565 	}
1566 
1567 	return r;
1568 }
1569 
1570 struct svm_validate_context {
1571 	struct kfd_process *process;
1572 	struct svm_range *prange;
1573 	bool intr;
1574 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1575 	struct drm_exec exec;
1576 };
1577 
1578 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
1579 {
1580 	struct kfd_process_device *pdd;
1581 	struct amdgpu_vm *vm;
1582 	uint32_t gpuidx;
1583 	int r;
1584 
1585 	drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0, 0);
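	/* drm_exec_until_all_locked() re-runs this block whenever
	 * drm_exec_retry_on_contention() hits a contended reservation, so the
	 * page-directory locks below are retried until all of them are taken
	 * without contention.
	 */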
1586 	drm_exec_until_all_locked(&ctx->exec) {
1587 		for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1588 			pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1589 			if (!pdd) {
1590 				pr_debug("failed to find device idx %d\n", gpuidx);
1591 				r = -EINVAL;
1592 				goto unreserve_out;
1593 			}
1594 			vm = drm_priv_to_vm(pdd->drm_priv);
1595 
1596 			r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1597 			drm_exec_retry_on_contention(&ctx->exec);
1598 			if (unlikely(r)) {
1599 				pr_debug("failed %d to reserve bo\n", r);
1600 				goto unreserve_out;
1601 			}
1602 		}
1603 	}
1604 
1605 	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1606 		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1607 		if (!pdd) {
1608 			pr_debug("failed to find device idx %d\n", gpuidx);
1609 			r = -EINVAL;
1610 			goto unreserve_out;
1611 		}
1612 
1613 		r = amdgpu_vm_validate(pdd->dev->adev,
1614 				       drm_priv_to_vm(pdd->drm_priv), NULL,
1615 				       svm_range_bo_validate, NULL);
1616 		if (r) {
1617 			pr_debug("failed %d validate pt bos\n", r);
1618 			goto unreserve_out;
1619 		}
1620 	}
1621 
1622 	return 0;
1623 
1624 unreserve_out:
1625 	drm_exec_fini(&ctx->exec);
1626 	return r;
1627 }
1628 
1629 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1630 {
1631 	drm_exec_fini(&ctx->exec);
1632 }
1633 
1634 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1635 {
1636 	struct kfd_process_device *pdd;
1637 
1638 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1639 	if (!pdd)
1640 		return NULL;
1641 
1642 	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1643 }
1644 
1645 /*
1646  * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1647  *
1648  * To prevent concurrent destruction or change of range attributes, the
1649  * svm_read_lock must be held. The caller must not hold the svm_write_lock
1650  * because that would block concurrent evictions and lead to deadlocks. To
1651  * serialize concurrent migrations or validations of the same range, the
1652  * prange->migrate_mutex must be held.
1653  *
1654  * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1655  * eviction fence).
1656  *
1657  * The following sequence ensures race-free validation and GPU mapping:
1658  *
1659  * 1. Reserve page table (and SVM BO if range is in VRAM)
1660  * 2. hmm_range_fault to get page addresses (if system memory)
1661  * 3. DMA-map pages (if system memory)
1662  * 4-a. Take notifier lock
1663  * 4-b. Check that pages still valid (mmu_interval_read_retry)
1664  * 4-c. Check that the range was not split or otherwise invalidated
1665  * 4-d. Update GPU page table
1666  * 4-e. Release notifier lock
1667  * 5. Release page table (and SVM BO) reservation
1668  */
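/*
 * Minimal caller sketch (illustrative only, not lifted verbatim from a real
 * caller in this file): hold the mmap read lock and prange->migrate_mutex,
 * then
 *
 *	do {
 *		r = svm_range_validate_and_map(mm, prange->start, prange->last,
 *					       prange, MAX_GPU_INSTANCE,
 *					       false, true, false);
 *	} while (r == -EAGAIN);
 *
 * -EAGAIN means the pages changed while they were being validated, so the
 * whole sequence must be retried.
 */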
1669 static int svm_range_validate_and_map(struct mm_struct *mm,
1670 				      unsigned long map_start, unsigned long map_last,
1671 				      struct svm_range *prange, int32_t gpuidx,
1672 				      bool intr, bool wait, bool flush_tlb)
1673 {
1674 	struct svm_validate_context *ctx;
1675 	unsigned long start, end, addr;
1676 	struct kfd_process *p;
1677 	void *owner;
1678 	int32_t idx;
1679 	int r = 0;
1680 
1681 	ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
1682 	if (!ctx)
1683 		return -ENOMEM;
1684 	ctx->process = container_of(prange->svms, struct kfd_process, svms);
1685 	ctx->prange = prange;
1686 	ctx->intr = intr;
1687 
1688 	if (gpuidx < MAX_GPU_INSTANCE) {
1689 		bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1690 		bitmap_set(ctx->bitmap, gpuidx, 1);
1691 	} else if (ctx->process->xnack_enabled) {
1692 		bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1693 
1694 		/* If prefetch range to GPU, or GPU retry fault migrate range to
1695 		 * GPU, which has ACCESS attribute to the range, create mapping
1696 		 * on that GPU.
1697 		 */
1698 		if (prange->actual_loc) {
1699 			gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1700 							prange->actual_loc);
1701 			if (gpuidx < 0) {
1702 				WARN_ONCE(1, "failed get device by id 0x%x\n",
1703 					 prange->actual_loc);
1704 				r = -EINVAL;
1705 				goto free_ctx;
1706 			}
1707 			if (test_bit(gpuidx, prange->bitmap_access))
1708 				bitmap_set(ctx->bitmap, gpuidx, 1);
1709 		}
1710 
1711 		/*
1712 		 * If prange is already mapped or with always mapped flag,
1713 		 * update mapping on GPUs with ACCESS attribute
1714 		 */
1715 		if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1716 			if (prange->mapped_to_gpu ||
1717 			    prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1718 				bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1719 		}
1720 	} else {
1721 		bitmap_or(ctx->bitmap, prange->bitmap_access,
1722 			  prange->bitmap_aip, MAX_GPU_INSTANCE);
1723 	}
1724 
1725 	if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1726 		r = 0;
1727 		goto free_ctx;
1728 	}
1729 
1730 	if (prange->actual_loc && !prange->ttm_res) {
1731 		/* This should never happen. actual_loc gets set by
1732 		 * svm_migrate_ram_to_vram after allocating a BO.
1733 		 */
1734 		WARN_ONCE(1, "VRAM BO missing during validation\n");
1735 		r = -EINVAL;
1736 		goto free_ctx;
1737 	}
1738 
1739 	r = svm_range_reserve_bos(ctx, intr);
1740 	if (r)
1741 		goto free_ctx;
1742 
1743 	p = container_of(prange->svms, struct kfd_process, svms);
1744 	owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1745 						MAX_GPU_INSTANCE));
1746 	for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1747 		if (kfd_svm_page_owner(p, idx) != owner) {
1748 			owner = NULL;
1749 			break;
1750 		}
1751 	}
1752 
1753 	start = map_start << PAGE_SHIFT;
1754 	end = (map_last + 1) << PAGE_SHIFT;
1755 	for (addr = start; !r && addr < end; ) {
1756 		struct amdgpu_hmm_range *range = NULL;
1757 		unsigned long map_start_vma;
1758 		unsigned long map_last_vma;
1759 		struct vm_area_struct *vma;
1760 		unsigned long next = 0;
1761 		unsigned long offset;
1762 		unsigned long npages;
1763 		bool readonly;
1764 
1765 		vma = vma_lookup(mm, addr);
1766 		if (vma) {
1767 			readonly = !(vma->vm_flags & VM_WRITE);
1768 
1769 			next = min(vma->vm_end, end);
1770 			npages = (next - addr) >> PAGE_SHIFT;
1771 			/* HMM requires at least READ permissions. If provided with PROT_NONE,
1772 			 * unmap the memory. If it's not already mapped, this is a no-op.
1773 			 * If PROT_WRITE is provided without READ, warn first, then unmap.
1774 			 */
1775 			if (!(vma->vm_flags & VM_READ)) {
1776 				unsigned long e, s;
1777 
1778 				svm_range_lock(prange);
1779 				if (vma->vm_flags & VM_WRITE)
1780 					pr_debug("VM_WRITE without VM_READ is not supported");
1781 				s = max(start, prange->start);
1782 				e = min(end, prange->last);
1783 				if (e >= s)
1784 					r = svm_range_unmap_from_gpus(prange, s, e,
1785 						       KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
1786 				svm_range_unlock(prange);
1787 				/* If unmap returns non-zero, we'll bail on the next for loop
1788 				 * iteration, so just leave r and continue
1789 				 */
1790 				addr = next;
1791 				continue;
1792 			}
1793 
1794 			WRITE_ONCE(p->svms.faulting_task, current);
1795 			range = amdgpu_hmm_range_alloc(NULL);
1796 			if (likely(range))
1797 				r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1798 							       readonly, owner, range);
1799 			else
1800 				r = -ENOMEM;
1801 			WRITE_ONCE(p->svms.faulting_task, NULL);
1802 			if (r)
1803 				pr_debug("failed %d to get svm range pages\n", r);
1804 		} else {
1805 			r = -EFAULT;
1806 		}
1807 
1808 		if (!r) {
1809 			offset = (addr >> PAGE_SHIFT) - prange->start;
1810 			r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1811 					      range->hmm_range.hmm_pfns);
1812 			if (r)
1813 				pr_debug("failed %d to dma map range\n", r);
1814 		}
1815 
1816 		svm_range_lock(prange);
1817 
1818 		/* Free backing memory of hmm_range if it was initialized.
1819 		 * Override the return value to TRY AGAIN (-EAGAIN) only if
1820 		 * prior steps were successful.
1821 		 */
1822 		if (range && !amdgpu_hmm_range_valid(range) && !r) {
1823 			pr_debug("hmm update the range, need validate again\n");
1824 			r = -EAGAIN;
1825 		}
1826 
1827 		/* Free the hmm range */
1828 		amdgpu_hmm_range_free(range);
1829 
1830 		if (!r && !list_empty(&prange->child_list)) {
1831 			pr_debug("range split by unmap in parallel, validate again\n");
1832 			r = -EAGAIN;
1833 		}
1834 
1835 		if (!r) {
1836 			map_start_vma = max(map_start, prange->start + offset);
1837 			map_last_vma = min(map_last, prange->start + offset + npages - 1);
1838 			if (map_start_vma <= map_last_vma) {
1839 				offset = map_start_vma - prange->start;
1840 				npages = map_last_vma - map_start_vma + 1;
1841 				r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1842 							  ctx->bitmap, wait, flush_tlb);
1843 			}
1844 		}
1845 
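		/* Only mark the whole prange as mapped once the last VMA chunk
		 * has been handled successfully.
		 */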
1846 		if (!r && next == end)
1847 			prange->mapped_to_gpu = true;
1848 
1849 		svm_range_unlock(prange);
1850 
1851 		addr = next;
1852 	}
1853 
1854 	svm_range_unreserve_bos(ctx);
1855 	if (!r)
1856 		prange->validate_timestamp = ktime_get_boottime();
1857 
1858 free_ctx:
1859 	kfree(ctx);
1860 
1861 	return r;
1862 }
1863 
1864 /**
1865  * svm_range_list_lock_and_flush_work - flush pending deferred work
1866  *
1867  * @svms: the svm range list
1868  * @mm: the mm structure
1869  *
1870  * Context: Returns with mmap write lock held, pending deferred work flushed
1871  *
1872  */
1873 void
1874 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1875 				   struct mm_struct *mm)
1876 {
1877 retry_flush_work:
1878 	flush_work(&svms->deferred_list_work);
1879 	mmap_write_lock(mm);
1880 
1881 	if (list_empty(&svms->deferred_range_list))
1882 		return;
1883 	mmap_write_unlock(mm);
1884 	pr_debug("retry flush\n");
1885 	goto retry_flush_work;
1886 }
1887 
1888 static void svm_range_restore_work(struct work_struct *work)
1889 {
1890 	struct delayed_work *dwork = to_delayed_work(work);
1891 	struct amdkfd_process_info *process_info;
1892 	struct svm_range_list *svms;
1893 	struct svm_range *prange;
1894 	struct kfd_process *p;
1895 	struct mm_struct *mm;
1896 	int evicted_ranges;
1897 	int invalid;
1898 	int r;
1899 
1900 	svms = container_of(dwork, struct svm_range_list, restore_work);
1901 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1902 	if (!evicted_ranges)
1903 		return;
1904 
1905 	pr_debug("restore svm ranges\n");
1906 
1907 	p = container_of(svms, struct kfd_process, svms);
1908 	process_info = p->kgd_process_info;
1909 
1910 	/* Keep the mm reference while svm_range_validate_and_map maps the ranges */
1911 	mm = get_task_mm(p->lead_thread);
1912 	if (!mm) {
1913 		pr_debug("svms 0x%p process mm gone\n", svms);
1914 		return;
1915 	}
1916 
1917 	mutex_lock(&process_info->lock);
1918 	svm_range_list_lock_and_flush_work(svms, mm);
1919 	mutex_lock(&svms->lock);
1920 
1921 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1922 
1923 	list_for_each_entry(prange, &svms->list, list) {
1924 		invalid = atomic_read(&prange->invalid);
1925 		if (!invalid)
1926 			continue;
1927 
1928 		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1929 			 prange->svms, prange, prange->start, prange->last,
1930 			 invalid);
1931 
1932 		/*
1933 		 * If the range is migrating, wait for the migration to finish.
1934 		 */
1935 		mutex_lock(&prange->migrate_mutex);
1936 
1937 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
1938 					       MAX_GPU_INSTANCE, false, true, false);
1939 		if (r)
1940 			pr_debug("failed %d to map 0x%lx to gpus\n", r,
1941 				 prange->start);
1942 
1943 		mutex_unlock(&prange->migrate_mutex);
1944 		if (r)
1945 			goto out_reschedule;
1946 
1947 		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1948 			goto out_reschedule;
1949 	}
1950 
1951 	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1952 	    evicted_ranges)
1953 		goto out_reschedule;
1954 
1955 	evicted_ranges = 0;
1956 
1957 	r = kgd2kfd_resume_mm(mm);
1958 	if (r) {
1959 		/* No recovery from this failure. Probably the CP is
1960 		 * hanging. No point trying again.
1961 		 */
1962 		pr_debug("failed %d to resume KFD\n", r);
1963 	}
1964 
1965 	pr_debug("restore svm ranges successfully\n");
1966 
1967 out_reschedule:
1968 	mutex_unlock(&svms->lock);
1969 	mmap_write_unlock(mm);
1970 	mutex_unlock(&process_info->lock);
1971 
1972 	/* If validation failed, reschedule another attempt */
1973 	if (evicted_ranges) {
1974 		pr_debug("reschedule to restore svm range\n");
1975 		queue_delayed_work(system_freezable_wq, &svms->restore_work,
1976 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1977 
1978 		kfd_smi_event_queue_restore_rescheduled(mm);
1979 	}
1980 	mmput(mm);
1981 }
1982 
1983 /**
1984  * svm_range_evict - evict svm range
1985  * @prange: svm range structure
1986  * @mm: current process mm_struct
1987  * @start: start address of the invalidated range, in pages
1988  * @last: last address of the invalidated range, in pages
1989  * @event: mmu notifier event when range is evicted or migrated
1990  *
1991  * Stop all queues of the process to ensure the GPU doesn't access the memory,
1992  * then return to let the CPU evict the buffer and update the CPU page table.
1993  *
1994  * No lock is needed to sync the CPU page table invalidation with GPU execution.
1995  * If an invalidation happens while the restore work is running, the restore
1996  * work restarts to pick up the latest CPU page mapping for the GPU, then starts
1997  * the queues.
1998  */
1999 static int
2000 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
2001 		unsigned long start, unsigned long last,
2002 		enum mmu_notifier_event event)
2003 {
2004 	struct svm_range_list *svms = prange->svms;
2005 	struct svm_range *pchild;
2006 	struct kfd_process *p;
2007 	int r = 0;
2008 
2009 	p = container_of(svms, struct kfd_process, svms);
2010 
2011 	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2012 		 svms, prange->start, prange->last, start, last);
2013 
2014 	if (!p->xnack_enabled ||
2015 	    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
2016 		int evicted_ranges;
2017 		bool mapped = prange->mapped_to_gpu;
2018 
2019 		list_for_each_entry(pchild, &prange->child_list, child_list) {
2020 			if (!pchild->mapped_to_gpu)
2021 				continue;
2022 			mapped = true;
2023 			mutex_lock_nested(&pchild->lock, 1);
2024 			if (pchild->start <= last && pchild->last >= start) {
2025 				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
2026 					 pchild->start, pchild->last);
2027 				atomic_inc(&pchild->invalid);
2028 			}
2029 			mutex_unlock(&pchild->lock);
2030 		}
2031 
2032 		if (!mapped)
2033 			return r;
2034 
2035 		if (prange->start <= last && prange->last >= start)
2036 			atomic_inc(&prange->invalid);
2037 
2038 		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
2039 		if (evicted_ranges != 1)
2040 			return r;
2041 
2042 		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
2043 			 prange->svms, prange->start, prange->last);
2044 
2045 		/* First eviction, stop the queues */
2046 		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2047 		if (r)
2048 			pr_debug("failed to quiesce KFD\n");
2049 
2050 		pr_debug("schedule to restore svm %p ranges\n", svms);
2051 		queue_delayed_work(system_freezable_wq, &svms->restore_work,
2052 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
2053 	} else {
2054 		unsigned long s, l;
2055 		uint32_t trigger;
2056 
2057 		if (event == MMU_NOTIFY_MIGRATE)
2058 			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
2059 		else
2060 			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
2061 
2062 		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
2063 			 prange->svms, start, last);
2064 		list_for_each_entry(pchild, &prange->child_list, child_list) {
2065 			mutex_lock_nested(&pchild->lock, 1);
2066 			s = max(start, pchild->start);
2067 			l = min(last, pchild->last);
2068 			if (l >= s)
2069 				svm_range_unmap_from_gpus(pchild, s, l, trigger);
2070 			mutex_unlock(&pchild->lock);
2071 		}
2072 		s = max(start, prange->start);
2073 		l = min(last, prange->last);
2074 		if (l >= s)
2075 			svm_range_unmap_from_gpus(prange, s, l, trigger);
2076 	}
2077 
2078 	return r;
2079 }
2080 
2081 static struct svm_range *svm_range_clone(struct svm_range *old)
2082 {
2083 	struct svm_range *new;
2084 
2085 	new = svm_range_new(old->svms, old->start, old->last, false);
2086 	if (!new)
2087 		return NULL;
2088 	if (svm_range_copy_dma_addrs(new, old)) {
2089 		svm_range_free(new, false);
2090 		return NULL;
2091 	}
2092 	if (old->svm_bo) {
2093 		new->ttm_res = old->ttm_res;
2094 		new->offset = old->offset;
2095 		new->svm_bo = svm_range_bo_ref(old->svm_bo);
2096 		spin_lock(&new->svm_bo->list_lock);
2097 		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
2098 		spin_unlock(&new->svm_bo->list_lock);
2099 	}
2100 	new->flags = old->flags;
2101 	new->preferred_loc = old->preferred_loc;
2102 	new->prefetch_loc = old->prefetch_loc;
2103 	new->actual_loc = old->actual_loc;
2104 	new->granularity = old->granularity;
2105 	new->mapped_to_gpu = old->mapped_to_gpu;
2106 	new->vram_pages = old->vram_pages;
2107 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
2108 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
2109 	atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
2110 
2111 	return new;
2112 }
2113 
2114 void svm_range_set_max_pages(struct amdgpu_device *adev)
2115 {
2116 	uint64_t max_pages;
2117 	uint64_t pages, _pages;
2118 	uint64_t min_pages = 0;
2119 	int i, id;
2120 
2121 	for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
2122 		if (adev->kfd.dev->nodes[i]->xcp)
2123 			id = adev->kfd.dev->nodes[i]->xcp->id;
2124 		else
2125 			id = -1;
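		/* KFD_XCP_MEMORY_SIZE is in bytes; >> 17 converts to 4K pages
		 * and divides by 32, then clamp to [512, 2^18] pages (2MB..1GB)
		 * and round down to a power of two.
		 */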
2126 		pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
2127 		pages = clamp(pages, 1ULL << 9, 1ULL << 18);
2128 		pages = rounddown_pow_of_two(pages);
2129 		min_pages = min_not_zero(min_pages, pages);
2130 	}
2131 
2132 	do {
2133 		max_pages = READ_ONCE(max_svm_range_pages);
2134 		_pages = min_not_zero(max_pages, min_pages);
2135 	} while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2136 }
2137 
2138 static int
2139 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2140 		    uint64_t max_pages, struct list_head *insert_list,
2141 		    struct list_head *update_list)
2142 {
2143 	struct svm_range *prange;
2144 	uint64_t l;
2145 
2146 	pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
2147 		 max_pages, start, last);
2148 
2149 	while (last >= start) {
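		/* Each new range ends at the requested last page or just before
		 * the next max_pages-aligned boundary, whichever comes first.
		 */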
2150 		l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2151 
2152 		prange = svm_range_new(svms, start, l, true);
2153 		if (!prange)
2154 			return -ENOMEM;
2155 		list_add(&prange->list, insert_list);
2156 		list_add(&prange->update_list, update_list);
2157 
2158 		start = l + 1;
2159 	}
2160 	return 0;
2161 }
2162 
2163 /**
2164  * svm_range_add - add svm range and handle overlap
2165  * @p: the process to add the range to
2166  * @start: range start address, in pages
2167  * @size: range size, in pages
2168  * @nattr: number of attributes
2169  * @attrs: array of attributes
2170  * @update_list: output, the ranges need validate and update GPU mapping
2171  * @insert_list: output, the ranges need insert to svms
2172  * @remove_list: output, the ranges are replaced and need remove from svms
2173  * @remap_list: output, remap unaligned svm ranges
2174  *
2175  * Check if the virtual address range has overlap with any existing ranges,
2176  * split partly overlapping ranges and add new ranges in the gaps. All changes
2177  * should be applied to the range_list and interval tree transactionally. If
2178  * any range split or allocation fails, the entire update fails. Therefore any
2179  * existing overlapping svm_ranges are cloned and the original svm_ranges left
2180  * unchanged.
2181  *
2182  * If the transaction succeeds, the caller can update and insert clones and
2183  * new ranges, then free the originals.
2184  *
2185  * Otherwise the caller can free the clones and new ranges, while the old
2186  * svm_ranges remain unchanged.
2187  *
2188  * Context: Process context, caller must hold svms->lock
2189  *
2190  * Return:
2191  * 0 - OK, otherwise error code
2192  */
2193 static int
2194 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2195 	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2196 	      struct list_head *update_list, struct list_head *insert_list,
2197 	      struct list_head *remove_list, struct list_head *remap_list)
2198 {
2199 	unsigned long last = start + size - 1UL;
2200 	struct svm_range_list *svms = &p->svms;
2201 	struct interval_tree_node *node;
2202 	struct svm_range *prange;
2203 	struct svm_range *tmp;
2204 	struct list_head new_list;
2205 	int r = 0;
2206 
2207 	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2208 
2209 	INIT_LIST_HEAD(update_list);
2210 	INIT_LIST_HEAD(insert_list);
2211 	INIT_LIST_HEAD(remove_list);
2212 	INIT_LIST_HEAD(&new_list);
2213 	INIT_LIST_HEAD(remap_list);
2214 
2215 	node = interval_tree_iter_first(&svms->objects, start, last);
2216 	while (node) {
2217 		struct interval_tree_node *next;
2218 		unsigned long next_start;
2219 
2220 		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2221 			 node->last);
2222 
2223 		prange = container_of(node, struct svm_range, it_node);
2224 		next = interval_tree_iter_next(node, start, last);
2225 		next_start = min(node->last, last) + 1;
2226 
2227 		if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2228 		    prange->mapped_to_gpu) {
2229 			/* nothing to do */
2230 		} else if (node->start < start || node->last > last) {
2231 			/* node intersects the update range and its attributes
2232 			 * will change. Clone and split it, apply updates only
2233 			 * to the overlapping part
2234 			 */
2235 			struct svm_range *old = prange;
2236 
2237 			prange = svm_range_clone(old);
2238 			if (!prange) {
2239 				r = -ENOMEM;
2240 				goto out;
2241 			}
2242 
2243 			list_add(&old->update_list, remove_list);
2244 			list_add(&prange->list, insert_list);
2245 			list_add(&prange->update_list, update_list);
2246 
2247 			if (node->start < start) {
2248 				pr_debug("change old range start\n");
2249 				r = svm_range_split_head(prange, start,
2250 							 insert_list, remap_list);
2251 				if (r)
2252 					goto out;
2253 			}
2254 			if (node->last > last) {
2255 				pr_debug("change old range last\n");
2256 				r = svm_range_split_tail(prange, last,
2257 							 insert_list, remap_list);
2258 				if (r)
2259 					goto out;
2260 			}
2261 		} else {
2262 			/* The node is contained within start..last,
2263 			 * just update it
2264 			 */
2265 			list_add(&prange->update_list, update_list);
2266 		}
2267 
2268 		/* insert a new node if needed */
2269 		if (node->start > start) {
2270 			r = svm_range_split_new(svms, start, node->start - 1,
2271 						READ_ONCE(max_svm_range_pages),
2272 						&new_list, update_list);
2273 			if (r)
2274 				goto out;
2275 		}
2276 
2277 		node = next;
2278 		start = next_start;
2279 	}
2280 
2281 	/* add a final range at the end if needed */
2282 	if (start <= last)
2283 		r = svm_range_split_new(svms, start, last,
2284 					READ_ONCE(max_svm_range_pages),
2285 					&new_list, update_list);
2286 
2287 out:
2288 	if (r) {
2289 		list_for_each_entry_safe(prange, tmp, insert_list, list)
2290 			svm_range_free(prange, false);
2291 		list_for_each_entry_safe(prange, tmp, &new_list, list)
2292 			svm_range_free(prange, true);
2293 	} else {
2294 		list_splice(&new_list, insert_list);
2295 	}
2296 
2297 	return r;
2298 }
2299 
2300 static void
2301 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2302 					    struct svm_range *prange)
2303 {
2304 	unsigned long start;
2305 	unsigned long last;
2306 
2307 	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2308 	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2309 
2310 	if (prange->start == start && prange->last == last)
2311 		return;
2312 
2313 	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2314 		  prange->svms, prange, start, last, prange->start,
2315 		  prange->last);
2316 
2317 	if (start != 0 && last != 0) {
2318 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
2319 		svm_range_remove_notifier(prange);
2320 	}
2321 	prange->it_node.start = prange->start;
2322 	prange->it_node.last = prange->last;
2323 
2324 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
2325 	svm_range_add_notifier_locked(mm, prange);
2326 }
2327 
2328 static void
2329 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2330 			 struct mm_struct *mm)
2331 {
2332 	switch (prange->work_item.op) {
2333 	case SVM_OP_NULL:
2334 		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2335 			 svms, prange, prange->start, prange->last);
2336 		break;
2337 	case SVM_OP_UNMAP_RANGE:
2338 		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2339 			 svms, prange, prange->start, prange->last);
2340 		svm_range_unlink(prange);
2341 		svm_range_remove_notifier(prange);
2342 		svm_range_free(prange, true);
2343 		break;
2344 	case SVM_OP_UPDATE_RANGE_NOTIFIER:
2345 		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2346 			 svms, prange, prange->start, prange->last);
2347 		svm_range_update_notifier_and_interval_tree(mm, prange);
2348 		break;
2349 	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2350 		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2351 			 svms, prange, prange->start, prange->last);
2352 		svm_range_update_notifier_and_interval_tree(mm, prange);
2353 		/* TODO: implement deferred validation and mapping */
2354 		break;
2355 	case SVM_OP_ADD_RANGE:
2356 		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2357 			 prange->start, prange->last);
2358 		svm_range_add_to_svms(prange);
2359 		svm_range_add_notifier_locked(mm, prange);
2360 		break;
2361 	case SVM_OP_ADD_RANGE_AND_MAP:
2362 		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2363 			 prange, prange->start, prange->last);
2364 		svm_range_add_to_svms(prange);
2365 		svm_range_add_notifier_locked(mm, prange);
2366 		/* TODO: implement deferred validation and mapping */
2367 		break;
2368 	default:
2369 		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2370 			 prange->work_item.op);
2371 	}
2372 }
2373 
2374 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2375 {
2376 	struct kfd_process_device *pdd;
2377 	struct kfd_process *p;
2378 	uint32_t i;
2379 
2380 	p = container_of(svms, struct kfd_process, svms);
2381 
2382 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2383 		pdd = p->pdds[i];
2384 		if (!pdd)
2385 			continue;
2386 
2387 		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2388 
2389 		if (!down_read_trylock(&pdd->dev->adev->reset_domain->sem))
2390 			continue;
2391 
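		/* Wait until the IH ring delivering retry faults (ih with the
		 * retry CAM, ih1 otherwise) has been processed past the current
		 * checkpoint; with the CAM enabled, also wait on the soft ring.
		 */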
2392 		amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2393 				pdd->dev->adev->irq.retry_cam_enabled ?
2394 				&pdd->dev->adev->irq.ih :
2395 				&pdd->dev->adev->irq.ih1);
2396 
2397 		if (pdd->dev->adev->irq.retry_cam_enabled)
2398 			amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2399 				&pdd->dev->adev->irq.ih_soft);
2400 
2401 		up_read(&pdd->dev->adev->reset_domain->sem);
2402 
2403 		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2404 	}
2405 }
2406 
2407 static void svm_range_deferred_list_work(struct work_struct *work)
2408 {
2409 	struct svm_range_list *svms;
2410 	struct svm_range *prange;
2411 	struct mm_struct *mm;
2412 
2413 	svms = container_of(work, struct svm_range_list, deferred_list_work);
2414 	pr_debug("enter svms 0x%p\n", svms);
2415 
2416 	spin_lock(&svms->deferred_list_lock);
2417 	while (!list_empty(&svms->deferred_range_list)) {
2418 		prange = list_first_entry(&svms->deferred_range_list,
2419 					  struct svm_range, deferred_list);
2420 		spin_unlock(&svms->deferred_list_lock);
2421 
2422 		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2423 			 prange->start, prange->last, prange->work_item.op);
2424 
2425 		mm = prange->work_item.mm;
2426 
2427 		mmap_write_lock(mm);
2428 
2429 		/* Removal from deferred_list must happen under the mmap write
2430 		 * lock, to avoid two races:
2431 		 * 1. unmap_from_cpu may change work_item.op and add the range
2432 		 *    to the deferred_list again, causing a use-after-free.
2433 		 * 2. svm_range_list_lock_and_flush_work may take the mmap write
2434 		 *    lock and continue because deferred_list is empty, while
2435 		 *    the deferred_list work is actually waiting for the mmap lock.
2436 		 */
2437 		spin_lock(&svms->deferred_list_lock);
2438 		list_del_init(&prange->deferred_list);
2439 		spin_unlock(&svms->deferred_list_lock);
2440 
2441 		mutex_lock(&svms->lock);
2442 		mutex_lock(&prange->migrate_mutex);
2443 		while (!list_empty(&prange->child_list)) {
2444 			struct svm_range *pchild;
2445 
2446 			pchild = list_first_entry(&prange->child_list,
2447 						struct svm_range, child_list);
2448 			pr_debug("child prange 0x%p op %d\n", pchild,
2449 				 pchild->work_item.op);
2450 			list_del_init(&pchild->child_list);
2451 			svm_range_handle_list_op(svms, pchild, mm);
2452 		}
2453 		mutex_unlock(&prange->migrate_mutex);
2454 
2455 		svm_range_handle_list_op(svms, prange, mm);
2456 		mutex_unlock(&svms->lock);
2457 		mmap_write_unlock(mm);
2458 
2459 		/* Pairs with mmget in svm_range_add_list_work. If dropping the
2460 		 * last mm refcount, schedule release work to avoid circular locking
2461 		 */
2462 		mmput_async(mm);
2463 
2464 		spin_lock(&svms->deferred_list_lock);
2465 	}
2466 	spin_unlock(&svms->deferred_list_lock);
2467 	pr_debug("exit svms 0x%p\n", svms);
2468 }
2469 
2470 void
2471 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2472 			struct mm_struct *mm, enum svm_work_list_ops op)
2473 {
2474 	spin_lock(&svms->deferred_list_lock);
2475 	/* if prange is on the deferred list */
2476 	if (!list_empty(&prange->deferred_list)) {
2477 		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2478 		WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
2479 		if (op != SVM_OP_NULL &&
2480 		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
2481 			prange->work_item.op = op;
2482 	} else {
2483 		/* Pairs with mmput in deferred_list_work.
2484 		 * If process is exiting and mm is gone, don't update mmu notifier.
2485 		 */
2486 		if (mmget_not_zero(mm)) {
2487 			prange->work_item.mm = mm;
2488 			prange->work_item.op = op;
2489 			list_add_tail(&prange->deferred_list,
2490 				      &prange->svms->deferred_range_list);
2491 			pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2492 				 prange, prange->start, prange->last, op);
2493 		}
2494 	}
2495 	spin_unlock(&svms->deferred_list_lock);
2496 }
2497 
2498 void schedule_deferred_list_work(struct svm_range_list *svms)
2499 {
2500 	spin_lock(&svms->deferred_list_lock);
2501 	if (!list_empty(&svms->deferred_range_list))
2502 		schedule_work(&svms->deferred_list_work);
2503 	spin_unlock(&svms->deferred_list_lock);
2504 }
2505 
2506 static void
2507 svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start,
2508 		      unsigned long last)
2509 {
2510 	struct svm_range *head;
2511 	struct svm_range *tail;
2512 
2513 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2514 		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2515 			 prange->start, prange->last);
2516 		return;
2517 	}
2518 	if (start > prange->last || last < prange->start)
2519 		return;
2520 
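	/* Split prange around [start, last]: the piece covering the unmapped
	 * region becomes a child marked SVM_OP_UNMAP_RANGE (or prange itself is
	 * marked if fully covered), while surviving pieces stay in prange or are
	 * re-added as SVM_OP_ADD_RANGE children.
	 */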
2521 	head = tail = prange;
2522 	if (start > prange->start)
2523 		svm_range_split(prange, prange->start, start - 1, &tail);
2524 	if (last < tail->last)
2525 		svm_range_split(tail, last + 1, tail->last, &head);
2526 
2527 	if (head != prange && tail != prange) {
2528 		svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
2529 		svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE);
2530 	} else if (tail != prange) {
2531 		svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE);
2532 	} else if (head != prange) {
2533 		svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
2534 	} else if (parent != prange) {
2535 		prange->work_item.op = SVM_OP_UNMAP_RANGE;
2536 	}
2537 }
2538 
2539 static void
2540 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2541 			 unsigned long start, unsigned long last)
2542 {
2543 	uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2544 	struct svm_range_list *svms;
2545 	struct svm_range *pchild;
2546 	struct kfd_process *p;
2547 	unsigned long s, l;
2548 	bool unmap_parent;
2549 	uint32_t i;
2550 
2551 	if (atomic_read(&prange->queue_refcount)) {
2552 		int r;
2553 
2554 		pr_warn("Freeing queue vital buffer 0x%lx, queue evicted\n",
2555 			prange->start << PAGE_SHIFT);
2556 		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2557 		if (r)
2558 			pr_debug("failed %d to quiesce KFD queues\n", r);
2559 	}
2560 
2561 	p = kfd_lookup_process_by_mm(mm);
2562 	if (!p)
2563 		return;
2564 	svms = &p->svms;
2565 
2566 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2567 		 prange, prange->start, prange->last, start, last);
2568 
2569 	/* Calculate timestamps used to decide which page faults need to be
2570 	 * dropped or handled before unmapping pages from the GPU VM
2571 	 */
2572 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2573 		struct kfd_process_device *pdd;
2574 		struct amdgpu_device *adev;
2575 		struct amdgpu_ih_ring *ih;
2576 		uint32_t checkpoint_wptr;
2577 
2578 		pdd = p->pdds[i];
2579 		if (!pdd)
2580 			continue;
2581 
2582 		adev = pdd->dev->adev;
2583 
2584 		/* Check and drain the ih1 ring if the retry CAM is not available */
2585 		if (!adev->irq.retry_cam_enabled && adev->irq.ih1.ring_size) {
2586 			ih = &adev->irq.ih1;
2587 			checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2588 			if (ih->rptr != checkpoint_wptr) {
2589 				svms->checkpoint_ts[i] =
2590 					amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2591 				continue;
2592 			}
2593 		}
2594 
2595 		/* check if dev->irq.ih_soft is not empty */
2596 		ih = &adev->irq.ih_soft;
2597 		checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2598 		if (ih->rptr != checkpoint_wptr)
2599 			svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2600 	}
2601 
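	/* The parent prange is removed only if the unmapped region covers it
	 * entirely; otherwise only its notifier and interval tree entry need
	 * updating.
	 */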
2602 	unmap_parent = start <= prange->start && last >= prange->last;
2603 
2604 	list_for_each_entry(pchild, &prange->child_list, child_list) {
2605 		mutex_lock_nested(&pchild->lock, 1);
2606 		s = max(start, pchild->start);
2607 		l = min(last, pchild->last);
2608 		if (l >= s)
2609 			svm_range_unmap_from_gpus(pchild, s, l, trigger);
2610 		svm_range_unmap_split(prange, pchild, start, last);
2611 		mutex_unlock(&pchild->lock);
2612 	}
2613 	s = max(start, prange->start);
2614 	l = min(last, prange->last);
2615 	if (l >= s)
2616 		svm_range_unmap_from_gpus(prange, s, l, trigger);
2617 	svm_range_unmap_split(prange, prange, start, last);
2618 
2619 	if (unmap_parent)
2620 		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2621 	else
2622 		svm_range_add_list_work(svms, prange, mm,
2623 					SVM_OP_UPDATE_RANGE_NOTIFIER);
2624 	schedule_deferred_list_work(svms);
2625 
2626 	kfd_unref_process(p);
2627 }
2628 
2629 /**
2630  * svm_range_cpu_invalidate_pagetables - interval notifier callback
2631  * @mni: mmu_interval_notifier struct
2632  * @range: mmu_notifier_range struct
2633  * @cur_seq: value to pass to mmu_interval_set_seq()
2634  *
2635  * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it
2636  * is from migration, or CPU page invalidation callback.
2637  *
2638  * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2639  * work thread, and split prange if only part of prange is unmapped.
2640  *
2641  * For invalidation event, if GPU retry fault is not enabled, evict the queues,
2642  * then schedule svm_range_restore_work to update GPU mapping and resume queues.
2643  * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will
2644  * update GPU mapping to recover.
2645  *
2646  * Context: mmap lock, notifier_invalidate_start lock are held
2647  *          for invalidate event, prange lock is held if this is from migration
2648  */
2649 static bool
2650 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2651 				    const struct mmu_notifier_range *range,
2652 				    unsigned long cur_seq)
2653 {
2654 	struct svm_range *prange;
2655 	unsigned long start;
2656 	unsigned long last;
2657 
2658 	if (range->event == MMU_NOTIFY_RELEASE)
2659 		return true;
2660 
2661 	start = mni->interval_tree.start;
2662 	last = mni->interval_tree.last;
2663 	start = max(start, range->start) >> PAGE_SHIFT;
2664 	last = min(last, range->end - 1) >> PAGE_SHIFT;
2665 	pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2666 		 start, last, range->start >> PAGE_SHIFT,
2667 		 (range->end - 1) >> PAGE_SHIFT,
2668 		 mni->interval_tree.start >> PAGE_SHIFT,
2669 		 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2670 
2671 	prange = container_of(mni, struct svm_range, notifier);
2672 
2673 	svm_range_lock(prange);
2674 	mmu_interval_set_seq(mni, cur_seq);
2675 
2676 	switch (range->event) {
2677 	case MMU_NOTIFY_UNMAP:
2678 		svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2679 		break;
2680 	default:
2681 		svm_range_evict(prange, mni->mm, start, last, range->event);
2682 		break;
2683 	}
2684 
2685 	svm_range_unlock(prange);
2686 
2687 	return true;
2688 }
2689 
2690 /**
2691  * svm_range_from_addr - find svm range from fault address
2692  * @svms: svm range list header
2693  * @addr: address to search range interval tree, in pages
2694  * @parent: parent range if range is on child list
2695  *
2696  * Context: The caller must hold svms->lock
2697  *
2698  * Return: the svm_range found or NULL
2699  */
2700 struct svm_range *
2701 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2702 		    struct svm_range **parent)
2703 {
2704 	struct interval_tree_node *node;
2705 	struct svm_range *prange;
2706 	struct svm_range *pchild;
2707 
2708 	node = interval_tree_iter_first(&svms->objects, addr, addr);
2709 	if (!node)
2710 		return NULL;
2711 
2712 	prange = container_of(node, struct svm_range, it_node);
2713 	pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2714 		 addr, prange->start, prange->last, node->start, node->last);
2715 
2716 	if (addr >= prange->start && addr <= prange->last) {
2717 		if (parent)
2718 			*parent = prange;
2719 		return prange;
2720 	}
2721 	list_for_each_entry(pchild, &prange->child_list, child_list)
2722 		if (addr >= pchild->start && addr <= pchild->last) {
2723 			pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2724 				 addr, pchild->start, pchild->last);
2725 			if (parent)
2726 				*parent = prange;
2727 			return pchild;
2728 		}
2729 
2730 	return NULL;
2731 }
2732 
2733 /* svm_range_best_restore_location - decide the best fault restore location
2734  * @prange: svm range structure
2735  * @adev: the GPU on which vm fault happened
2736  *
2737  * This is only called when xnack is on, to decide the best location to restore
2738  * the range mapping after GPU vm fault. Caller uses the best location to do
2739  * migration if actual loc is not best location, then update GPU page table
2740  * mapping to the best location.
2741  *
2742  * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
2743  * If the vm fault gpu idx is in the range's ACCESSIBLE bitmap, best_loc is the vm fault gpu.
2744  * If the vm fault gpu idx is in the range's ACCESSIBLE_IN_PLACE bitmap, then
2745  *    if the range's actual loc is cpu, best_loc is cpu;
2746  *    if the vm fault gpu is in the same xgmi hive as the range's actual loc gpu,
2747  *    best_loc is the range's actual loc.
2748  * Otherwise, the GPU has no access, and best_loc is -1.
2749  *
2750  * Return:
2751  * -1 means vm fault GPU no access
2752  * 0 for CPU or GPU id
2753  */
2754 static int32_t
2755 svm_range_best_restore_location(struct svm_range *prange,
2756 				struct kfd_node *node,
2757 				int32_t *gpuidx)
2758 {
2759 	struct kfd_node *bo_node, *preferred_node;
2760 	struct kfd_process *p;
2761 	uint32_t gpuid;
2762 	int r;
2763 
2764 	p = container_of(prange->svms, struct kfd_process, svms);
2765 
2766 	r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2767 	if (r < 0) {
2768 		pr_debug("failed to get gpuid from kgd\n");
2769 		return -1;
2770 	}
2771 
2772 	if (node->adev->apu_prefer_gtt)
2773 		return 0;
2774 
2775 	if (prange->preferred_loc == gpuid ||
2776 	    prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2777 		return prange->preferred_loc;
2778 	} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2779 		preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2780 		if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2781 			return prange->preferred_loc;
2782 		/* fall through */
2783 	}
2784 
2785 	if (test_bit(*gpuidx, prange->bitmap_access))
2786 		return gpuid;
2787 
2788 	if (test_bit(*gpuidx, prange->bitmap_aip)) {
2789 		if (!prange->actual_loc)
2790 			return 0;
2791 
2792 		bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2793 		if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2794 			return prange->actual_loc;
2795 		else
2796 			return 0;
2797 	}
2798 
2799 	return -1;
2800 }
2801 
2802 static int
2803 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2804 			       unsigned long *start, unsigned long *last,
2805 			       bool *is_heap_stack)
2806 {
2807 	struct vm_area_struct *vma;
2808 	struct interval_tree_node *node;
2809 	struct rb_node *rb_node;
2810 	unsigned long start_limit, end_limit;
2811 
2812 	vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2813 	if (!vma) {
2814 		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2815 		return -EFAULT;
2816 	}
2817 
2818 	*is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2819 
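	/* Limit the new range to the VMA and to a default-granularity-aligned
	 * window around the faulting address.
	 */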
2820 	start_limit = max(vma->vm_start >> PAGE_SHIFT,
2821 		      (unsigned long)ALIGN_DOWN(addr, 1UL << p->svms.default_granularity));
2822 	end_limit = min(vma->vm_end >> PAGE_SHIFT,
2823 		    (unsigned long)ALIGN(addr + 1, 1UL << p->svms.default_granularity));
2824 
2825 	/* First range that starts after the fault address */
2826 	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2827 	if (node) {
2828 		end_limit = min(end_limit, node->start);
2829 		/* Last range that ends before the fault address */
2830 		rb_node = rb_prev(&node->rb);
2831 	} else {
2832 		/* Last range must end before addr because
2833 		 * there was no range after addr
2834 		 */
2835 		rb_node = rb_last(&p->svms.objects.rb_root);
2836 	}
2837 	if (rb_node) {
2838 		node = container_of(rb_node, struct interval_tree_node, rb);
2839 		if (node->last >= addr) {
2840 			WARN(1, "Overlap with prev node and page fault addr\n");
2841 			return -EFAULT;
2842 		}
2843 		start_limit = max(start_limit, node->last + 1);
2844 	}
2845 
2846 	*start = start_limit;
2847 	*last = end_limit - 1;
2848 
2849 	pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2850 		 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2851 		 *start, *last, *is_heap_stack);
2852 
2853 	return 0;
2854 }
2855 
2856 static int
2857 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2858 			   uint64_t *bo_s, uint64_t *bo_l)
2859 {
2860 	struct amdgpu_bo_va_mapping *mapping;
2861 	struct interval_tree_node *node;
2862 	struct amdgpu_bo *bo = NULL;
2863 	unsigned long userptr;
2864 	uint32_t i;
2865 	int r;
2866 
2867 	for (i = 0; i < p->n_pdds; i++) {
2868 		struct amdgpu_vm *vm;
2869 
2870 		if (!p->pdds[i]->drm_priv)
2871 			continue;
2872 
2873 		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2874 		r = amdgpu_bo_reserve(vm->root.bo, false);
2875 		if (r)
2876 			return r;
2877 
2878 		/* Check userptr by searching entire vm->va interval tree */
2879 		node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2880 		while (node) {
2881 			mapping = container_of((struct rb_node *)node,
2882 					       struct amdgpu_bo_va_mapping, rb);
2883 			bo = mapping->bo_va->base.bo;
2884 
2885 			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2886 							 start << PAGE_SHIFT,
2887 							 last << PAGE_SHIFT,
2888 							 &userptr)) {
2889 				node = interval_tree_iter_next(node, 0, ~0ULL);
2890 				continue;
2891 			}
2892 
2893 			pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2894 				 start, last);
2895 			if (bo_s && bo_l) {
2896 				*bo_s = userptr >> PAGE_SHIFT;
2897 				*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2898 			}
2899 			amdgpu_bo_unreserve(vm->root.bo);
2900 			return -EADDRINUSE;
2901 		}
2902 		amdgpu_bo_unreserve(vm->root.bo);
2903 	}
2904 	return 0;
2905 }
2906 
2907 static struct
2908 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2909 						struct kfd_process *p,
2910 						struct mm_struct *mm,
2911 						int64_t addr)
2912 {
2913 	struct svm_range *prange = NULL;
2914 	unsigned long start, last;
2915 	uint32_t gpuid, gpuidx;
2916 	bool is_heap_stack;
2917 	uint64_t bo_s = 0;
2918 	uint64_t bo_l = 0;
2919 	int r;
2920 
2921 	if (svm_range_get_range_boundaries(p, addr, &start, &last,
2922 					   &is_heap_stack))
2923 		return NULL;
2924 
2925 	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2926 	if (r != -EADDRINUSE)
2927 		r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2928 
2929 	if (r == -EADDRINUSE) {
2930 		if (addr >= bo_s && addr <= bo_l)
2931 			return NULL;
2932 
2933 		/* Create a one-page svm range if the 2MB range overlaps an existing mapping */
2934 		start = addr;
2935 		last = addr;
2936 	}
2937 
2938 	prange = svm_range_new(&p->svms, start, last, true);
2939 	if (!prange) {
2940 		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2941 		return NULL;
2942 	}
2943 	if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2944 		pr_debug("failed to get gpuid from kgd\n");
2945 		svm_range_free(prange, true);
2946 		return NULL;
2947 	}
2948 
2949 	if (is_heap_stack)
2950 		prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2951 
2952 	svm_range_add_to_svms(prange);
2953 	svm_range_add_notifier_locked(mm, prange);
2954 
2955 	return prange;
2956 }
2957 
2958 /* svm_range_skip_recover - decide if prange can be recovered
2959  * @prange: svm range structure
2960  *
2961  * The GPU vm retry fault handler skips recovering the range in these cases:
2962  * 1. prange is on the deferred list to be removed after unmap; this is a stale
2963  *    fault, and the deferred list work will drain it before freeing the prange.
2964  * 2. prange is on the deferred list to add an interval notifier after split, or
2965  * 3. prange is a child range split from a parent prange; recover it later,
2966  *    after the interval notifier is added.
2967  *
2968  * Return: true to skip recover, false to recover
2969  */
2970 static bool svm_range_skip_recover(struct svm_range *prange)
2971 {
2972 	struct svm_range_list *svms = prange->svms;
2973 
2974 	spin_lock(&svms->deferred_list_lock);
2975 	if (list_empty(&prange->deferred_list) &&
2976 	    list_empty(&prange->child_list)) {
2977 		spin_unlock(&svms->deferred_list_lock);
2978 		return false;
2979 	}
2980 	spin_unlock(&svms->deferred_list_lock);
2981 
2982 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2983 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2984 			 svms, prange, prange->start, prange->last);
2985 		return true;
2986 	}
2987 	if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2988 	    prange->work_item.op == SVM_OP_ADD_RANGE) {
2989 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2990 			 svms, prange, prange->start, prange->last);
2991 		return true;
2992 	}
2993 	return false;
2994 }
2995 
2996 static void
2997 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2998 		      int32_t gpuidx)
2999 {
3000 	struct kfd_process_device *pdd;
3001 
3002 	/* fault is on different page of same range
3003 	 * or fault is skipped to recover later
3004 	 * or fault is on invalid virtual address
3005 	 */
3006 	if (gpuidx == MAX_GPU_INSTANCE) {
3007 		uint32_t gpuid;
3008 		int r;
3009 
3010 		r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
3011 		if (r < 0)
3012 			return;
3013 	}
3014 
3015 	/* fault is recovered,
3016 	 * or fault cannot be recovered because the GPU has no access to the range
3017 	 */
3018 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3019 	if (pdd)
3020 		WRITE_ONCE(pdd->faults, pdd->faults + 1);
3021 }
3022 
3023 static bool
3024 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
3025 {
3026 	unsigned long requested = VM_READ;
3027 
3028 	if (write_fault)
3029 		requested |= VM_WRITE;
3030 
3031 	pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
3032 		vma->vm_flags);
3033 	return (vma->vm_flags & requested) == requested;
3034 }
3035 
3036 int
3037 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
3038 			uint32_t vmid, uint32_t node_id,
3039 			uint64_t addr, uint64_t ts, bool write_fault)
3040 {
3041 	unsigned long start, last, size;
3042 	struct mm_struct *mm = NULL;
3043 	struct svm_range_list *svms;
3044 	struct svm_range *prange;
3045 	struct kfd_process *p;
3046 	ktime_t timestamp = ktime_get_boottime();
3047 	struct kfd_node *node;
3048 	int32_t best_loc;
3049 	int32_t gpuid, gpuidx = MAX_GPU_INSTANCE;
3050 	bool write_locked = false;
3051 	struct vm_area_struct *vma;
3052 	bool migration = false;
3053 	int r = 0;
3054 
3055 	if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
3056 		pr_debug("device does not support SVM\n");
3057 		return -EFAULT;
3058 	}
3059 
3060 	p = kfd_lookup_process_by_pasid(pasid, NULL);
3061 	if (!p) {
3062 		pr_debug("kfd process not founded pasid 0x%x\n", pasid);
3063 		return 0;
3064 	}
3065 	svms = &p->svms;
3066 
3067 	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
3068 
3069 	if (atomic_read(&svms->drain_pagefaults)) {
3070 		pr_debug("page fault handling disabled, drop fault 0x%llx\n", addr);
3071 		r = 0;
3072 		goto out;
3073 	}
3074 
3075 	node = kfd_node_by_irq_ids(adev, node_id, vmid);
3076 	if (!node) {
3077 		pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
3078 			 vmid);
3079 		r = -EFAULT;
3080 		goto out;
3081 	}
3082 
3083 	if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
3084 		pr_debug("failed to get gpuid/gpuidex for node_id: %d\n", node_id);
3085 		r = -EFAULT;
3086 		goto out;
3087 	}
3088 
3089 	if (!p->xnack_enabled) {
3090 		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
3091 		r = -EFAULT;
3092 		goto out;
3093 	}
3094 
3095 	/* p->lead_thread is available as kfd_process_wq_release flush the work
3096 	 * before releasing task ref.
3097 	 */
3098 	mm = get_task_mm(p->lead_thread);
3099 	if (!mm) {
3100 		pr_debug("svms 0x%p failed to get mm\n", svms);
3101 		r = 0;
3102 		goto out;
3103 	}
3104 
3105 	mmap_read_lock(mm);
3106 retry_write_locked:
3107 	mutex_lock(&svms->lock);
3108 
3109 	/* check if this page fault time stamp is before svms->checkpoint_ts */
3110 	if (svms->checkpoint_ts[gpuidx] != 0) {
3111 		if (amdgpu_ih_ts_after_or_equal(ts,  svms->checkpoint_ts[gpuidx])) {
3112 			pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
3113 			if (write_locked)
3114 				mmap_write_downgrade(mm);
3115 			r = -EAGAIN;
3116 			goto out_unlock_svms;
3117 		} else {
3118 			/* ts is after svms->checkpoint_ts now; reset svms->checkpoint_ts
3119 			 * to zero so a later ts wrap-around cannot give a wrong comparison
3120 			 */
3121 			svms->checkpoint_ts[gpuidx] = 0;
3122 		}
3123 	}
3124 
3125 	prange = svm_range_from_addr(svms, addr, NULL);
3126 	if (!prange) {
3127 		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
3128 			 svms, addr);
3129 		if (!write_locked) {
3130 			/* Need the write lock to create new range with MMU notifier.
3131 			 * Also flush pending deferred work to make sure the interval
3132 			 * tree is up to date before we add a new range
3133 			 */
3134 			mutex_unlock(&svms->lock);
3135 			mmap_read_unlock(mm);
3136 			mmap_write_lock(mm);
3137 			write_locked = true;
3138 			goto retry_write_locked;
3139 		}
3140 		prange = svm_range_create_unregistered_range(node, p, mm, addr);
3141 		if (!prange) {
3142 			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
3143 				 svms, addr);
3144 			mmap_write_downgrade(mm);
3145 			r = -EFAULT;
3146 			goto out_unlock_svms;
3147 		}
3148 	}
3149 	if (write_locked)
3150 		mmap_write_downgrade(mm);
3151 
3152 	mutex_lock(&prange->migrate_mutex);
3153 
3154 	if (svm_range_skip_recover(prange)) {
3155 		amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3156 		r = 0;
3157 		goto out_unlock_range;
3158 	}
3159 
3160 	/* skip duplicate vm fault on different pages of same range */
3161 	if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
3162 				AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
3163 		pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
3164 			 svms, prange->start, prange->last);
3165 		r = 0;
3166 		goto out_unlock_range;
3167 	}
3168 
3169 	/* __do_munmap removed the VMA; return success as we are handling a stale
3170 	 * retry fault.
3171 	 */
3172 	vma = vma_lookup(mm, addr << PAGE_SHIFT);
3173 	if (!vma) {
3174 		pr_debug("address 0x%llx VMA is removed\n", addr);
3175 		r = 0;
3176 		goto out_unlock_range;
3177 	}
3178 
3179 	if (!svm_fault_allowed(vma, write_fault)) {
3180 		pr_debug("fault addr 0x%llx no %s permission\n", addr,
3181 			write_fault ? "write" : "read");
3182 		r = -EPERM;
3183 		goto out_unlock_range;
3184 	}
3185 
3186 	best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3187 	if (best_loc == -1) {
3188 		pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3189 			 svms, prange->start, prange->last);
3190 		r = -EACCES;
3191 		goto out_unlock_range;
3192 	}
3193 
3194 	pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3195 		 svms, prange->start, prange->last, best_loc,
3196 		 prange->actual_loc);
3197 
3198 	kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
3199 				       write_fault, timestamp);
3200 
3201 	/* Align migration range start and size to granularity size */
3202 	size = 1UL << prange->granularity;
3203 	start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
3204 	last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
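	/* Migration is only needed if the current or the desired location is
	 * VRAM (location 0 means system memory).
	 */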
3205 	if (prange->actual_loc != 0 || best_loc != 0) {
3206 		if (best_loc) {
3207 			r = svm_migrate_to_vram(prange, best_loc, start, last,
3208 					mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3209 			if (r) {
3210 				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
3211 					 r, addr);
3212 				/* Fallback to system memory if migration to
3213 				 * VRAM failed
3214 				 */
3215 				if (prange->actual_loc && prange->actual_loc != best_loc)
3216 					r = svm_migrate_vram_to_ram(prange, mm, start, last,
3217 						KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3218 				else
3219 					r = 0;
3220 			}
3221 		} else {
3222 			r = svm_migrate_vram_to_ram(prange, mm, start, last,
3223 					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3224 		}
3225 		if (r) {
3226 			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3227 				 r, svms, start, last);
3228 			goto out_migrate_fail;
3229 		} else {
3230 			migration = true;
3231 		}
3232 	}
3233 
3234 	r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
3235 				       false, false);
3236 	if (r)
3237 		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3238 			 r, svms, start, last);
3239 
3240 out_migrate_fail:
3241 	kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3242 				     migration);
3243 
3244 out_unlock_range:
3245 	mutex_unlock(&prange->migrate_mutex);
3246 out_unlock_svms:
3247 	mutex_unlock(&svms->lock);
3248 	mmap_read_unlock(mm);
3249 
3250 	if (r != -EAGAIN)
3251 		svm_range_count_fault(node, p, gpuidx);
3252 
3253 	mmput(mm);
3254 out:
3255 	kfd_unref_process(p);
3256 
3257 	if (r == -EAGAIN) {
3258 		pr_debug("recover vm fault later\n");
3259 		amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3260 		r = 0;
3261 	}
3262 	return r;
3263 }
3264 
3265 int
3266 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3267 {
3268 	struct svm_range *prange, *pchild;
3269 	uint64_t reserved_size = 0;
3270 	uint64_t size;
3271 	int r = 0;
3272 
3273 	pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3274 
3275 	mutex_lock(&p->svms.lock);
3276 
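	/* With xnack off, SVM memory is assumed to stay resident, so every
	 * range is accounted against the userptr memory limit; switching xnack
	 * on releases that reservation.
	 */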
3277 	list_for_each_entry(prange, &p->svms.list, list) {
3278 		svm_range_lock(prange);
3279 		list_for_each_entry(pchild, &prange->child_list, child_list) {
3280 			size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3281 			if (xnack_enabled) {
3282 				amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3283 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3284 			} else {
3285 				r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3286 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3287 				if (r)
3288 					goto out_unlock;
3289 				reserved_size += size;
3290 			}
3291 		}
3292 
3293 		size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3294 		if (xnack_enabled) {
3295 			amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3296 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3297 		} else {
3298 			r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3299 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3300 			if (r)
3301 				goto out_unlock;
3302 			reserved_size += size;
3303 		}
3304 out_unlock:
3305 		svm_range_unlock(prange);
3306 		if (r)
3307 			break;
3308 	}
3309 
3310 	if (r)
3311 		amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3312 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3313 	else
3314 		/* Changing the xnack mode must be done under the svms lock, to avoid
3315 		 * racing with svm_range_deferred_list_work unreserving memory in parallel.
3316 		 */
3317 		p->xnack_enabled = xnack_enabled;
3318 
3319 	mutex_unlock(&p->svms.lock);
3320 	return r;
3321 }
3322 
3323 void svm_range_list_fini(struct kfd_process *p)
3324 {
3325 	struct svm_range *prange;
3326 	struct svm_range *next;
3327 
3328 	pr_debug("process pid %d svms 0x%p\n", p->lead_thread->pid,
3329 		 &p->svms);
3330 
3331 	cancel_delayed_work_sync(&p->svms.restore_work);
3332 
3333 	/* Ensure list work is finished before process is destroyed */
3334 	flush_work(&p->svms.deferred_list_work);
3335 
3336 	/*
3337 	 * Ensure no retry fault comes in afterwards, as the page fault handler
3338 	 * would not find the kfd process and take the mm lock to recover the fault.
3339 	 * Stop kfd page fault handling, then wait for pending page faults to drain.
3340 	 */
3341 	atomic_set(&p->svms.drain_pagefaults, 1);
3342 	svm_range_drain_retry_fault(&p->svms);
3343 
3344 	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3345 		svm_range_unlink(prange);
3346 		svm_range_remove_notifier(prange);
3347 		svm_range_free(prange, true);
3348 	}
3349 
3350 	mutex_destroy(&p->svms.lock);
3351 
3352 	pr_debug("process pid %d svms 0x%p done\n",
3353 		p->lead_thread->pid, &p->svms);
3354 }
3355 
3356 int svm_range_list_init(struct kfd_process *p)
3357 {
3358 	struct svm_range_list *svms = &p->svms;
3359 	int i;
3360 
3361 	svms->objects = RB_ROOT_CACHED;
3362 	mutex_init(&svms->lock);
3363 	INIT_LIST_HEAD(&svms->list);
3364 	atomic_set(&svms->evicted_ranges, 0);
3365 	atomic_set(&svms->drain_pagefaults, 0);
3366 	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3367 	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3368 	INIT_LIST_HEAD(&svms->deferred_range_list);
3369 	INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3370 	spin_lock_init(&svms->deferred_list_lock);
3371 
3372 	for (i = 0; i < p->n_pdds; i++)
3373 		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3374 			bitmap_set(svms->bitmap_supported, i, 1);
3375 
3376 	 /* Value of default granularity cannot exceed 0x1B, the
3377 	  * number of pages supported by a 4-level paging table
3378 	  */
3379 	svms->default_granularity = min_t(u8, amdgpu_svm_default_granularity, 0x1B);
3380 	pr_debug("Default SVM Granularity to use: %d\n", svms->default_granularity);
3381 
3382 	return 0;
3383 }
3384 
3385 /**
3386  * svm_range_check_vm - check if virtual address range mapped already
3387  * @p: current kfd_process
3388  * @start: range start address, in pages
3389  * @last: range last address, in pages
3390  * @bo_s: mapping start address in pages if address range already mapped
3391  * @bo_l: mapping last address in pages if address range already mapped
3392  *
3393  * The purpose is to avoid virtual address ranges already allocated by
3394  * kfd_ioctl_alloc_memory_of_gpu ioctl.
3395  * It looks for each pdd in the kfd_process.
3396  *
3397  * Context: Process context
3398  *
3399  * Return 0 - OK, if the range is not mapped.
3400  * Otherwise error code:
3401  * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3402  * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3403  * a signal. Release all buffer reservations and return to user-space.
3404  */
3405 static int
3406 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3407 		   uint64_t *bo_s, uint64_t *bo_l)
3408 {
3409 	struct amdgpu_bo_va_mapping *mapping;
3410 	struct interval_tree_node *node;
3411 	uint32_t i;
3412 	int r;
3413 
3414 	for (i = 0; i < p->n_pdds; i++) {
3415 		struct amdgpu_vm *vm;
3416 
3417 		if (!p->pdds[i]->drm_priv)
3418 			continue;
3419 
3420 		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3421 		r = amdgpu_bo_reserve(vm->root.bo, false);
3422 		if (r)
3423 			return r;
3424 
3425 		node = interval_tree_iter_first(&vm->va, start, last);
3426 		if (node) {
3427 			pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3428 				 start, last);
3429 			mapping = container_of((struct rb_node *)node,
3430 					       struct amdgpu_bo_va_mapping, rb);
3431 			if (bo_s && bo_l) {
3432 				*bo_s = mapping->start;
3433 				*bo_l = mapping->last;
3434 			}
3435 			amdgpu_bo_unreserve(vm->root.bo);
3436 			return -EADDRINUSE;
3437 		}
3438 		amdgpu_bo_unreserve(vm->root.bo);
3439 	}
3440 
3441 	return 0;
3442 }
3443 
3444 /**
3445  * svm_range_is_valid - check if virtual address range is valid
3446  * @p: current kfd_process
3447  * @start: range start address, in pages
3448  * @size: range size, in pages
3449  *
 * A virtual address range is valid if it is fully covered by one or more VMAs
 * that are not special device mappings.
3451  *
3452  * Context: Process context
3453  *
3454  * Return:
3455  *  0 - OK, otherwise error code
3456  */
3457 static int
3458 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3459 {
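	/* VMAs with these flags are special device mappings that cannot be
	 * handled as SVM ranges.
	 */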
3460 	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3461 	struct vm_area_struct *vma;
3462 	unsigned long end;
3463 	unsigned long start_unchg = start;
3464 
3465 	start <<= PAGE_SHIFT;
3466 	end = start + (size << PAGE_SHIFT);
3467 	do {
3468 		vma = vma_lookup(p->mm, start);
3469 		if (!vma || (vma->vm_flags & device_vma))
3470 			return -EFAULT;
3471 		start = min(end, vma->vm_end);
3472 	} while (start < end);
3473 
3474 	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3475 				  NULL);
3476 }
3477 
3478 /**
3479  * svm_range_best_prefetch_location - decide the best prefetch location
3480  * @prange: svm range structure
3481  *
3482  * For xnack off:
 * If the range maps to a single GPU, the best prefetch location is
 * prefetch_loc, which can be CPU or GPU.
 *
 * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch
 * location is the prefetch_loc GPU only if the mGPUs are connected in the same
 * XGMI hive; otherwise the best prefetch location is always CPU, because a GPU
 * cannot have a coherent mapping of another GPU's VRAM even with a large-BAR
 * PCIe connection.
3490  *
3491  * For xnack on:
 * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
 * prefetch_loc; access from another GPU will generate a vm fault and trigger
 * migration.
 *
 * If the range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is the
 * prefetch_loc GPU only if the mGPUs are connected in the same XGMI hive;
 * otherwise the best prefetch location is always CPU.
3498  *
3499  * Context: Process context
3500  *
3501  * Return:
 * 0 for CPU, or the GPU id of the best prefetch location
3503  */
3504 static uint32_t
3505 svm_range_best_prefetch_location(struct svm_range *prange)
3506 {
3507 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3508 	uint32_t best_loc = prange->prefetch_loc;
3509 	struct kfd_process_device *pdd;
3510 	struct kfd_node *bo_node;
3511 	struct kfd_process *p;
3512 	uint32_t gpuidx;
3513 
3514 	p = container_of(prange->svms, struct kfd_process, svms);
3515 
3516 	if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3517 		goto out;
3518 
3519 	bo_node = svm_range_get_node_by_id(prange, best_loc);
3520 	if (!bo_node) {
		WARN_ONCE(1, "failed to get valid kfd node at id 0x%x\n", best_loc);
3522 		best_loc = 0;
3523 		goto out;
3524 	}
3525 
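	/* APUs that prefer GTT keep SVM ranges in system memory, so the best
	 * prefetch location is always CPU.
	 */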
3526 	if (bo_node->adev->apu_prefer_gtt) {
3527 		best_loc = 0;
3528 		goto out;
3529 	}
3530 
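	/* With xnack on, only GPUs that access the range in place matter for
	 * the prefetch decision; other GPUs will fault and migrate on demand.
	 * With xnack off, every GPU with access has to be able to map the
	 * chosen location.
	 */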
3531 	if (p->xnack_enabled)
3532 		bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3533 	else
3534 		bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3535 			  MAX_GPU_INSTANCE);
3536 
3537 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3538 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3539 		if (!pdd) {
3540 			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3541 			continue;
3542 		}
3543 
3544 		if (pdd->dev->adev == bo_node->adev)
3545 			continue;
3546 
3547 		if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3548 			best_loc = 0;
3549 			break;
3550 		}
3551 	}
3552 
3553 out:
3554 	pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3555 		 p->xnack_enabled, &p->svms, prange->start, prange->last,
3556 		 best_loc);
3557 
3558 	return best_loc;
3559 }
3560 
3561 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3562  * @mm: current process mm_struct
3563  * @prange: svm range structure
3564  * @migrated: output, true if migration is triggered
3565  *
 * If the range's prefetch_loc is a GPU and its actual loc is cpu 0, migrate the
 * range from ram to vram.
 * If the range's prefetch_loc is cpu 0 and its actual loc is a GPU, migrate the
 * range from vram to ram.
 *
 * If GPU vm fault retry is not enabled, migration interacts with the MMU
 * notifier and restore work:
 * 1. migrate_vma_setup invalidates pages, the MMU notifier callback
 *    svm_range_evict stops all queues and schedules restore work
 * 2. svm_range_restore_work waits for the migration to finish:
 *    a. svm_range_validate_vram takes prange->migrate_mutex
 *    b. svm_range_validate_ram HMM get pages waits for the CPU fault handler
 *       to return
 * 3. restore work updates the GPU mappings and resumes all queues.
3579  *
3580  * Context: Process context
3581  *
3582  * Return:
3583  * 0 - OK, otherwise - error code of migration
3584  */
3585 static int
3586 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3587 			    bool *migrated)
3588 {
3589 	uint32_t best_loc;
3590 	int r = 0;
3591 
3592 	*migrated = false;
3593 	best_loc = svm_range_best_prefetch_location(prange);
3594 
	/* When best_loc is a gpu node and the same as prange->actual_loc, we
	 * still need to do the migration, because prange->actual_loc != 0 does
	 * not mean all pages in prange are in vram. hmm migrate will pick up
	 * the right pages during migration.
	 */
3600 	if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
3601 	    (best_loc == 0 && prange->actual_loc == 0))
3602 		return 0;
3603 
3604 	if (!best_loc) {
3605 		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
3606 					KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3607 		*migrated = !r;
3608 		return r;
3609 	}
3610 
3611 	r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
3612 				mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3613 	*migrated = !r;
3614 
3615 	return 0;
3616 }
3617 
3618 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3619 {
3620 	/* Dereferencing fence->svm_bo is safe here because the fence hasn't
3621 	 * signaled yet and we're under the protection of the fence->lock.
3622 	 * After the fence is signaled in svm_range_bo_release, we cannot get
3623 	 * here any more.
3624 	 *
3625 	 * Reference is dropped in svm_range_evict_svm_bo_worker.
3626 	 */
3627 	if (svm_bo_ref_unless_zero(fence->svm_bo)) {
3628 		WRITE_ONCE(fence->svm_bo->evicting, 1);
3629 		schedule_work(&fence->svm_bo->eviction_work);
3630 	}
3631 
3632 	return 0;
3633 }
3634 
3635 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3636 {
3637 	struct svm_range_bo *svm_bo;
3638 	struct mm_struct *mm;
3639 	int r = 0;
3640 
3641 	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3642 
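	/* If the process mm is already gone, the ranges are being torn down
	 * elsewhere; just drop the reference taken for this worker.
	 */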
3643 	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3644 		mm = svm_bo->eviction_fence->mm;
3645 	} else {
3646 		svm_range_bo_unref(svm_bo);
3647 		return;
3648 	}
3649 
3650 	mmap_read_lock(mm);
3651 	spin_lock(&svm_bo->list_lock);
3652 	while (!list_empty(&svm_bo->range_list) && !r) {
3653 		struct svm_range *prange =
3654 				list_first_entry(&svm_bo->range_list,
3655 						struct svm_range, svm_bo_list);
3656 		int retries = 3;
3657 
3658 		list_del_init(&prange->svm_bo_list);
3659 		spin_unlock(&svm_bo->list_lock);
3660 
3661 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3662 			 prange->start, prange->last);
3663 
3664 		mutex_lock(&prange->migrate_mutex);
3665 		do {
			/* Migrate all vram pages in this prange to sys ram;
			 * after that, prange->actual_loc should be zero.
3668 			 */
3669 			r = svm_migrate_vram_to_ram(prange, mm,
3670 					prange->start, prange->last,
3671 					KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3672 		} while (!r && prange->actual_loc && --retries);
3673 
3674 		if (!r && prange->actual_loc)
			pr_info_once("Migration failed during eviction\n");
3676 
3677 		if (!prange->actual_loc) {
3678 			mutex_lock(&prange->lock);
3679 			prange->svm_bo = NULL;
3680 			mutex_unlock(&prange->lock);
3681 		}
3682 		mutex_unlock(&prange->migrate_mutex);
3683 
3684 		spin_lock(&svm_bo->list_lock);
3685 	}
3686 	spin_unlock(&svm_bo->list_lock);
3687 	mmap_read_unlock(mm);
3688 	mmput(mm);
3689 
3690 	dma_fence_signal(&svm_bo->eviction_fence->base);
3691 
3692 	/* This is the last reference to svm_bo, after svm_range_vram_node_free
3693 	 * has been called in svm_migrate_vram_to_ram
3694 	 */
3695 	WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3696 	svm_range_bo_unref(svm_bo);
3697 }
3698 
3699 static int
3700 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3701 		   uint64_t start, uint64_t size, uint32_t nattr,
3702 		   struct kfd_ioctl_svm_attribute *attrs)
3703 {
3704 	struct amdkfd_process_info *process_info = p->kgd_process_info;
3705 	struct list_head update_list;
3706 	struct list_head insert_list;
3707 	struct list_head remove_list;
3708 	struct list_head remap_list;
3709 	struct svm_range_list *svms;
3710 	struct svm_range *prange;
3711 	struct svm_range *next;
3712 	bool update_mapping = false;
3713 	bool flush_tlb;
3714 	int r, ret = 0;
3715 
3716 	pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3717 		 p->lead_thread->pid, &p->svms, start, start + size - 1, size);
3718 
3719 	r = svm_range_check_attr(p, nattr, attrs);
3720 	if (r)
3721 		return r;
3722 
3723 	svms = &p->svms;
3724 
3725 	mutex_lock(&process_info->lock);
3726 
3727 	svm_range_list_lock_and_flush_work(svms, mm);
3728 
3729 	r = svm_range_is_valid(p, start, size);
3730 	if (r) {
3731 		pr_debug("invalid range r=%d\n", r);
3732 		mmap_write_unlock(mm);
3733 		goto out;
3734 	}
3735 
3736 	mutex_lock(&svms->lock);
3737 
3738 	/* Add new range and split existing ranges as needed */
3739 	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3740 			  &insert_list, &remove_list, &remap_list);
3741 	if (r) {
3742 		mutex_unlock(&svms->lock);
3743 		mmap_write_unlock(mm);
3744 		goto out;
3745 	}
3746 	/* Apply changes as a transaction */
3747 	list_for_each_entry_safe(prange, next, &insert_list, list) {
3748 		svm_range_add_to_svms(prange);
3749 		svm_range_add_notifier_locked(mm, prange);
3750 	}
3751 	list_for_each_entry(prange, &update_list, update_list) {
3752 		svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3753 		/* TODO: unmap ranges from GPU that lost access */
3754 	}
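	/* Ranges that were split are on the remap list and will be mapped again
	 * below; with xnack disabled this also requires a mapping update.
	 */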
3755 	update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
3756 
3757 	list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3758 		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3759 			 prange->svms, prange, prange->start,
3760 			 prange->last);
3761 		svm_range_unlink(prange);
3762 		svm_range_remove_notifier(prange);
3763 		svm_range_free(prange, false);
3764 	}
3765 
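	/* The range list and interval tree updates are complete; the migration
	 * and validation below only need the mmap lock for read.
	 */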
3766 	mmap_write_downgrade(mm);
3767 	/* Trigger migrations and revalidate and map to GPUs as needed. If
3768 	 * this fails we may be left with partially completed actions. There
3769 	 * is no clean way of rolling back to the previous state in such a
3770 	 * case because the rollback wouldn't be guaranteed to work either.
3771 	 */
3772 	list_for_each_entry(prange, &update_list, update_list) {
3773 		bool migrated;
3774 
3775 		mutex_lock(&prange->migrate_mutex);
3776 
3777 		r = svm_range_trigger_migration(mm, prange, &migrated);
3778 		if (r)
3779 			goto out_unlock_range;
3780 
3781 		if (migrated && (!p->xnack_enabled ||
3782 		    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3783 		    prange->mapped_to_gpu) {
3784 			pr_debug("restore_work will update mappings of GPUs\n");
3785 			mutex_unlock(&prange->migrate_mutex);
3786 			continue;
3787 		}
3788 
3789 		if (!migrated && !update_mapping) {
3790 			mutex_unlock(&prange->migrate_mutex);
3791 			continue;
3792 		}
3793 
3794 		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3795 
3796 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3797 					       MAX_GPU_INSTANCE, true, true, flush_tlb);
3798 		if (r)
3799 			pr_debug("failed %d to map svm range\n", r);
3800 
3801 out_unlock_range:
3802 		mutex_unlock(&prange->migrate_mutex);
3803 		if (r)
3804 			ret = r;
3805 	}
3806 
3807 	list_for_each_entry(prange, &remap_list, update_list) {
3808 		pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
3809 			 prange, prange->start, prange->last);
3810 		mutex_lock(&prange->migrate_mutex);
		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3812 					       MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
3813 		if (r)
3814 			pr_debug("failed %d on remap svm range\n", r);
3815 		mutex_unlock(&prange->migrate_mutex);
3816 		if (r)
3817 			ret = r;
3818 	}
3819 
3820 	dynamic_svm_range_dump(svms);
3821 
3822 	mutex_unlock(&svms->lock);
3823 	mmap_read_unlock(mm);
3824 out:
3825 	mutex_unlock(&process_info->lock);
3826 
3827 	pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] done, r=%d\n",
3828 		 p->lead_thread->pid, &p->svms, start, start + size - 1, r);
3829 
3830 	return ret ? ret : r;
3831 }
3832 
3833 static int
3834 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3835 		   uint64_t start, uint64_t size, uint32_t nattr,
3836 		   struct kfd_ioctl_svm_attribute *attrs)
3837 {
3838 	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3839 	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3840 	bool get_preferred_loc = false;
3841 	bool get_prefetch_loc = false;
3842 	bool get_granularity = false;
3843 	bool get_accessible = false;
3844 	bool get_flags = false;
3845 	uint64_t last = start + size - 1UL;
3846 	uint8_t granularity = 0xff;
3847 	struct interval_tree_node *node;
3848 	struct svm_range_list *svms;
3849 	struct svm_range *prange;
3850 	uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3851 	uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3852 	uint32_t flags_and = 0xffffffff;
3853 	uint32_t flags_or = 0;
3854 	int gpuidx;
3855 	uint32_t i;
3856 	int r = 0;
3857 
3858 	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3859 		 start + size - 1, nattr);
3860 
3861 	/* Flush pending deferred work to avoid racing with deferred actions from
3862 	 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3863 	 * can still race with get_attr because we don't hold the mmap lock. But that
3864 	 * would be a race condition in the application anyway, and undefined
3865 	 * behaviour is acceptable in that case.
3866 	 */
3867 	flush_work(&p->svms.deferred_list_work);
3868 
3869 	mmap_read_lock(mm);
3870 	r = svm_range_is_valid(p, start, size);
3871 	mmap_read_unlock(mm);
3872 	if (r) {
3873 		pr_debug("invalid range r=%d\n", r);
3874 		return r;
3875 	}
3876 
3877 	for (i = 0; i < nattr; i++) {
3878 		switch (attrs[i].type) {
3879 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3880 			get_preferred_loc = true;
3881 			break;
3882 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3883 			get_prefetch_loc = true;
3884 			break;
3885 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3886 			get_accessible = true;
3887 			break;
3888 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3889 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3890 			get_flags = true;
3891 			break;
3892 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3893 			get_granularity = true;
3894 			break;
3895 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3896 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3897 			fallthrough;
3898 		default:
3899 			pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3900 			return -EINVAL;
3901 		}
3902 	}
3903 
3904 	svms = &p->svms;
3905 
3906 	mutex_lock(&svms->lock);
3907 
3908 	node = interval_tree_iter_first(&svms->objects, start, last);
3909 	if (!node) {
3910 		pr_debug("range attrs not found return default values\n");
3911 		svm_range_set_default_attributes(svms, &location, &prefetch_loc,
3912 						 &granularity, &flags_and);
3913 		flags_or = flags_and;
3914 		if (p->xnack_enabled)
3915 			bitmap_copy(bitmap_access, svms->bitmap_supported,
3916 				    MAX_GPU_INSTANCE);
3917 		else
3918 			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3919 		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3920 		goto fill_values;
3921 	}
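	/* Aggregate attributes across all ranges overlapping the queried
	 * interval: locations are reported only if every range agrees, flags
	 * are accumulated with AND/OR, accessibility bitmaps are intersected,
	 * and the smallest granularity wins.
	 */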
3922 	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3923 	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3924 
3925 	while (node) {
3926 		struct interval_tree_node *next;
3927 
3928 		prange = container_of(node, struct svm_range, it_node);
3929 		next = interval_tree_iter_next(node, start, last);
3930 
3931 		if (get_preferred_loc) {
3932 			if (prange->preferred_loc ==
3933 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3934 			    (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3935 			     location != prange->preferred_loc)) {
3936 				location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3937 				get_preferred_loc = false;
3938 			} else {
3939 				location = prange->preferred_loc;
3940 			}
3941 		}
3942 		if (get_prefetch_loc) {
3943 			if (prange->prefetch_loc ==
3944 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3945 			    (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3946 			     prefetch_loc != prange->prefetch_loc)) {
3947 				prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3948 				get_prefetch_loc = false;
3949 			} else {
3950 				prefetch_loc = prange->prefetch_loc;
3951 			}
3952 		}
3953 		if (get_accessible) {
3954 			bitmap_and(bitmap_access, bitmap_access,
3955 				   prange->bitmap_access, MAX_GPU_INSTANCE);
3956 			bitmap_and(bitmap_aip, bitmap_aip,
3957 				   prange->bitmap_aip, MAX_GPU_INSTANCE);
3958 		}
3959 		if (get_flags) {
3960 			flags_and &= prange->flags;
3961 			flags_or |= prange->flags;
3962 		}
3963 
3964 		if (get_granularity && prange->granularity < granularity)
3965 			granularity = prange->granularity;
3966 
3967 		node = next;
3968 	}
3969 fill_values:
3970 	mutex_unlock(&svms->lock);
3971 
3972 	for (i = 0; i < nattr; i++) {
3973 		switch (attrs[i].type) {
3974 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3975 			attrs[i].value = location;
3976 			break;
3977 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3978 			attrs[i].value = prefetch_loc;
3979 			break;
3980 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3981 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
3982 							       attrs[i].value);
3983 			if (gpuidx < 0) {
3984 				pr_debug("invalid gpuid %x\n", attrs[i].value);
3985 				return -EINVAL;
3986 			}
3987 			if (test_bit(gpuidx, bitmap_access))
3988 				attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3989 			else if (test_bit(gpuidx, bitmap_aip))
3990 				attrs[i].type =
3991 					KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3992 			else
3993 				attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3994 			break;
3995 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3996 			attrs[i].value = flags_and;
3997 			break;
3998 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3999 			attrs[i].value = ~flags_or;
4000 			break;
4001 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
4002 			attrs[i].value = (uint32_t)granularity;
4003 			break;
4004 		}
4005 	}
4006 
4007 	return 0;
4008 }
4009 
4010 int kfd_criu_resume_svm(struct kfd_process *p)
4011 {
4012 	struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
4013 	int nattr_common = 4, nattr_accessibility = 1;
4014 	struct criu_svm_metadata *criu_svm_md = NULL;
4015 	struct svm_range_list *svms = &p->svms;
4016 	struct criu_svm_metadata *next = NULL;
4017 	uint32_t set_flags = 0xffffffff;
4018 	int i, j, num_attrs, ret = 0;
4019 	uint64_t set_attr_size;
4020 	struct mm_struct *mm;
4021 
4022 	if (list_empty(&svms->criu_svm_metadata_list)) {
4023 		pr_debug("No SVM data from CRIU restore stage 2\n");
4024 		return ret;
4025 	}
4026 
4027 	mm = get_task_mm(p->lead_thread);
4028 	if (!mm) {
4029 		pr_err("failed to get mm for the target process\n");
4030 		return -ESRCH;
4031 	}
4032 
4033 	num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
4034 
4035 	i = j = 0;
4036 	list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
4037 		pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
4038 			 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
4039 
4040 		for (j = 0; j < num_attrs; j++) {
4041 			pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
4042 				 i, j, criu_svm_md->data.attrs[j].type,
4043 				 i, j, criu_svm_md->data.attrs[j].value);
4044 			switch (criu_svm_md->data.attrs[j].type) {
			/* During a checkpoint operation, the query for the
			 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
			 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
			 * not used by the range that was checkpointed. Take
			 * care not to restore with an invalid value; otherwise
			 * the gpuidx value will be invalid and set_attr would
			 * eventually fail. Instead, replace such attributes
			 * with a harmless dummy attribute such as
			 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
			 */
4055 			case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
4056 				if (criu_svm_md->data.attrs[j].value ==
4057 				    KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
4058 					criu_svm_md->data.attrs[j].type =
4059 						KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4060 					criu_svm_md->data.attrs[j].value = 0;
4061 				}
4062 				break;
4063 			case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
4064 				set_flags = criu_svm_md->data.attrs[j].value;
4065 				break;
4066 			default:
4067 				break;
4068 			}
4069 		}
4070 
		/* CLR_FLAGS is not available via get_attr during checkpoint,
		 * but it needs to be inserted before restoring the ranges, so
		 * allocate extra space for it before calling set_attr.
4074 		 */
4075 		set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4076 						(num_attrs + 1);
4077 		set_attr_new = krealloc(set_attr, set_attr_size,
4078 					    GFP_KERNEL);
4079 		if (!set_attr_new) {
4080 			ret = -ENOMEM;
4081 			goto exit;
4082 		}
4083 		set_attr = set_attr_new;
4084 
4085 		memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
4086 					sizeof(struct kfd_ioctl_svm_attribute));
4087 		set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
4088 		set_attr[num_attrs].value = ~set_flags;
4089 
4090 		ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
4091 					 criu_svm_md->data.size, num_attrs + 1,
4092 					 set_attr);
4093 		if (ret) {
4094 			pr_err("CRIU: failed to set range attributes\n");
4095 			goto exit;
4096 		}
4097 
4098 		i++;
4099 	}
4100 exit:
4101 	kfree(set_attr);
4102 	list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
4103 		pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
4104 						criu_svm_md->data.start_addr);
4105 		kfree(criu_svm_md);
4106 	}
4107 
4108 	mmput(mm);
	return ret;
}
4112 
4113 int kfd_criu_restore_svm(struct kfd_process *p,
4114 			 uint8_t __user *user_priv_ptr,
4115 			 uint64_t *priv_data_offset,
4116 			 uint64_t max_priv_data_size)
4117 {
4118 	uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
4119 	int nattr_common = 4, nattr_accessibility = 1;
4120 	struct criu_svm_metadata *criu_svm_md = NULL;
4121 	struct svm_range_list *svms = &p->svms;
4122 	uint32_t num_devices;
4123 	int ret = 0;
4124 
4125 	num_devices = p->n_pdds;
	/* Handle one SVM range object at a time. The number of gpus is assumed
	 * to be the same on the restore node; this must be checked while
	 * evaluating the topology earlier.
4129 	 */
4130 
4131 	svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
4132 		(nattr_common + nattr_accessibility * num_devices);
4133 	svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
4134 
4135 	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4136 								svm_attrs_size;
4137 
4138 	criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
4139 	if (!criu_svm_md) {
4140 		pr_err("failed to allocate memory to store svm metadata\n");
4141 		return -ENOMEM;
4142 	}
4143 	if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
4144 		ret = -EINVAL;
4145 		goto exit;
4146 	}
4147 
4148 	ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
4149 			     svm_priv_data_size);
4150 	if (ret) {
4151 		ret = -EFAULT;
4152 		goto exit;
4153 	}
4154 	*priv_data_offset += svm_priv_data_size;
4155 
4156 	list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
4157 
	return 0;

exit:
4162 	kfree(criu_svm_md);
4163 	return ret;
4164 }
4165 
4166 void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
4167 			uint64_t *svm_priv_data_size)
4168 {
4169 	uint64_t total_size, accessibility_size, common_attr_size;
4170 	int nattr_common = 4, nattr_accessibility = 1;
4171 	int num_devices = p->n_pdds;
4172 	struct svm_range_list *svms;
4173 	struct svm_range *prange;
4174 	uint32_t count = 0;
4175 
4176 	*svm_priv_data_size = 0;
4177 
4178 	svms = &p->svms;
4179 
4180 	mutex_lock(&svms->lock);
4181 	list_for_each_entry(prange, &svms->list, list) {
4182 		pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
4183 			 prange, prange->start, prange->npages,
4184 			 prange->start + prange->npages - 1);
4185 		count++;
4186 	}
4187 	mutex_unlock(&svms->lock);
4188 
4189 	*num_svm_ranges = count;
	/* Only the accessibility attributes need to be queried for all the gpus
	 * individually; the remaining ones span the entire process regardless
	 * of the various gpu nodes. Of the remaining attributes,
	 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
4194 	 *
4195 	 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
4196 	 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
4197 	 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
4198 	 * KFD_IOCTL_SVM_ATTR_GRANULARITY
4199 	 *
	 * ** ACCESSIBILITY ATTRIBUTES **
4201 	 * (Considered as one, type is altered during query, value is gpuid)
4202 	 * KFD_IOCTL_SVM_ATTR_ACCESS
4203 	 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
4204 	 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
4205 	 */
4206 	if (*num_svm_ranges > 0) {
4207 		common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4208 			nattr_common;
4209 		accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
4210 			nattr_accessibility * num_devices;
4211 
4212 		total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4213 			common_attr_size + accessibility_size;
4214 
4215 		*svm_priv_data_size = *num_svm_ranges * total_size;
4216 	}
4217 
4218 	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
4219 		 *svm_priv_data_size);
4220 }
4221 
4222 int kfd_criu_checkpoint_svm(struct kfd_process *p,
4223 			    uint8_t __user *user_priv_data,
4224 			    uint64_t *priv_data_offset)
4225 {
4226 	struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
4227 	struct kfd_ioctl_svm_attribute *query_attr = NULL;
4228 	uint64_t svm_priv_data_size, query_attr_size = 0;
4229 	int index, nattr_common = 4, ret = 0;
4230 	struct svm_range_list *svms;
4231 	int num_devices = p->n_pdds;
4232 	struct svm_range *prange;
4233 	struct mm_struct *mm;
4234 
4235 	svms = &p->svms;
4236 
4237 	mm = get_task_mm(p->lead_thread);
4238 	if (!mm) {
4239 		pr_err("failed to get mm for the target process\n");
4240 		return -ESRCH;
4241 	}
4242 
4243 	query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4244 				(nattr_common + num_devices);
4245 
4246 	query_attr = kzalloc(query_attr_size, GFP_KERNEL);
4247 	if (!query_attr) {
4248 		ret = -ENOMEM;
4249 		goto exit;
4250 	}
4251 
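	/* Common attributes are queried once per range; the per-GPU
	 * accessibility attributes are appended below.
	 */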
4252 	query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
4253 	query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
4254 	query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4255 	query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
4256 
4257 	for (index = 0; index < num_devices; index++) {
4258 		struct kfd_process_device *pdd = p->pdds[index];
4259 
4260 		query_attr[index + nattr_common].type =
4261 			KFD_IOCTL_SVM_ATTR_ACCESS;
4262 		query_attr[index + nattr_common].value = pdd->user_gpu_id;
4263 	}
4264 
4265 	svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
4266 
4267 	svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
4268 	if (!svm_priv) {
4269 		ret = -ENOMEM;
4270 		goto exit_query;
4271 	}
4272 
4273 	index = 0;
4274 	list_for_each_entry(prange, &svms->list, list) {
4275 
4276 		svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
4277 		svm_priv->start_addr = prange->start;
4278 		svm_priv->size = prange->npages;
4279 		memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4280 		pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4281 			 prange, prange->start, prange->npages,
4282 			 prange->start + prange->npages - 1,
4283 			 prange->npages * PAGE_SIZE);
4284 
4285 		ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4286 					 svm_priv->size,
4287 					 (nattr_common + num_devices),
4288 					 svm_priv->attrs);
4289 		if (ret) {
4290 			pr_err("CRIU: failed to obtain range attributes\n");
4291 			goto exit_priv;
4292 		}
4293 
4294 		if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4295 				 svm_priv_data_size)) {
4296 			pr_err("Failed to copy svm priv to user\n");
4297 			ret = -EFAULT;
4298 			goto exit_priv;
4299 		}
4300 
		*priv_data_offset += svm_priv_data_size;
	}

exit_priv:
4307 	kfree(svm_priv);
4308 exit_query:
4309 	kfree(query_attr);
4310 exit:
4311 	mmput(mm);
4312 	return ret;
4313 }
4314 
4315 int
4316 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4317 	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4318 {
4319 	struct mm_struct *mm = current->mm;
4320 	int r;
4321 
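	/* SVM ranges are tracked internally in page units */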
4322 	start >>= PAGE_SHIFT;
4323 	size >>= PAGE_SHIFT;
4324 
4325 	switch (op) {
4326 	case KFD_IOCTL_SVM_OP_SET_ATTR:
4327 		r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4328 		break;
4329 	case KFD_IOCTL_SVM_OP_GET_ATTR:
4330 		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
4331 		break;
4332 	default:
4333 		r = -EINVAL;
4334 		break;
4335 	}
4336 
4337 	return r;
4338 }
4339