xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_svm.c (revision 3027ce13e04eee76539ca65c2cb1028a01c8c508)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2020-2021 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include <linux/dynamic_debug.h>
27 #include <drm/ttm/ttm_tt.h>
28 #include <drm/drm_exec.h>
29 
30 #include "amdgpu_sync.h"
31 #include "amdgpu_object.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_hmm.h"
34 #include "amdgpu.h"
35 #include "amdgpu_xgmi.h"
36 #include "kfd_priv.h"
37 #include "kfd_svm.h"
38 #include "kfd_migrate.h"
39 #include "kfd_smi_events.h"
40 
41 #ifdef dev_fmt
42 #undef dev_fmt
43 #endif
44 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
45 
46 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
47 
48 /* Long enough to ensure no retry fault comes after svm range is restored and
49  * page table is updated.
50  */
51 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	(2UL * NSEC_PER_MSEC)
52 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
53 #define dynamic_svm_range_dump(svms) \
54 	_dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
55 #else
56 #define dynamic_svm_range_dump(svms) \
57 	do { if (0) svm_range_debug_dump(svms); } while (0)
58 #endif
59 
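/* Illustrative note (not part of the original source): with CONFIG_DYNAMIC_DEBUG
 * the dump above is compiled in but skipped at runtime until the corresponding
 * dynamic debug callsite is enabled through the kernel's dynamic debug control
 * interface; without it, the "if (0)" form lets the compiler drop the call
 * entirely while still type-checking svm_range_debug_dump().
 */
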
60 /* A giant svm range is split into smaller ranges based on this value. It is
61  * the minimum over all dGPUs/APUs of 1/32 of the VRAM size, clamped between
62  * 2MB and 1GB and rounded down to a power of two.
63  */
64 static uint64_t max_svm_range_pages;
65 
66 struct criu_svm_metadata {
67 	struct list_head list;
68 	struct kfd_criu_svm_range_priv_data data;
69 };
70 
71 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
72 static bool
73 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
74 				    const struct mmu_notifier_range *range,
75 				    unsigned long cur_seq);
76 static int
77 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
78 		   uint64_t *bo_s, uint64_t *bo_l);
79 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
80 	.invalidate = svm_range_cpu_invalidate_pagetables,
81 };
82 
83 /**
84  * svm_range_unlink - unlink svm_range from lists and interval tree
85  * @prange: svm range structure to be removed
86  *
87  * Remove the svm_range from the svms and svm_bo lists and the svms
88  * interval tree.
89  *
90  * Context: The caller must hold svms->lock
91  */
92 static void svm_range_unlink(struct svm_range *prange)
93 {
94 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
95 		 prange, prange->start, prange->last);
96 
97 	if (prange->svm_bo) {
98 		spin_lock(&prange->svm_bo->list_lock);
99 		list_del(&prange->svm_bo_list);
100 		spin_unlock(&prange->svm_bo->list_lock);
101 	}
102 
103 	list_del(&prange->list);
104 	if (prange->it_node.start != 0 && prange->it_node.last != 0)
105 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
106 }
107 
108 static void
109 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
110 {
111 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
112 		 prange, prange->start, prange->last);
113 
114 	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
115 				     prange->start << PAGE_SHIFT,
116 				     prange->npages << PAGE_SHIFT,
117 				     &svm_range_mn_ops);
118 }
119 
120 /**
121  * svm_range_add_to_svms - add svm range to svms
122  * @prange: svm range structure to be added
123  *
124  * Add the svm range to the svms interval tree and linked list
125  *
126  * Context: The caller must hold svms->lock
127  */
128 static void svm_range_add_to_svms(struct svm_range *prange)
129 {
130 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
131 		 prange, prange->start, prange->last);
132 
133 	list_move_tail(&prange->list, &prange->svms->list);
134 	prange->it_node.start = prange->start;
135 	prange->it_node.last = prange->last;
136 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
137 }
138 
139 static void svm_range_remove_notifier(struct svm_range *prange)
140 {
141 	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
142 		 prange->svms, prange,
143 		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
144 		 prange->notifier.interval_tree.last >> PAGE_SHIFT);
145 
146 	if (prange->notifier.interval_tree.start != 0 &&
147 	    prange->notifier.interval_tree.last != 0)
148 		mmu_interval_notifier_remove(&prange->notifier);
149 }
150 
151 static bool
152 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
153 {
154 	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
155 	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
156 }
157 
158 static int
159 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
160 		      unsigned long offset, unsigned long npages,
161 		      unsigned long *hmm_pfns, uint32_t gpuidx)
162 {
163 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
164 	dma_addr_t *addr = prange->dma_addr[gpuidx];
165 	struct device *dev = adev->dev;
166 	struct page *page;
167 	int i, r;
168 
169 	if (!addr) {
170 		addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
171 		if (!addr)
172 			return -ENOMEM;
173 		prange->dma_addr[gpuidx] = addr;
174 	}
175 
176 	addr += offset;
177 	for (i = 0; i < npages; i++) {
178 		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
179 			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
180 
181 		page = hmm_pfn_to_page(hmm_pfns[i]);
182 		if (is_zone_device_page(page)) {
183 			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
184 
185 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
186 				   bo_adev->vm_manager.vram_base_offset -
187 				   bo_adev->kfd.pgmap.range.start;
188 			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
189 			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
190 			continue;
191 		}
192 		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
193 		r = dma_mapping_error(dev, addr[i]);
194 		if (r) {
195 			dev_err(dev, "failed %d dma_map_page\n", r);
196 			return r;
197 		}
198 		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
199 				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
200 	}
201 
202 	return 0;
203 }
204 
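/* Illustrative note (not part of the driver source): the dma_addr[] entries
 * filled above double as tagged values. For VRAM pages the entry is the
 * device-relative VRAM offset plus vram_base_offset with the
 * SVM_RANGE_VRAM_DOMAIN bit set; for system memory it is a real DMA address
 * returned by dma_map_page(). svm_is_valid_dma_mapping_addr() rejects entries
 * carrying the VRAM bit so that only genuine DMA mappings are ever unmapped.
 */
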
205 static int
206 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
207 		  unsigned long offset, unsigned long npages,
208 		  unsigned long *hmm_pfns)
209 {
210 	struct kfd_process *p;
211 	uint32_t gpuidx;
212 	int r;
213 
214 	p = container_of(prange->svms, struct kfd_process, svms);
215 
216 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
217 		struct kfd_process_device *pdd;
218 
219 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
220 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
221 		if (!pdd) {
222 			pr_debug("failed to find device idx %d\n", gpuidx);
223 			return -EINVAL;
224 		}
225 
226 		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
227 					  hmm_pfns, gpuidx);
228 		if (r)
229 			break;
230 	}
231 
232 	return r;
233 }
234 
235 void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
236 			 unsigned long offset, unsigned long npages)
237 {
238 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
239 	int i;
240 
241 	if (!dma_addr)
242 		return;
243 
244 	for (i = offset; i < offset + npages; i++) {
245 		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
246 			continue;
247 		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
248 		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
249 		dma_addr[i] = 0;
250 	}
251 }
252 
253 void svm_range_dma_unmap(struct svm_range *prange)
254 {
255 	struct kfd_process_device *pdd;
256 	dma_addr_t *dma_addr;
257 	struct device *dev;
258 	struct kfd_process *p;
259 	uint32_t gpuidx;
260 
261 	p = container_of(prange->svms, struct kfd_process, svms);
262 
263 	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
264 		dma_addr = prange->dma_addr[gpuidx];
265 		if (!dma_addr)
266 			continue;
267 
268 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
269 		if (!pdd) {
270 			pr_debug("failed to find device idx %d\n", gpuidx);
271 			continue;
272 		}
273 		dev = &pdd->dev->adev->pdev->dev;
274 
275 		svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
276 	}
277 }
278 
279 static void svm_range_free(struct svm_range *prange, bool do_unmap)
280 {
281 	uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
282 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
283 	uint32_t gpuidx;
284 
285 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
286 		 prange->start, prange->last);
287 
288 	svm_range_vram_node_free(prange);
289 	if (do_unmap)
290 		svm_range_dma_unmap(prange);
291 
292 	if (do_unmap && !p->xnack_enabled) {
293 		pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
294 		amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
295 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
296 	}
297 
298 	/* free dma_addr array for each gpu */
299 	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
300 		if (prange->dma_addr[gpuidx]) {
301 			kvfree(prange->dma_addr[gpuidx]);
302 			prange->dma_addr[gpuidx] = NULL;
303 		}
304 	}
305 
306 	mutex_destroy(&prange->lock);
307 	mutex_destroy(&prange->migrate_mutex);
308 	kfree(prange);
309 }
310 
311 static void
312 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
313 				 uint8_t *granularity, uint32_t *flags)
314 {
315 	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
316 	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
317 	*granularity = 9;
318 	*flags =
319 		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
320 }
321 
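/* Illustrative note (not part of the driver source): granularity is the log2
 * of the fault/migration granularity in pages, so the default of 9 above
 * corresponds to 2^9 = 512 pages, i.e. 2MB with 4KB pages. User-supplied
 * values are clamped to at most 0x3F in svm_range_apply_attrs() below.
 */
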
322 static struct
323 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
324 			 uint64_t last, bool update_mem_usage)
325 {
326 	uint64_t size = last - start + 1;
327 	struct svm_range *prange;
328 	struct kfd_process *p;
329 
330 	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
331 	if (!prange)
332 		return NULL;
333 
334 	p = container_of(svms, struct kfd_process, svms);
335 	if (!p->xnack_enabled && update_mem_usage &&
336 	    amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
337 				    KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
338 		pr_info("SVM mapping failed, exceeds resident system memory limit\n");
339 		kfree(prange);
340 		return NULL;
341 	}
342 	prange->npages = size;
343 	prange->svms = svms;
344 	prange->start = start;
345 	prange->last = last;
346 	INIT_LIST_HEAD(&prange->list);
347 	INIT_LIST_HEAD(&prange->update_list);
348 	INIT_LIST_HEAD(&prange->svm_bo_list);
349 	INIT_LIST_HEAD(&prange->deferred_list);
350 	INIT_LIST_HEAD(&prange->child_list);
351 	atomic_set(&prange->invalid, 0);
352 	prange->validate_timestamp = 0;
353 	prange->vram_pages = 0;
354 	mutex_init(&prange->migrate_mutex);
355 	mutex_init(&prange->lock);
356 
357 	if (p->xnack_enabled)
358 		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
359 			    MAX_GPU_INSTANCE);
360 
361 	svm_range_set_default_attributes(&prange->preferred_loc,
362 					 &prange->prefetch_loc,
363 					 &prange->granularity, &prange->flags);
364 
365 	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
366 
367 	return prange;
368 }
369 
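/* Minimal usage sketch (illustrative only, not part of the driver): the real
 * callers, e.g. svm_range_set_attr() via svm_range_add() (not shown in this
 * excerpt), create a range and then link it while holding the mmap write lock
 * and svms->lock, roughly like:
 *
 *	prange = svm_range_new(&p->svms, start, last, true);
 *	if (!prange)
 *		return -ENOMEM;
 *	svm_range_add_to_svms(prange);
 *	svm_range_add_notifier_locked(mm, prange);
 */
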
370 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
371 {
372 	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
373 		return false;
374 
375 	return true;
376 }
377 
378 static void svm_range_bo_release(struct kref *kref)
379 {
380 	struct svm_range_bo *svm_bo;
381 
382 	svm_bo = container_of(kref, struct svm_range_bo, kref);
383 	pr_debug("svm_bo 0x%p\n", svm_bo);
384 
385 	spin_lock(&svm_bo->list_lock);
386 	while (!list_empty(&svm_bo->range_list)) {
387 		struct svm_range *prange =
388 				list_first_entry(&svm_bo->range_list,
389 						struct svm_range, svm_bo_list);
390 		/* list_del_init tells a concurrent svm_range_vram_node_new when
391 		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
392 		 */
393 		list_del_init(&prange->svm_bo_list);
394 		spin_unlock(&svm_bo->list_lock);
395 
396 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
397 			 prange->start, prange->last);
398 		mutex_lock(&prange->lock);
399 		prange->svm_bo = NULL;
400 		/* prange should not hold vram page now */
401 		WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
402 		mutex_unlock(&prange->lock);
403 
404 		spin_lock(&svm_bo->list_lock);
405 	}
406 	spin_unlock(&svm_bo->list_lock);
407 	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
408 		/* We're not in the eviction worker. Signal the fence. */
409 		dma_fence_signal(&svm_bo->eviction_fence->base);
410 	dma_fence_put(&svm_bo->eviction_fence->base);
411 	amdgpu_bo_unref(&svm_bo->bo);
412 	kfree(svm_bo);
413 }
414 
415 static void svm_range_bo_wq_release(struct work_struct *work)
416 {
417 	struct svm_range_bo *svm_bo;
418 
419 	svm_bo = container_of(work, struct svm_range_bo, release_work);
420 	svm_range_bo_release(&svm_bo->kref);
421 }
422 
423 static void svm_range_bo_release_async(struct kref *kref)
424 {
425 	struct svm_range_bo *svm_bo;
426 
427 	svm_bo = container_of(kref, struct svm_range_bo, kref);
428 	pr_debug("svm_bo 0x%p\n", svm_bo);
429 	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
430 	schedule_work(&svm_bo->release_work);
431 }
432 
433 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
434 {
435 	kref_put(&svm_bo->kref, svm_range_bo_release_async);
436 }
437 
438 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
439 {
440 	if (svm_bo)
441 		kref_put(&svm_bo->kref, svm_range_bo_release);
442 }
443 
444 static bool
445 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
446 {
447 	mutex_lock(&prange->lock);
448 	if (!prange->svm_bo) {
449 		mutex_unlock(&prange->lock);
450 		return false;
451 	}
452 	if (prange->ttm_res) {
453 		/* We still have a reference, all is well */
454 		mutex_unlock(&prange->lock);
455 		return true;
456 	}
457 	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
458 		/*
459 		 * Migrate from GPU to GPU, remove range from source svm_bo->node
460 		 * range list, and return false to allocate svm_bo from destination
461 		 * node.
462 		 */
463 		if (prange->svm_bo->node != node) {
464 			mutex_unlock(&prange->lock);
465 
466 			spin_lock(&prange->svm_bo->list_lock);
467 			list_del_init(&prange->svm_bo_list);
468 			spin_unlock(&prange->svm_bo->list_lock);
469 
470 			svm_range_bo_unref(prange->svm_bo);
471 			return false;
472 		}
473 		if (READ_ONCE(prange->svm_bo->evicting)) {
474 			struct dma_fence *f;
475 			struct svm_range_bo *svm_bo;
476 			/* The BO is getting evicted,
477 			 * we need to get a new one
478 			 */
479 			mutex_unlock(&prange->lock);
480 			svm_bo = prange->svm_bo;
481 			f = dma_fence_get(&svm_bo->eviction_fence->base);
482 			svm_range_bo_unref(prange->svm_bo);
483 			/* wait for the fence to avoid long spin-loop
484 			 * at list_empty_careful
485 			 */
486 			dma_fence_wait(f, false);
487 			dma_fence_put(f);
488 		} else {
489 			/* The BO was still around and we got
490 			 * a new reference to it
491 			 */
492 			mutex_unlock(&prange->lock);
493 			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
494 				 prange->svms, prange->start, prange->last);
495 
496 			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
497 			return true;
498 		}
499 
500 	} else {
501 		mutex_unlock(&prange->lock);
502 	}
503 
504 	/* We need a new svm_bo. Spin-loop to wait for concurrent
505 	 * svm_range_bo_release to finish removing this range from
506 	 * its range list and set prange->svm_bo to null. After this,
507 	 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
508 	 */
509 	while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
510 		cond_resched();
511 
512 	return false;
513 }
514 
515 static struct svm_range_bo *svm_range_bo_new(void)
516 {
517 	struct svm_range_bo *svm_bo;
518 
519 	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
520 	if (!svm_bo)
521 		return NULL;
522 
523 	kref_init(&svm_bo->kref);
524 	INIT_LIST_HEAD(&svm_bo->range_list);
525 	spin_lock_init(&svm_bo->list_lock);
526 
527 	return svm_bo;
528 }
529 
530 int
531 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
532 			bool clear)
533 {
534 	struct amdgpu_bo_param bp;
535 	struct svm_range_bo *svm_bo;
536 	struct amdgpu_bo_user *ubo;
537 	struct amdgpu_bo *bo;
538 	struct kfd_process *p;
539 	struct mm_struct *mm;
540 	int r;
541 
542 	p = container_of(prange->svms, struct kfd_process, svms);
543 	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
544 		 prange->start, prange->last);
545 
546 	if (svm_range_validate_svm_bo(node, prange))
547 		return 0;
548 
549 	svm_bo = svm_range_bo_new();
550 	if (!svm_bo) {
551 		pr_debug("failed to alloc svm bo\n");
552 		return -ENOMEM;
553 	}
554 	mm = get_task_mm(p->lead_thread);
555 	if (!mm) {
556 		pr_debug("failed to get mm\n");
557 		kfree(svm_bo);
558 		return -ESRCH;
559 	}
560 	svm_bo->node = node;
561 	svm_bo->eviction_fence =
562 		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
563 					   mm,
564 					   svm_bo);
565 	mmput(mm);
566 	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
567 	svm_bo->evicting = 0;
568 	memset(&bp, 0, sizeof(bp));
569 	bp.size = prange->npages * PAGE_SIZE;
570 	bp.byte_align = PAGE_SIZE;
571 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
572 	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
573 	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
574 	bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
575 	bp.type = ttm_bo_type_device;
576 	bp.resv = NULL;
577 	if (node->xcp)
578 		bp.xcp_id_plus1 = node->xcp->id + 1;
579 
580 	r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
581 	if (r) {
582 		pr_debug("failed %d to create bo\n", r);
583 		goto create_bo_failed;
584 	}
585 	bo = &ubo->bo;
586 
587 	pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
588 		 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
589 		 bp.xcp_id_plus1 - 1);
590 
591 	r = amdgpu_bo_reserve(bo, true);
592 	if (r) {
593 		pr_debug("failed %d to reserve bo\n", r);
594 		goto reserve_bo_failed;
595 	}
596 
597 	if (clear) {
598 		r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
599 		if (r) {
600 			pr_debug("failed %d to sync bo\n", r);
601 			amdgpu_bo_unreserve(bo);
602 			goto reserve_bo_failed;
603 		}
604 	}
605 
606 	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
607 	if (r) {
608 		pr_debug("failed %d to reserve bo\n", r);
609 		amdgpu_bo_unreserve(bo);
610 		goto reserve_bo_failed;
611 	}
612 	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
613 
614 	amdgpu_bo_unreserve(bo);
615 
616 	svm_bo->bo = bo;
617 	prange->svm_bo = svm_bo;
618 	prange->ttm_res = bo->tbo.resource;
619 	prange->offset = 0;
620 
621 	spin_lock(&svm_bo->list_lock);
622 	list_add(&prange->svm_bo_list, &svm_bo->range_list);
623 	spin_unlock(&svm_bo->list_lock);
624 
625 	return 0;
626 
627 reserve_bo_failed:
628 	amdgpu_bo_unref(&bo);
629 create_bo_failed:
630 	dma_fence_put(&svm_bo->eviction_fence->base);
631 	kfree(svm_bo);
632 	prange->ttm_res = NULL;
633 
634 	return r;
635 }
636 
637 void svm_range_vram_node_free(struct svm_range *prange)
638 {
639 	/* serialize prange->svm_bo unref */
640 	mutex_lock(&prange->lock);
641 	/* prange->svm_bo has not been unreferenced yet */
642 	if (prange->ttm_res) {
643 		prange->ttm_res = NULL;
644 		mutex_unlock(&prange->lock);
645 		svm_range_bo_unref(prange->svm_bo);
646 	} else
647 		mutex_unlock(&prange->lock);
648 }
649 
650 struct kfd_node *
651 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
652 {
653 	struct kfd_process *p;
654 	struct kfd_process_device *pdd;
655 
656 	p = container_of(prange->svms, struct kfd_process, svms);
657 	pdd = kfd_process_device_data_by_id(p, gpu_id);
658 	if (!pdd) {
659 		pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
660 		return NULL;
661 	}
662 
663 	return pdd->dev;
664 }
665 
666 struct kfd_process_device *
667 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
668 {
669 	struct kfd_process *p;
670 
671 	p = container_of(prange->svms, struct kfd_process, svms);
672 
673 	return kfd_get_process_device_data(node, p);
674 }
675 
676 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
677 {
678 	struct ttm_operation_ctx ctx = { false, false };
679 
680 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
681 
682 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
683 }
684 
685 static int
686 svm_range_check_attr(struct kfd_process *p,
687 		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
688 {
689 	uint32_t i;
690 
691 	for (i = 0; i < nattr; i++) {
692 		uint32_t val = attrs[i].value;
693 		int gpuidx = MAX_GPU_INSTANCE;
694 
695 		switch (attrs[i].type) {
696 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
697 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
698 			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
699 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
700 			break;
701 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
702 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
703 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
704 			break;
705 		case KFD_IOCTL_SVM_ATTR_ACCESS:
706 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
707 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
708 			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
709 			break;
710 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
711 			break;
712 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
713 			break;
714 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
715 			break;
716 		default:
717 			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
718 			return -EINVAL;
719 		}
720 
721 		if (gpuidx < 0) {
722 			pr_debug("no GPU 0x%x found\n", val);
723 			return -EINVAL;
724 		} else if (gpuidx < MAX_GPU_INSTANCE &&
725 			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
726 			pr_debug("GPU 0x%x not supported\n", val);
727 			return -EINVAL;
728 		}
729 	}
730 
731 	return 0;
732 }
733 
734 static void
735 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
736 		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
737 		      bool *update_mapping)
738 {
739 	uint32_t i;
740 	int gpuidx;
741 
742 	for (i = 0; i < nattr; i++) {
743 		switch (attrs[i].type) {
744 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
745 			prange->preferred_loc = attrs[i].value;
746 			break;
747 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
748 			prange->prefetch_loc = attrs[i].value;
749 			break;
750 		case KFD_IOCTL_SVM_ATTR_ACCESS:
751 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
752 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
753 			if (!p->xnack_enabled)
754 				*update_mapping = true;
755 
756 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
757 							       attrs[i].value);
758 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
759 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
760 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
761 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
762 				bitmap_set(prange->bitmap_access, gpuidx, 1);
763 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
764 			} else {
765 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
766 				bitmap_set(prange->bitmap_aip, gpuidx, 1);
767 			}
768 			break;
769 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
770 			*update_mapping = true;
771 			prange->flags |= attrs[i].value;
772 			break;
773 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
774 			*update_mapping = true;
775 			prange->flags &= ~attrs[i].value;
776 			break;
777 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
778 			prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
779 			break;
780 		default:
781 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
782 		}
783 	}
784 }
785 
786 static bool
787 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
788 			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
789 {
790 	uint32_t i;
791 	int gpuidx;
792 
793 	for (i = 0; i < nattr; i++) {
794 		switch (attrs[i].type) {
795 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
796 			if (prange->preferred_loc != attrs[i].value)
797 				return false;
798 			break;
799 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
800 			/* Prefetch should always trigger a migration even
801 			 * if the value of the attribute didn't change.
802 			 */
803 			return false;
804 		case KFD_IOCTL_SVM_ATTR_ACCESS:
805 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
806 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
807 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
808 							       attrs[i].value);
809 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
810 				if (test_bit(gpuidx, prange->bitmap_access) ||
811 				    test_bit(gpuidx, prange->bitmap_aip))
812 					return false;
813 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
814 				if (!test_bit(gpuidx, prange->bitmap_access))
815 					return false;
816 			} else {
817 				if (!test_bit(gpuidx, prange->bitmap_aip))
818 					return false;
819 			}
820 			break;
821 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
822 			if ((prange->flags & attrs[i].value) != attrs[i].value)
823 				return false;
824 			break;
825 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
826 			if ((prange->flags & attrs[i].value) != 0)
827 				return false;
828 			break;
829 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
830 			if (prange->granularity != attrs[i].value)
831 				return false;
832 			break;
833 		default:
834 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
835 		}
836 	}
837 
838 	return true;
839 }
840 
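/* Illustrative sketch (not part of the driver): the attribute pairs consumed
 * by svm_range_check_attr(), svm_range_apply_attrs() and
 * svm_range_is_same_attrs() above are what user space passes through the
 * AMDKFD_IOC_SVM ioctl, for example to prefetch a range to a GPU and grant it
 * access (gpu_id is a hypothetical variable holding the target GPU id):
 *
 *	struct kfd_ioctl_svm_attribute attrs[] = {
 *		{ .type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC, .value = gpu_id },
 *		{ .type = KFD_IOCTL_SVM_ATTR_ACCESS,       .value = gpu_id },
 *	};
 */
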
841 /**
842  * svm_range_debug_dump - print all range information from svms
843  * @svms: svm range list header
844  *
845  * Debug output of svm range start, end, and prefetch location from the svms
846  * interval tree and linked list
847  *
848  * Context: The caller must hold svms->lock
849  */
850 static void svm_range_debug_dump(struct svm_range_list *svms)
851 {
852 	struct interval_tree_node *node;
853 	struct svm_range *prange;
854 
855 	pr_debug("dump svms 0x%p list\n", svms);
856 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
857 
858 	list_for_each_entry(prange, &svms->list, list) {
859 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
860 			 prange, prange->start, prange->npages,
861 			 prange->start + prange->npages - 1,
862 			 prange->actual_loc);
863 	}
864 
865 	pr_debug("dump svms 0x%p interval tree\n", svms);
866 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
867 	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
868 	while (node) {
869 		prange = container_of(node, struct svm_range, it_node);
870 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
871 			 prange, prange->start, prange->npages,
872 			 prange->start + prange->npages - 1,
873 			 prange->actual_loc);
874 		node = interval_tree_iter_next(node, 0, ~0ULL);
875 	}
876 }
877 
878 static void *
879 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
880 		     uint64_t offset, uint64_t *vram_pages)
881 {
882 	unsigned char *src = (unsigned char *)psrc + offset;
883 	unsigned char *dst;
884 	uint64_t i;
885 
886 	dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
887 	if (!dst)
888 		return NULL;
889 
890 	if (!vram_pages) {
891 		memcpy(dst, src, num_elements * size);
892 		return (void *)dst;
893 	}
894 
895 	*vram_pages = 0;
896 	for (i = 0; i < num_elements; i++) {
897 		dma_addr_t *temp;
898 		temp = (dma_addr_t *)dst + i;
899 		*temp = *((dma_addr_t *)src + i);
900 		if (*temp & SVM_RANGE_VRAM_DOMAIN)
901 			(*vram_pages)++;
902 	}
903 
904 	return (void *)dst;
905 }
906 
907 static int
908 svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
909 {
910 	int i;
911 
912 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
913 		if (!src->dma_addr[i])
914 			continue;
915 		dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
916 					sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
917 		if (!dst->dma_addr[i])
918 			return -ENOMEM;
919 	}
920 
921 	return 0;
922 }
923 
924 static int
925 svm_range_split_array(void *ppnew, void *ppold, size_t size,
926 		      uint64_t old_start, uint64_t old_n,
927 		      uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
928 {
929 	unsigned char *new, *old, *pold;
930 	uint64_t d;
931 
932 	if (!ppold)
933 		return 0;
934 	pold = *(unsigned char **)ppold;
935 	if (!pold)
936 		return 0;
937 
938 	d = (new_start - old_start) * size;
939 	/* get dma addr array for new range and calculate its vram page number */
940 	new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
941 	if (!new)
942 		return -ENOMEM;
943 	d = (new_start == old_start) ? new_n * size : 0;
944 	old = svm_range_copy_array(pold, size, old_n, d, NULL);
945 	if (!old) {
946 		kvfree(new);
947 		return -ENOMEM;
948 	}
949 	kvfree(pold);
950 	*(void **)ppold = old;
951 	*(void **)ppnew = new;
952 
953 	return 0;
954 }
955 
956 static int
957 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
958 		      uint64_t start, uint64_t last)
959 {
960 	uint64_t npages = last - start + 1;
961 	int i, r;
962 
963 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
964 		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
965 					  sizeof(*old->dma_addr[i]), old->start,
966 					  npages, new->start, new->npages,
967 					  old->actual_loc ? &new->vram_pages : NULL);
968 		if (r)
969 			return r;
970 	}
971 	if (old->actual_loc)
972 		old->vram_pages -= new->vram_pages;
973 
974 	return 0;
975 }
976 
977 static int
978 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
979 		      uint64_t start, uint64_t last)
980 {
981 	uint64_t npages = last - start + 1;
982 
983 	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
984 		 new->svms, new, new->start, start, last);
985 
986 	if (new->start == old->start) {
987 		new->offset = old->offset;
988 		old->offset += new->npages;
989 	} else {
990 		new->offset = old->offset + npages;
991 	}
992 
993 	new->svm_bo = svm_range_bo_ref(old->svm_bo);
994 	new->ttm_res = old->ttm_res;
995 
996 	spin_lock(&new->svm_bo->list_lock);
997 	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
998 	spin_unlock(&new->svm_bo->list_lock);
999 
1000 	return 0;
1001 }
1002 
1003 /**
1004  * svm_range_split_adjust - split range and adjust
1005  *
1006  * @new: new range
1007  * @old: the old range
1008  * @start: the old range adjust to start address in pages
1009  * @last: the old range adjust to last address in pages
1010  *
1011  * Copy the system memory dma_addr or vram ttm_res from the old range to the
1012  * new range, covering new->start for new->npages pages; the remaining old
1013  * range is adjusted to span start to last.
1014  *
1015  * Return:
1016  * 0 - OK, -ENOMEM - out of memory
1017  */
1018 static int
1019 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
1020 		      uint64_t start, uint64_t last)
1021 {
1022 	int r;
1023 
1024 	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1025 		 new->svms, new->start, old->start, old->last, start, last);
1026 
1027 	if (new->start < old->start ||
1028 	    new->last > old->last) {
1029 		WARN_ONCE(1, "invalid new range start or last\n");
1030 		return -EINVAL;
1031 	}
1032 
1033 	r = svm_range_split_pages(new, old, start, last);
1034 	if (r)
1035 		return r;
1036 
1037 	if (old->actual_loc && old->ttm_res) {
1038 		r = svm_range_split_nodes(new, old, start, last);
1039 		if (r)
1040 			return r;
1041 	}
1042 
1043 	old->npages = last - start + 1;
1044 	old->start = start;
1045 	old->last = last;
1046 	new->flags = old->flags;
1047 	new->preferred_loc = old->preferred_loc;
1048 	new->prefetch_loc = old->prefetch_loc;
1049 	new->actual_loc = old->actual_loc;
1050 	new->granularity = old->granularity;
1051 	new->mapped_to_gpu = old->mapped_to_gpu;
1052 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1053 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1054 
1055 	return 0;
1056 }
1057 
1058 /**
1059  * svm_range_split - split a range in 2 ranges
1060  *
1061  * @prange: the svm range to split
1062  * @start: the remaining range start address in pages
1063  * @last: the remaining range last address in pages
1064  * @new: the result new range generated
1065  *
1066  * Two cases only:
1067  * case 1: if start == prange->start
1068  *         prange ==> prange[start, last]
1069  *         new range [last + 1, prange->last]
1070  *
1071  * case 2: if last == prange->last
1072  *         prange ==> prange[start, last]
1073  *         new range [prange->start, start - 1]
1074  *
1075  * Return:
1076  * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1077  */
1078 static int
1079 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1080 		struct svm_range **new)
1081 {
1082 	uint64_t old_start = prange->start;
1083 	uint64_t old_last = prange->last;
1084 	struct svm_range_list *svms;
1085 	int r = 0;
1086 
1087 	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1088 		 old_start, old_last, start, last);
1089 
1090 	if (old_start != start && old_last != last)
1091 		return -EINVAL;
1092 	if (start < old_start || last > old_last)
1093 		return -EINVAL;
1094 
1095 	svms = prange->svms;
1096 	if (old_start == start)
1097 		*new = svm_range_new(svms, last + 1, old_last, false);
1098 	else
1099 		*new = svm_range_new(svms, old_start, start - 1, false);
1100 	if (!*new)
1101 		return -ENOMEM;
1102 
1103 	r = svm_range_split_adjust(*new, prange, start, last);
1104 	if (r) {
1105 		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1106 			 r, old_start, old_last, start, last);
1107 		svm_range_free(*new, false);
1108 		*new = NULL;
1109 	}
1110 
1111 	return r;
1112 }
1113 
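/* Worked example (illustrative, not part of the driver): for a hypothetical
 * prange covering pages [0x1000 0x1fff], the two supported splits behave as:
 *
 *	svm_range_split(prange, 0x1000, 0x17ff, &new);
 *		prange => [0x1000 0x17ff], new => [0x1800 0x1fff]   (case 1)
 *
 *	svm_range_split(prange, 0x1800, 0x1fff, &new);
 *		prange => [0x1800 0x1fff], new => [0x1000 0x17ff]   (case 2)
 *
 * A start/last pair that matches neither prange->start nor prange->last, or
 * that falls outside prange, returns -EINVAL.
 */
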
1114 static int
1115 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
1116 		     struct list_head *insert_list, struct list_head *remap_list)
1117 {
1118 	struct svm_range *tail = NULL;
1119 	int r = svm_range_split(prange, prange->start, new_last, &tail);
1120 
1121 	if (!r) {
1122 		list_add(&tail->list, insert_list);
1123 		if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
1124 			list_add(&tail->update_list, remap_list);
1125 	}
1126 	return r;
1127 }
1128 
1129 static int
1130 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
1131 		     struct list_head *insert_list, struct list_head *remap_list)
1132 {
1133 	struct svm_range *head = NULL;
1134 	int r = svm_range_split(prange, new_start, prange->last, &head);
1135 
1136 	if (!r) {
1137 		list_add(&head->list, insert_list);
1138 		if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
1139 			list_add(&head->update_list, remap_list);
1140 	}
1141 	return r;
1142 }
1143 
1144 static void
1145 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1146 		    struct svm_range *pchild, enum svm_work_list_ops op)
1147 {
1148 	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1149 		 pchild, pchild->start, pchild->last, prange, op);
1150 
1151 	pchild->work_item.mm = mm;
1152 	pchild->work_item.op = op;
1153 	list_add_tail(&pchild->child_list, &prange->child_list);
1154 }
1155 
1156 static bool
1157 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1158 {
1159 	return (node_a->adev == node_b->adev ||
1160 		amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
1161 }
1162 
1163 static uint64_t
1164 svm_range_get_pte_flags(struct kfd_node *node,
1165 			struct svm_range *prange, int domain)
1166 {
1167 	struct kfd_node *bo_node;
1168 	uint32_t flags = prange->flags;
1169 	uint32_t mapping_flags = 0;
1170 	uint64_t pte_flags;
1171 	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1172 	bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
1173 	bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
1174 	bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/
1175 	unsigned int mtype_local;
1176 
1177 	if (domain == SVM_RANGE_VRAM_DOMAIN)
1178 		bo_node = prange->svm_bo->node;
1179 
1180 	switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) {
1181 	case IP_VERSION(9, 4, 1):
1182 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1183 			if (bo_node == node) {
1184 				mapping_flags |= coherent ?
1185 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1186 			} else {
1187 				mapping_flags |= coherent ?
1188 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1189 				if (svm_nodes_in_same_hive(node, bo_node))
1190 					snoop = true;
1191 			}
1192 		} else {
1193 			mapping_flags |= coherent ?
1194 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1195 		}
1196 		break;
1197 	case IP_VERSION(9, 4, 2):
1198 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1199 			if (bo_node == node) {
1200 				mapping_flags |= coherent ?
1201 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1202 				if (node->adev->gmc.xgmi.connected_to_cpu)
1203 					snoop = true;
1204 			} else {
1205 				mapping_flags |= coherent ?
1206 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1207 				if (svm_nodes_in_same_hive(node, bo_node))
1208 					snoop = true;
1209 			}
1210 		} else {
1211 			mapping_flags |= coherent ?
1212 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1213 		}
1214 		break;
1215 	case IP_VERSION(9, 4, 3):
1216 	case IP_VERSION(9, 4, 4):
1217 		if (ext_coherent)
1218 			mtype_local = node->adev->rev_id ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_UC;
1219 		else
1220 			mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1221 				amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1222 		snoop = true;
1223 		if (uncached) {
1224 			mapping_flags |= AMDGPU_VM_MTYPE_UC;
1225 		} else if (domain == SVM_RANGE_VRAM_DOMAIN) {
1226 			/* local HBM region close to partition */
1227 			if (bo_node->adev == node->adev &&
1228 			    (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
1229 				mapping_flags |= mtype_local;
1230 			/* local HBM region far from partition or remote XGMI GPU
1231 			 * with regular system scope coherence
1232 			 */
1233 			else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
1234 				mapping_flags |= AMDGPU_VM_MTYPE_NC;
1235 			/* PCIe P2P or extended system scope coherence */
1236 			else
1237 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
1238 		/* system memory accessed by the APU */
1239 		} else if (node->adev->flags & AMD_IS_APU) {
1240 			/* On NUMA systems, locality is determined per-page
1241 			 * in amdgpu_gmc_override_vm_pte_flags
1242 			 */
1243 			if (num_possible_nodes() <= 1)
1244 				mapping_flags |= mtype_local;
1245 			else
1246 				mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1247 		/* system memory accessed by the dGPU */
1248 		} else {
1249 			mapping_flags |= AMDGPU_VM_MTYPE_UC;
1250 		}
1251 		break;
1252 	case IP_VERSION(12, 0, 0):
1253 	case IP_VERSION(12, 0, 1):
1254 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1255 			if (bo_node != node)
1256 				mapping_flags |= AMDGPU_VM_MTYPE_NC;
1257 		} else {
1258 			mapping_flags |= coherent ?
1259 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1260 		}
1261 		break;
1262 	default:
1263 		mapping_flags |= coherent ?
1264 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1265 	}
1266 
1267 	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1268 
1269 	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1270 		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1271 	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1272 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1273 
1274 	pte_flags = AMDGPU_PTE_VALID;
1275 	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1276 	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1277 	if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
1278 		pte_flags |= AMDGPU_PTE_IS_PTE;
1279 
1280 	pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
1281 	return pte_flags;
1282 }
1283 
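/* Worked example (illustrative, not part of the driver): for an ASIC that
 * falls through to the default case above, mapping system memory with
 * KFD_IOCTL_SVM_FLAG_COHERENT set and neither GPU_RO nor GPU_EXEC, the result
 * is roughly
 *
 *	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *	amdgpu_gem_va_map_flags(adev, AMDGPU_VM_MTYPE_UC |
 *			AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE)
 *
 * since snoop is true whenever the domain is not VRAM.
 */
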
1284 static int
1285 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1286 			 uint64_t start, uint64_t last,
1287 			 struct dma_fence **fence)
1288 {
1289 	uint64_t init_pte_value = 0;
1290 
1291 	pr_debug("[0x%llx 0x%llx]\n", start, last);
1292 
1293 	return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
1294 				      last, init_pte_value, 0, 0, NULL, NULL,
1295 				      fence);
1296 }
1297 
1298 static int
1299 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1300 			  unsigned long last, uint32_t trigger)
1301 {
1302 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1303 	struct kfd_process_device *pdd;
1304 	struct dma_fence *fence = NULL;
1305 	struct kfd_process *p;
1306 	uint32_t gpuidx;
1307 	int r = 0;
1308 
1309 	if (!prange->mapped_to_gpu) {
1310 		pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1311 			 prange, prange->start, prange->last);
1312 		return 0;
1313 	}
1314 
1315 	if (prange->start == start && prange->last == last) {
1316 		pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1317 		prange->mapped_to_gpu = false;
1318 	}
1319 
1320 	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1321 		  MAX_GPU_INSTANCE);
1322 	p = container_of(prange->svms, struct kfd_process, svms);
1323 
1324 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1325 		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1326 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1327 		if (!pdd) {
1328 			pr_debug("failed to find device idx %d\n", gpuidx);
1329 			return -EINVAL;
1330 		}
1331 
1332 		kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1333 					     start, last, trigger);
1334 
1335 		r = svm_range_unmap_from_gpu(pdd->dev->adev,
1336 					     drm_priv_to_vm(pdd->drm_priv),
1337 					     start, last, &fence);
1338 		if (r)
1339 			break;
1340 
1341 		if (fence) {
1342 			r = dma_fence_wait(fence, false);
1343 			dma_fence_put(fence);
1344 			fence = NULL;
1345 			if (r)
1346 				break;
1347 		}
1348 		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
1349 	}
1350 
1351 	return r;
1352 }
1353 
1354 static int
1355 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1356 		     unsigned long offset, unsigned long npages, bool readonly,
1357 		     dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1358 		     struct dma_fence **fence, bool flush_tlb)
1359 {
1360 	struct amdgpu_device *adev = pdd->dev->adev;
1361 	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1362 	uint64_t pte_flags;
1363 	unsigned long last_start;
1364 	int last_domain;
1365 	int r = 0;
1366 	int64_t i, j;
1367 
1368 	last_start = prange->start + offset;
1369 
1370 	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1371 		 last_start, last_start + npages - 1, readonly);
1372 
1373 	for (i = offset; i < offset + npages; i++) {
1374 		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1375 		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1376 
1377 		/* Collect all pages in the same address range and memory domain
1378 		 * that can be mapped with a single call to update mapping.
1379 		 */
1380 		if (i < offset + npages - 1 &&
1381 		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1382 			continue;
1383 
1384 		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1385 			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1386 
1387 		pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
1388 		if (readonly)
1389 			pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1390 
1391 		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1392 			 prange->svms, last_start, prange->start + i,
1393 			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1394 			 pte_flags);
1395 
1396 		/* For dGPU mode, we use the same vm_manager to allocate VRAM for
1397 		 * different memory partitions based on fpfn/lpfn, so we should use
1398 		 * the same vm_manager.vram_base_offset regardless of the memory partition.
1399 		 */
1400 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
1401 					   NULL, last_start, prange->start + i,
1402 					   pte_flags,
1403 					   (last_start - prange->start) << PAGE_SHIFT,
1404 					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1405 					   NULL, dma_addr, &vm->last_update);
1406 
1407 		for (j = last_start - prange->start; j <= i; j++)
1408 			dma_addr[j] |= last_domain;
1409 
1410 		if (r) {
1411 			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1412 			goto out;
1413 		}
1414 		last_start = prange->start + i + 1;
1415 	}
1416 
1417 	r = amdgpu_vm_update_pdes(adev, vm, false);
1418 	if (r) {
1419 		pr_debug("failed %d to update directories 0x%lx\n", r,
1420 			 prange->start);
1421 		goto out;
1422 	}
1423 
1424 	if (fence)
1425 		*fence = dma_fence_get(vm->last_update);
1426 
1427 out:
1428 	return r;
1429 }
1430 
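/* Worked example (illustrative, not part of the driver): with npages = 4 and
 * dma_addr[] domain tags [VRAM, VRAM, system, system], the loop above batches
 * the update into two amdgpu_vm_update_range() calls: one covering the first
 * two pages with VRAM PTE flags, then one covering the last two pages with
 * system-memory PTE flags.
 */
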
1431 static int
1432 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1433 		      unsigned long npages, bool readonly,
1434 		      unsigned long *bitmap, bool wait, bool flush_tlb)
1435 {
1436 	struct kfd_process_device *pdd;
1437 	struct amdgpu_device *bo_adev = NULL;
1438 	struct kfd_process *p;
1439 	struct dma_fence *fence = NULL;
1440 	uint32_t gpuidx;
1441 	int r = 0;
1442 
1443 	if (prange->svm_bo && prange->ttm_res)
1444 		bo_adev = prange->svm_bo->node->adev;
1445 
1446 	p = container_of(prange->svms, struct kfd_process, svms);
1447 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1448 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1449 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1450 		if (!pdd) {
1451 			pr_debug("failed to find device idx %d\n", gpuidx);
1452 			return -EINVAL;
1453 		}
1454 
1455 		pdd = kfd_bind_process_to_device(pdd->dev, p);
1456 		if (IS_ERR(pdd))
1457 			return -EINVAL;
1458 
1459 		if (bo_adev && pdd->dev->adev != bo_adev &&
1460 		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1461 			pr_debug("cannot map to device idx %d\n", gpuidx);
1462 			continue;
1463 		}
1464 
1465 		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1466 					 prange->dma_addr[gpuidx],
1467 					 bo_adev, wait ? &fence : NULL,
1468 					 flush_tlb);
1469 		if (r)
1470 			break;
1471 
1472 		if (fence) {
1473 			r = dma_fence_wait(fence, false);
1474 			dma_fence_put(fence);
1475 			fence = NULL;
1476 			if (r) {
1477 				pr_debug("failed %d to dma fence wait\n", r);
1478 				break;
1479 			}
1480 		}
1481 
1482 		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1483 	}
1484 
1485 	return r;
1486 }
1487 
1488 struct svm_validate_context {
1489 	struct kfd_process *process;
1490 	struct svm_range *prange;
1491 	bool intr;
1492 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1493 	struct drm_exec exec;
1494 };
1495 
1496 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
1497 {
1498 	struct kfd_process_device *pdd;
1499 	struct amdgpu_vm *vm;
1500 	uint32_t gpuidx;
1501 	int r;
1502 
1503 	drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0, 0);
1504 	drm_exec_until_all_locked(&ctx->exec) {
1505 		for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1506 			pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1507 			if (!pdd) {
1508 				pr_debug("failed to find device idx %d\n", gpuidx);
1509 				r = -EINVAL;
1510 				goto unreserve_out;
1511 			}
1512 			vm = drm_priv_to_vm(pdd->drm_priv);
1513 
1514 			r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1515 			drm_exec_retry_on_contention(&ctx->exec);
1516 			if (unlikely(r)) {
1517 				pr_debug("failed %d to reserve bo\n", r);
1518 				goto unreserve_out;
1519 			}
1520 		}
1521 	}
1522 
1523 	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1524 		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1525 		if (!pdd) {
1526 			pr_debug("failed to find device idx %d\n", gpuidx);
1527 			r = -EINVAL;
1528 			goto unreserve_out;
1529 		}
1530 
1531 		r = amdgpu_vm_validate(pdd->dev->adev,
1532 				       drm_priv_to_vm(pdd->drm_priv), NULL,
1533 				       svm_range_bo_validate, NULL);
1534 		if (r) {
1535 			pr_debug("failed %d validate pt bos\n", r);
1536 			goto unreserve_out;
1537 		}
1538 	}
1539 
1540 	return 0;
1541 
1542 unreserve_out:
1543 	drm_exec_fini(&ctx->exec);
1544 	return r;
1545 }
1546 
1547 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1548 {
1549 	drm_exec_fini(&ctx->exec);
1550 }
1551 
1552 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1553 {
1554 	struct kfd_process_device *pdd;
1555 
1556 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1557 	if (!pdd)
1558 		return NULL;
1559 
1560 	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1561 }
1562 
1563 /*
1564  * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1565  *
1566  * To prevent concurrent destruction or change of range attributes, the
1567  * svm_read_lock must be held. The caller must not hold the svm_write_lock
1568  * because that would block concurrent evictions and lead to deadlocks. To
1569  * serialize concurrent migrations or validations of the same range, the
1570  * prange->migrate_mutex must be held.
1571  *
1572  * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1573  * eviction fence).
1574  *
1575  * The following sequence ensures race-free validation and GPU mapping:
1576  *
1577  * 1. Reserve page table (and SVM BO if range is in VRAM)
1578  * 2. hmm_range_fault to get page addresses (if system memory)
1579  * 3. DMA-map pages (if system memory)
1580  * 4-a. Take notifier lock
1581  * 4-b. Check that the pages are still valid (mmu_interval_read_retry)
1582  * 4-c. Check that the range was not split or otherwise invalidated
1583  * 4-d. Update GPU page table
1584  * 4-e. Release notifier lock
1585  * 5. Release page table (and SVM BO) reservation
1586  */
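/* Minimal sketch (illustrative and simplified, not the driver's exact code) of
 * the notifier-retry pattern the sequence above relies on; in the real
 * implementation amdgpu_hmm_range_get_pages()/_done() wrap the sequence
 * handling and svm_range_lock() takes the notifier lock:
 *
 *	do {
 *		seq = mmu_interval_read_begin(&prange->notifier);
 *		// 2-3: hmm_range_fault and DMA-map the pages
 *		svm_range_lock(prange);
 *		retry = mmu_interval_read_retry(&prange->notifier, seq);
 *		if (!retry) {
 *			// 4-c/4-d: check for splits, update GPU page tables
 *		}
 *		svm_range_unlock(prange);
 *	} while (retry);
 */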
1587 static int svm_range_validate_and_map(struct mm_struct *mm,
1588 				      unsigned long map_start, unsigned long map_last,
1589 				      struct svm_range *prange, int32_t gpuidx,
1590 				      bool intr, bool wait, bool flush_tlb)
1591 {
1592 	struct svm_validate_context *ctx;
1593 	unsigned long start, end, addr;
1594 	struct kfd_process *p;
1595 	void *owner;
1596 	int32_t idx;
1597 	int r = 0;
1598 
1599 	ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
1600 	if (!ctx)
1601 		return -ENOMEM;
1602 	ctx->process = container_of(prange->svms, struct kfd_process, svms);
1603 	ctx->prange = prange;
1604 	ctx->intr = intr;
1605 
1606 	if (gpuidx < MAX_GPU_INSTANCE) {
1607 		bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1608 		bitmap_set(ctx->bitmap, gpuidx, 1);
1609 	} else if (ctx->process->xnack_enabled) {
1610 		bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1611 
1612 		/* If the range was prefetched to a GPU, or a GPU retry fault
1613 		 * migrated the range to a GPU that has the ACCESS attribute for
1614 		 * the range, create the mapping on that GPU.
1615 		 */
1616 		if (prange->actual_loc) {
1617 			gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1618 							prange->actual_loc);
1619 			if (gpuidx < 0) {
1620 				WARN_ONCE(1, "failed get device by id 0x%x\n",
1621 					 prange->actual_loc);
1622 				r = -EINVAL;
1623 				goto free_ctx;
1624 			}
1625 			if (test_bit(gpuidx, prange->bitmap_access))
1626 				bitmap_set(ctx->bitmap, gpuidx, 1);
1627 		}
1628 
1629 		/*
1630 		 * If prange is already mapped or has the always-mapped flag,
1631 		 * update the mapping on GPUs with the ACCESS attribute
1632 		 */
1633 		if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1634 			if (prange->mapped_to_gpu ||
1635 			    prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1636 				bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1637 		}
1638 	} else {
1639 		bitmap_or(ctx->bitmap, prange->bitmap_access,
1640 			  prange->bitmap_aip, MAX_GPU_INSTANCE);
1641 	}
1642 
1643 	if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1644 		r = 0;
1645 		goto free_ctx;
1646 	}
1647 
1648 	if (prange->actual_loc && !prange->ttm_res) {
1649 		/* This should never happen. actual_loc gets set by
1650 		 * svm_migrate_ram_to_vram after allocating a BO.
1651 		 */
1652 		WARN_ONCE(1, "VRAM BO missing during validation\n");
1653 		r = -EINVAL;
1654 		goto free_ctx;
1655 	}
1656 
1657 	r = svm_range_reserve_bos(ctx, intr);
1658 	if (r)
1659 		goto free_ctx;
1660 
1661 	p = container_of(prange->svms, struct kfd_process, svms);
1662 	owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1663 						MAX_GPU_INSTANCE));
1664 	for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1665 		if (kfd_svm_page_owner(p, idx) != owner) {
1666 			owner = NULL;
1667 			break;
1668 		}
1669 	}
1670 
1671 	start = map_start << PAGE_SHIFT;
1672 	end = (map_last + 1) << PAGE_SHIFT;
1673 	for (addr = start; !r && addr < end; ) {
1674 		struct hmm_range *hmm_range = NULL;
1675 		unsigned long map_start_vma;
1676 		unsigned long map_last_vma;
1677 		struct vm_area_struct *vma;
1678 		unsigned long next = 0;
1679 		unsigned long offset;
1680 		unsigned long npages;
1681 		bool readonly;
1682 
1683 		vma = vma_lookup(mm, addr);
1684 		if (vma) {
1685 			readonly = !(vma->vm_flags & VM_WRITE);
1686 
1687 			next = min(vma->vm_end, end);
1688 			npages = (next - addr) >> PAGE_SHIFT;
1689 			WRITE_ONCE(p->svms.faulting_task, current);
1690 			r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1691 						       readonly, owner, NULL,
1692 						       &hmm_range);
1693 			WRITE_ONCE(p->svms.faulting_task, NULL);
1694 			if (r)
1695 				pr_debug("failed %d to get svm range pages\n", r);
1696 		} else {
1697 			r = -EFAULT;
1698 		}
1699 
1700 		if (!r) {
1701 			offset = (addr >> PAGE_SHIFT) - prange->start;
1702 			r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1703 					      hmm_range->hmm_pfns);
1704 			if (r)
1705 				pr_debug("failed %d to dma map range\n", r);
1706 		}
1707 
1708 		svm_range_lock(prange);
1709 
1710 		/* Free backing memory of hmm_range if it was initialized
1711 		 * Override return value to TRY AGAIN only if prior returns
1712 		 * were successful
1713 		 */
1714 		if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
1715 			pr_debug("hmm update the range, need validate again\n");
1716 			r = -EAGAIN;
1717 		}
1718 
1719 		if (!r && !list_empty(&prange->child_list)) {
1720 			pr_debug("range split by unmap in parallel, validate again\n");
1721 			r = -EAGAIN;
1722 		}
1723 
1724 		if (!r) {
1725 			map_start_vma = max(map_start, prange->start + offset);
1726 			map_last_vma = min(map_last, prange->start + offset + npages - 1);
1727 			if (map_start_vma <= map_last_vma) {
1728 				offset = map_start_vma - prange->start;
1729 				npages = map_last_vma - map_start_vma + 1;
1730 				r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1731 							  ctx->bitmap, wait, flush_tlb);
1732 			}
1733 		}
1734 
1735 		if (!r && next == end)
1736 			prange->mapped_to_gpu = true;
1737 
1738 		svm_range_unlock(prange);
1739 
1740 		addr = next;
1741 	}
1742 
1743 	svm_range_unreserve_bos(ctx);
1744 	if (!r)
1745 		prange->validate_timestamp = ktime_get_boottime();
1746 
1747 free_ctx:
1748 	kfree(ctx);
1749 
1750 	return r;
1751 }
1752 
1753 /**
1754  * svm_range_list_lock_and_flush_work - flush pending deferred work
1755  *
1756  * @svms: the svm range list
1757  * @mm: the mm structure
1758  *
1759  * Context: Returns with mmap write lock held, pending deferred work flushed
1760  *
1761  */
1762 void
1763 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1764 				   struct mm_struct *mm)
1765 {
1766 retry_flush_work:
1767 	flush_work(&svms->deferred_list_work);
1768 	mmap_write_lock(mm);
1769 
1770 	if (list_empty(&svms->deferred_range_list))
1771 		return;
1772 	mmap_write_unlock(mm);
1773 	pr_debug("retry flush\n");
1774 	goto retry_flush_work;
1775 }
1776 
1777 static void svm_range_restore_work(struct work_struct *work)
1778 {
1779 	struct delayed_work *dwork = to_delayed_work(work);
1780 	struct amdkfd_process_info *process_info;
1781 	struct svm_range_list *svms;
1782 	struct svm_range *prange;
1783 	struct kfd_process *p;
1784 	struct mm_struct *mm;
1785 	int evicted_ranges;
1786 	int invalid;
1787 	int r;
1788 
1789 	svms = container_of(dwork, struct svm_range_list, restore_work);
1790 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1791 	if (!evicted_ranges)
1792 		return;
1793 
1794 	pr_debug("restore svm ranges\n");
1795 
1796 	p = container_of(svms, struct kfd_process, svms);
1797 	process_info = p->kgd_process_info;
1798 
1799 	/* Keep the mm reference while svm_range_validate_and_map maps the ranges */
1800 	mm = get_task_mm(p->lead_thread);
1801 	if (!mm) {
1802 		pr_debug("svms 0x%p process mm gone\n", svms);
1803 		return;
1804 	}
1805 
1806 	mutex_lock(&process_info->lock);
1807 	svm_range_list_lock_and_flush_work(svms, mm);
1808 	mutex_lock(&svms->lock);
1809 
1810 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1811 
1812 	list_for_each_entry(prange, &svms->list, list) {
1813 		invalid = atomic_read(&prange->invalid);
1814 		if (!invalid)
1815 			continue;
1816 
1817 		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1818 			 prange->svms, prange, prange->start, prange->last,
1819 			 invalid);
1820 
1821 		/*
1822 		 * If the range is migrating, wait until the migration is done.
1823 		 */
1824 		mutex_lock(&prange->migrate_mutex);
1825 
1826 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
1827 					       MAX_GPU_INSTANCE, false, true, false);
1828 		if (r)
1829 			pr_debug("failed %d to map 0x%lx to gpus\n", r,
1830 				 prange->start);
1831 
1832 		mutex_unlock(&prange->migrate_mutex);
1833 		if (r)
1834 			goto out_reschedule;
1835 
1836 		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1837 			goto out_reschedule;
1838 	}
1839 
1840 	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1841 	    evicted_ranges)
1842 		goto out_reschedule;
1843 
1844 	evicted_ranges = 0;
1845 
1846 	r = kgd2kfd_resume_mm(mm);
1847 	if (r) {
1848 		/* No recovery from this failure. Probably the CP is
1849 		 * hanging. No point trying again.
1850 		 */
1851 		pr_debug("failed %d to resume KFD\n", r);
1852 	}
1853 
1854 	pr_debug("restored svm ranges successfully\n");
1855 
1856 out_reschedule:
1857 	mutex_unlock(&svms->lock);
1858 	mmap_write_unlock(mm);
1859 	mutex_unlock(&process_info->lock);
1860 
1861 	/* If validation failed, reschedule another attempt */
1862 	if (evicted_ranges) {
1863 		pr_debug("reschedule to restore svm range\n");
1864 		queue_delayed_work(system_freezable_wq, &svms->restore_work,
1865 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1866 
1867 		kfd_smi_event_queue_restore_rescheduled(mm);
1868 	}
1869 	mmput(mm);
1870 }
1871 
1872 /**
1873  * svm_range_evict - evict svm range
1874  * @prange: svm range structure
1875  * @mm: current process mm_struct
1876  * @start: first page of the range being invalidated, in pages
1877  * @last: last page of the range being invalidated, in pages
1878  * @event: mmu notifier event when range is evicted or migrated
1879  *
1880  * Stop all queues of the process to ensure GPU doesn't access the memory, then
1881  * return to let CPU evict the buffer and proceed CPU pagetable update.
1882  *
1883  * No lock is needed to sync the CPU pagetable invalidation with GPU execution.
1884  * If an invalidation happens while the restore work is running, the restore
1885  * work will restart to ensure it picks up the latest CPU page mapping for the
1886  * GPU, then start the queues.
1887  */
1888 static int
1889 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1890 		unsigned long start, unsigned long last,
1891 		enum mmu_notifier_event event)
1892 {
1893 	struct svm_range_list *svms = prange->svms;
1894 	struct svm_range *pchild;
1895 	struct kfd_process *p;
1896 	int r = 0;
1897 
1898 	p = container_of(svms, struct kfd_process, svms);
1899 
1900 	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1901 		 svms, prange->start, prange->last, start, last);
1902 
1903 	if (!p->xnack_enabled ||
1904 	    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1905 		int evicted_ranges;
1906 		bool mapped = prange->mapped_to_gpu;
1907 
1908 		list_for_each_entry(pchild, &prange->child_list, child_list) {
1909 			if (!pchild->mapped_to_gpu)
1910 				continue;
1911 			mapped = true;
1912 			mutex_lock_nested(&pchild->lock, 1);
1913 			if (pchild->start <= last && pchild->last >= start) {
1914 				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1915 					 pchild->start, pchild->last);
1916 				atomic_inc(&pchild->invalid);
1917 			}
1918 			mutex_unlock(&pchild->lock);
1919 		}
1920 
1921 		if (!mapped)
1922 			return r;
1923 
1924 		if (prange->start <= last && prange->last >= start)
1925 			atomic_inc(&prange->invalid);
1926 
1927 		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1928 		if (evicted_ranges != 1)
1929 			return r;
1930 
1931 		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1932 			 prange->svms, prange->start, prange->last);
1933 
1934 		/* First eviction, stop the queues */
1935 		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
1936 		if (r)
1937 			pr_debug("failed to quiesce KFD\n");
1938 
1939 		pr_debug("schedule to restore svm %p ranges\n", svms);
1940 		queue_delayed_work(system_freezable_wq, &svms->restore_work,
1941 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1942 	} else {
1943 		unsigned long s, l;
1944 		uint32_t trigger;
1945 
1946 		if (event == MMU_NOTIFY_MIGRATE)
1947 			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
1948 		else
1949 			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
1950 
1951 		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1952 			 prange->svms, start, last);
1953 		list_for_each_entry(pchild, &prange->child_list, child_list) {
1954 			mutex_lock_nested(&pchild->lock, 1);
1955 			s = max(start, pchild->start);
1956 			l = min(last, pchild->last);
1957 			if (l >= s)
1958 				svm_range_unmap_from_gpus(pchild, s, l, trigger);
1959 			mutex_unlock(&pchild->lock);
1960 		}
1961 		s = max(start, prange->start);
1962 		l = min(last, prange->last);
1963 		if (l >= s)
1964 			svm_range_unmap_from_gpus(prange, s, l, trigger);
1965 	}
1966 
1967 	return r;
1968 }
1969 
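/* svm_range_clone - duplicate an svm_range for a transactional update
 *
 * Allocate a new range covering the same interval and copy the DMA addresses,
 * svm_bo reference, flags, locations and access bitmaps, so the original range
 * can stay unchanged if the update fails.
 */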
1970 static struct svm_range *svm_range_clone(struct svm_range *old)
1971 {
1972 	struct svm_range *new;
1973 
1974 	new = svm_range_new(old->svms, old->start, old->last, false);
1975 	if (!new)
1976 		return NULL;
1977 	if (svm_range_copy_dma_addrs(new, old)) {
1978 		svm_range_free(new, false);
1979 		return NULL;
1980 	}
1981 	if (old->svm_bo) {
1982 		new->ttm_res = old->ttm_res;
1983 		new->offset = old->offset;
1984 		new->svm_bo = svm_range_bo_ref(old->svm_bo);
1985 		spin_lock(&new->svm_bo->list_lock);
1986 		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1987 		spin_unlock(&new->svm_bo->list_lock);
1988 	}
1989 	new->flags = old->flags;
1990 	new->preferred_loc = old->preferred_loc;
1991 	new->prefetch_loc = old->prefetch_loc;
1992 	new->actual_loc = old->actual_loc;
1993 	new->granularity = old->granularity;
1994 	new->mapped_to_gpu = old->mapped_to_gpu;
1995 	new->vram_pages = old->vram_pages;
1996 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1997 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1998 
1999 	return new;
2000 }
2001 
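/* svm_range_set_max_pages - update the global svm range size limit
 *
 * Take 1/32 of each KFD node's memory size in pages, clamp it between
 * 512 pages (2MB) and 256K pages (1GB), round down to a power of two, and
 * atomically keep the minimum across all GPUs in max_svm_range_pages.
 */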
2002 void svm_range_set_max_pages(struct amdgpu_device *adev)
2003 {
2004 	uint64_t max_pages;
2005 	uint64_t pages, _pages;
2006 	uint64_t min_pages = 0;
2007 	int i, id;
2008 
2009 	for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
2010 		if (adev->kfd.dev->nodes[i]->xcp)
2011 			id = adev->kfd.dev->nodes[i]->xcp->id;
2012 		else
2013 			id = -1;
2014 		pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
2015 		pages = clamp(pages, 1ULL << 9, 1ULL << 18);
2016 		pages = rounddown_pow_of_two(pages);
2017 		min_pages = min_not_zero(min_pages, pages);
2018 	}
2019 
2020 	do {
2021 		max_pages = READ_ONCE(max_svm_range_pages);
2022 		_pages = min_not_zero(max_pages, min_pages);
2023 	} while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2024 }
2025 
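/* svm_range_split_new - create new ranges covering [start, last]
 *
 * Create one svm_range per max_pages-aligned chunk of the interval and add
 * each new range to both insert_list and update_list.
 */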
2026 static int
2027 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2028 		    uint64_t max_pages, struct list_head *insert_list,
2029 		    struct list_head *update_list)
2030 {
2031 	struct svm_range *prange;
2032 	uint64_t l;
2033 
2034 	pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
2035 		 max_pages, start, last);
2036 
2037 	while (last >= start) {
2038 		l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2039 
2040 		prange = svm_range_new(svms, start, l, true);
2041 		if (!prange)
2042 			return -ENOMEM;
2043 		list_add(&prange->list, insert_list);
2044 		list_add(&prange->update_list, update_list);
2045 
2046 		start = l + 1;
2047 	}
2048 	return 0;
2049 }
2050 
2051 /**
2052  * svm_range_add - add svm range and handle overlap
2053  * @p: the process whose svms the range is added to
2054  * @start: range start address, in pages
2055  * @size: range size, in pages
2056  * @nattr: number of attributes
2057  * @attrs: array of attributes
2058  * @update_list: output, the ranges need validate and update GPU mapping
2059  * @insert_list: output, the ranges need insert to svms
2060  * @remove_list: output, the ranges are replaced and need remove from svms
2061  * @remap_list: output, remap unaligned svm ranges
2062  *
2063  * Check if the virtual address range has overlap with any existing ranges,
2064  * split partly overlapping ranges and add new ranges in the gaps. All changes
2065  * should be applied to the range_list and interval tree transactionally. If
2066  * any range split or allocation fails, the entire update fails. Therefore any
2067  * existing overlapping svm_ranges are cloned and the original svm_ranges left
2068  * unchanged.
2069  *
2070  * If the transaction succeeds, the caller can update and insert clones and
2071  * new ranges, then free the originals.
2072  *
2073  * Otherwise the caller can free the clones and new ranges, while the old
2074  * svm_ranges remain unchanged.
2075  *
2076  * Context: Process context, caller must hold svms->lock
2077  *
2078  * Return:
2079  * 0 - OK, otherwise error code
2080  */
2081 static int
2082 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2083 	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2084 	      struct list_head *update_list, struct list_head *insert_list,
2085 	      struct list_head *remove_list, struct list_head *remap_list)
2086 {
2087 	unsigned long last = start + size - 1UL;
2088 	struct svm_range_list *svms = &p->svms;
2089 	struct interval_tree_node *node;
2090 	struct svm_range *prange;
2091 	struct svm_range *tmp;
2092 	struct list_head new_list;
2093 	int r = 0;
2094 
2095 	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2096 
2097 	INIT_LIST_HEAD(update_list);
2098 	INIT_LIST_HEAD(insert_list);
2099 	INIT_LIST_HEAD(remove_list);
2100 	INIT_LIST_HEAD(&new_list);
2101 	INIT_LIST_HEAD(remap_list);
2102 
2103 	node = interval_tree_iter_first(&svms->objects, start, last);
2104 	while (node) {
2105 		struct interval_tree_node *next;
2106 		unsigned long next_start;
2107 
2108 		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2109 			 node->last);
2110 
2111 		prange = container_of(node, struct svm_range, it_node);
2112 		next = interval_tree_iter_next(node, start, last);
2113 		next_start = min(node->last, last) + 1;
2114 
2115 		if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2116 		    prange->mapped_to_gpu) {
2117 			/* nothing to do */
2118 		} else if (node->start < start || node->last > last) {
2119 			/* node intersects the update range and its attributes
2120 			 * will change. Clone and split it, apply updates only
2121 			 * to the overlapping part
2122 			 */
2123 			struct svm_range *old = prange;
2124 
2125 			prange = svm_range_clone(old);
2126 			if (!prange) {
2127 				r = -ENOMEM;
2128 				goto out;
2129 			}
2130 
2131 			list_add(&old->update_list, remove_list);
2132 			list_add(&prange->list, insert_list);
2133 			list_add(&prange->update_list, update_list);
2134 
2135 			if (node->start < start) {
2136 				pr_debug("change old range start\n");
2137 				r = svm_range_split_head(prange, start,
2138 							 insert_list, remap_list);
2139 				if (r)
2140 					goto out;
2141 			}
2142 			if (node->last > last) {
2143 				pr_debug("change old range last\n");
2144 				r = svm_range_split_tail(prange, last,
2145 							 insert_list, remap_list);
2146 				if (r)
2147 					goto out;
2148 			}
2149 		} else {
2150 			/* The node is contained within start..last,
2151 			 * just update it
2152 			 */
2153 			list_add(&prange->update_list, update_list);
2154 		}
2155 
2156 		/* insert a new node if needed */
2157 		if (node->start > start) {
2158 			r = svm_range_split_new(svms, start, node->start - 1,
2159 						READ_ONCE(max_svm_range_pages),
2160 						&new_list, update_list);
2161 			if (r)
2162 				goto out;
2163 		}
2164 
2165 		node = next;
2166 		start = next_start;
2167 	}
2168 
2169 	/* add a final range at the end if needed */
2170 	if (start <= last)
2171 		r = svm_range_split_new(svms, start, last,
2172 					READ_ONCE(max_svm_range_pages),
2173 					&new_list, update_list);
2174 
2175 out:
2176 	if (r) {
2177 		list_for_each_entry_safe(prange, tmp, insert_list, list)
2178 			svm_range_free(prange, false);
2179 		list_for_each_entry_safe(prange, tmp, &new_list, list)
2180 			svm_range_free(prange, true);
2181 	} else {
2182 		list_splice(&new_list, insert_list);
2183 	}
2184 
2185 	return r;
2186 }
2187 
2188 static void
2189 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2190 					    struct svm_range *prange)
2191 {
2192 	unsigned long start;
2193 	unsigned long last;
2194 
2195 	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2196 	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2197 
2198 	if (prange->start == start && prange->last == last)
2199 		return;
2200 
2201 	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2202 		  prange->svms, prange, start, last, prange->start,
2203 		  prange->last);
2204 
2205 	if (start != 0 && last != 0) {
2206 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
2207 		svm_range_remove_notifier(prange);
2208 	}
2209 	prange->it_node.start = prange->start;
2210 	prange->it_node.last = prange->last;
2211 
2212 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
2213 	svm_range_add_notifier_locked(mm, prange);
2214 }
2215 
2216 static void
2217 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2218 			 struct mm_struct *mm)
2219 {
2220 	switch (prange->work_item.op) {
2221 	case SVM_OP_NULL:
2222 		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2223 			 svms, prange, prange->start, prange->last);
2224 		break;
2225 	case SVM_OP_UNMAP_RANGE:
2226 		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2227 			 svms, prange, prange->start, prange->last);
2228 		svm_range_unlink(prange);
2229 		svm_range_remove_notifier(prange);
2230 		svm_range_free(prange, true);
2231 		break;
2232 	case SVM_OP_UPDATE_RANGE_NOTIFIER:
2233 		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2234 			 svms, prange, prange->start, prange->last);
2235 		svm_range_update_notifier_and_interval_tree(mm, prange);
2236 		break;
2237 	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2238 		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2239 			 svms, prange, prange->start, prange->last);
2240 		svm_range_update_notifier_and_interval_tree(mm, prange);
2241 		/* TODO: implement deferred validation and mapping */
2242 		break;
2243 	case SVM_OP_ADD_RANGE:
2244 		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2245 			 prange->start, prange->last);
2246 		svm_range_add_to_svms(prange);
2247 		svm_range_add_notifier_locked(mm, prange);
2248 		break;
2249 	case SVM_OP_ADD_RANGE_AND_MAP:
2250 		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2251 			 prange, prange->start, prange->last);
2252 		svm_range_add_to_svms(prange);
2253 		svm_range_add_notifier_locked(mm, prange);
2254 		/* TODO: implement deferred validation and mapping */
2255 		break;
2256 	default:
2257 		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2258 			 prange->work_item.op);
2259 	}
2260 }
2261 
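/* svm_range_drain_retry_fault - drain in-flight GPU retry faults
 *
 * For every supported GPU, wait until the interrupt handler has processed all
 * retry faults already queued in the IH ring, so no stale fault can reference
 * a range that is about to be freed. Restart if new drain requests arrived in
 * the meantime.
 */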
2262 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2263 {
2264 	struct kfd_process_device *pdd;
2265 	struct kfd_process *p;
2266 	int drain;
2267 	uint32_t i;
2268 
2269 	p = container_of(svms, struct kfd_process, svms);
2270 
2271 restart:
2272 	drain = atomic_read(&svms->drain_pagefaults);
2273 	if (!drain)
2274 		return;
2275 
2276 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2277 		pdd = p->pdds[i];
2278 		if (!pdd)
2279 			continue;
2280 
2281 		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2282 
2283 		amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2284 				pdd->dev->adev->irq.retry_cam_enabled ?
2285 				&pdd->dev->adev->irq.ih :
2286 				&pdd->dev->adev->irq.ih1);
2287 
2288 		if (pdd->dev->adev->irq.retry_cam_enabled)
2289 			amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2290 				&pdd->dev->adev->irq.ih_soft);
2291 
2292 
2293 		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2294 	}
2295 	if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
2296 		goto restart;
2297 }
2298 
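/* svm_range_deferred_list_work - process deferred range list operations
 *
 * Worker that handles the deferred work items (unmap, update notifier, add
 * range) under the mmap write lock and svms->lock. Retry faults are drained
 * first if requested, and child ranges are handled before their parent.
 */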
2299 static void svm_range_deferred_list_work(struct work_struct *work)
2300 {
2301 	struct svm_range_list *svms;
2302 	struct svm_range *prange;
2303 	struct mm_struct *mm;
2304 
2305 	svms = container_of(work, struct svm_range_list, deferred_list_work);
2306 	pr_debug("enter svms 0x%p\n", svms);
2307 
2308 	spin_lock(&svms->deferred_list_lock);
2309 	while (!list_empty(&svms->deferred_range_list)) {
2310 		prange = list_first_entry(&svms->deferred_range_list,
2311 					  struct svm_range, deferred_list);
2312 		spin_unlock(&svms->deferred_list_lock);
2313 
2314 		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2315 			 prange->start, prange->last, prange->work_item.op);
2316 
2317 		mm = prange->work_item.mm;
2318 retry:
2319 		mmap_write_lock(mm);
2320 
2321 		/* Checking for the need to drain retry faults must be inside
2322 		 * mmap write lock to serialize with munmap notifiers.
2323 		 */
2324 		if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2325 			mmap_write_unlock(mm);
2326 			svm_range_drain_retry_fault(svms);
2327 			goto retry;
2328 		}
2329 
2330 		/* Remove from deferred_list must be inside mmap write lock, for
2331 		 * two race cases:
2332 		 * 1. unmap_from_cpu may change work_item.op and add the range
2333 		 *    to deferred_list again, causing a use-after-free bug.
2334 		 * 2. svm_range_list_lock_and_flush_work may hold mmap write
2335 		 *    lock and continue because deferred_list is empty, but
2336 		 *    deferred_list work is actually waiting for mmap lock.
2337 		 */
2338 		spin_lock(&svms->deferred_list_lock);
2339 		list_del_init(&prange->deferred_list);
2340 		spin_unlock(&svms->deferred_list_lock);
2341 
2342 		mutex_lock(&svms->lock);
2343 		mutex_lock(&prange->migrate_mutex);
2344 		while (!list_empty(&prange->child_list)) {
2345 			struct svm_range *pchild;
2346 
2347 			pchild = list_first_entry(&prange->child_list,
2348 						struct svm_range, child_list);
2349 			pr_debug("child prange 0x%p op %d\n", pchild,
2350 				 pchild->work_item.op);
2351 			list_del_init(&pchild->child_list);
2352 			svm_range_handle_list_op(svms, pchild, mm);
2353 		}
2354 		mutex_unlock(&prange->migrate_mutex);
2355 
2356 		svm_range_handle_list_op(svms, prange, mm);
2357 		mutex_unlock(&svms->lock);
2358 		mmap_write_unlock(mm);
2359 
2360 		/* Pairs with mmget in svm_range_add_list_work. If dropping the
2361 		 * last mm refcount, schedule release work to avoid circular locking
2362 		 */
2363 		mmput_async(mm);
2364 
2365 		spin_lock(&svms->deferred_list_lock);
2366 	}
2367 	spin_unlock(&svms->deferred_list_lock);
2368 	pr_debug("exit svms 0x%p\n", svms);
2369 }
2370 
2371 void
2372 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2373 			struct mm_struct *mm, enum svm_work_list_ops op)
2374 {
2375 	spin_lock(&svms->deferred_list_lock);
2376 	/* if prange is on the deferred list */
2377 	if (!list_empty(&prange->deferred_list)) {
2378 		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2379 		WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
2380 		if (op != SVM_OP_NULL &&
2381 		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
2382 			prange->work_item.op = op;
2383 	} else {
2384 		prange->work_item.op = op;
2385 
2386 		/* Pairs with mmput in deferred_list_work */
2387 		mmget(mm);
2388 		prange->work_item.mm = mm;
2389 		list_add_tail(&prange->deferred_list,
2390 			      &prange->svms->deferred_range_list);
2391 		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2392 			 prange, prange->start, prange->last, op);
2393 	}
2394 	spin_unlock(&svms->deferred_list_lock);
2395 }
2396 
2397 void schedule_deferred_list_work(struct svm_range_list *svms)
2398 {
2399 	spin_lock(&svms->deferred_list_lock);
2400 	if (!list_empty(&svms->deferred_range_list))
2401 		schedule_work(&svms->deferred_list_work);
2402 	spin_unlock(&svms->deferred_list_lock);
2403 }
2404 
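/* svm_range_unmap_split - split prange around an unmapped interval
 *
 * Split prange so that only the piece overlapping [start, last] is removed
 * (SVM_OP_UNMAP_RANGE), while pieces outside the unmapped interval survive in
 * place or are re-added as child ranges (SVM_OP_ADD_RANGE).
 */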
2405 static void
2406 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2407 		      struct svm_range *prange, unsigned long start,
2408 		      unsigned long last)
2409 {
2410 	struct svm_range *head;
2411 	struct svm_range *tail;
2412 
2413 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2414 		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2415 			 prange->start, prange->last);
2416 		return;
2417 	}
2418 	if (start > prange->last || last < prange->start)
2419 		return;
2420 
2421 	head = tail = prange;
2422 	if (start > prange->start)
2423 		svm_range_split(prange, prange->start, start - 1, &tail);
2424 	if (last < tail->last)
2425 		svm_range_split(tail, last + 1, tail->last, &head);
2426 
2427 	if (head != prange && tail != prange) {
2428 		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2429 		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2430 	} else if (tail != prange) {
2431 		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2432 	} else if (head != prange) {
2433 		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2434 	} else if (parent != prange) {
2435 		prange->work_item.op = SVM_OP_UNMAP_RANGE;
2436 	}
2437 }
2438 
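/* svm_range_unmap_from_cpu - handle CPU unmap of part or all of a range
 *
 * Called from the MMU notifier for MMU_NOTIFY_UNMAP events. Unmap
 * [start, last] from all GPUs, split the affected ranges, and schedule the
 * deferred worker to remove the range or update its notifier.
 */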
2439 static void
2440 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2441 			 unsigned long start, unsigned long last)
2442 {
2443 	uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2444 	struct svm_range_list *svms;
2445 	struct svm_range *pchild;
2446 	struct kfd_process *p;
2447 	unsigned long s, l;
2448 	bool unmap_parent;
2449 
2450 	p = kfd_lookup_process_by_mm(mm);
2451 	if (!p)
2452 		return;
2453 	svms = &p->svms;
2454 
2455 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2456 		 prange, prange->start, prange->last, start, last);
2457 
2458 	/* Make sure pending page faults are drained in the deferred worker
2459 	 * before the range is freed to avoid straggler interrupts on
2460 	 * unmapped memory causing "phantom faults".
2461 	 */
2462 	atomic_inc(&svms->drain_pagefaults);
2463 
2464 	unmap_parent = start <= prange->start && last >= prange->last;
2465 
2466 	list_for_each_entry(pchild, &prange->child_list, child_list) {
2467 		mutex_lock_nested(&pchild->lock, 1);
2468 		s = max(start, pchild->start);
2469 		l = min(last, pchild->last);
2470 		if (l >= s)
2471 			svm_range_unmap_from_gpus(pchild, s, l, trigger);
2472 		svm_range_unmap_split(mm, prange, pchild, start, last);
2473 		mutex_unlock(&pchild->lock);
2474 	}
2475 	s = max(start, prange->start);
2476 	l = min(last, prange->last);
2477 	if (l >= s)
2478 		svm_range_unmap_from_gpus(prange, s, l, trigger);
2479 	svm_range_unmap_split(mm, prange, prange, start, last);
2480 
2481 	if (unmap_parent)
2482 		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2483 	else
2484 		svm_range_add_list_work(svms, prange, mm,
2485 					SVM_OP_UPDATE_RANGE_NOTIFIER);
2486 	schedule_deferred_list_work(svms);
2487 
2488 	kfd_unref_process(p);
2489 }
2490 
2491 /**
2492  * svm_range_cpu_invalidate_pagetables - interval notifier callback
2493  * @mni: mmu_interval_notifier struct
2494  * @range: mmu_notifier_range struct
2495  * @cur_seq: value to pass to mmu_interval_set_seq()
2496  *
2497  * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it
2498  * is from migration, or CPU page invalidation callback.
2499  *
2500  * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2501  * work thread, and split prange if only part of prange is unmapped.
2502  *
2503  * For invalidation event, if GPU retry fault is not enabled, evict the queues,
2504  * then schedule svm_range_restore_work to update GPU mapping and resume queues.
2505  * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will
2506  * update GPU mapping to recover.
2507  *
2508  * Context: mmap lock, notifier_invalidate_start lock are held
2509  *          for invalidate event, prange lock is held if this is from migration
2510  */
2511 static bool
2512 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2513 				    const struct mmu_notifier_range *range,
2514 				    unsigned long cur_seq)
2515 {
2516 	struct svm_range *prange;
2517 	unsigned long start;
2518 	unsigned long last;
2519 
2520 	if (range->event == MMU_NOTIFY_RELEASE)
2521 		return true;
2522 	if (!mmget_not_zero(mni->mm))
2523 		return true;
2524 
2525 	start = mni->interval_tree.start;
2526 	last = mni->interval_tree.last;
2527 	start = max(start, range->start) >> PAGE_SHIFT;
2528 	last = min(last, range->end - 1) >> PAGE_SHIFT;
2529 	pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2530 		 start, last, range->start >> PAGE_SHIFT,
2531 		 (range->end - 1) >> PAGE_SHIFT,
2532 		 mni->interval_tree.start >> PAGE_SHIFT,
2533 		 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2534 
2535 	prange = container_of(mni, struct svm_range, notifier);
2536 
2537 	svm_range_lock(prange);
2538 	mmu_interval_set_seq(mni, cur_seq);
2539 
2540 	switch (range->event) {
2541 	case MMU_NOTIFY_UNMAP:
2542 		svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2543 		break;
2544 	default:
2545 		svm_range_evict(prange, mni->mm, start, last, range->event);
2546 		break;
2547 	}
2548 
2549 	svm_range_unlock(prange);
2550 	mmput(mni->mm);
2551 
2552 	return true;
2553 }
2554 
2555 /**
2556  * svm_range_from_addr - find svm range from fault address
2557  * @svms: svm range list header
2558  * @addr: address to search range interval tree, in pages
2559  * @parent: parent range if range is on child list
2560  *
2561  * Context: The caller must hold svms->lock
2562  *
2563  * Return: the svm_range found or NULL
2564  */
2565 struct svm_range *
2566 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2567 		    struct svm_range **parent)
2568 {
2569 	struct interval_tree_node *node;
2570 	struct svm_range *prange;
2571 	struct svm_range *pchild;
2572 
2573 	node = interval_tree_iter_first(&svms->objects, addr, addr);
2574 	if (!node)
2575 		return NULL;
2576 
2577 	prange = container_of(node, struct svm_range, it_node);
2578 	pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2579 		 addr, prange->start, prange->last, node->start, node->last);
2580 
2581 	if (addr >= prange->start && addr <= prange->last) {
2582 		if (parent)
2583 			*parent = prange;
2584 		return prange;
2585 	}
2586 	list_for_each_entry(pchild, &prange->child_list, child_list)
2587 		if (addr >= pchild->start && addr <= pchild->last) {
2588 			pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2589 				 addr, pchild->start, pchild->last);
2590 			if (parent)
2591 				*parent = prange;
2592 			return pchild;
2593 		}
2594 
2595 	return NULL;
2596 }
2597 
2598 /* svm_range_best_restore_location - decide the best fault restore location
2599  * @prange: svm range structure
2600  * @node: the GPU node on which the vm fault happened
2601  *
2602  * This is only called when xnack is on, to decide the best location to restore
2603  * the range mapping after a GPU vm fault. The caller uses the best location
2604  * to migrate if the actual location is not the best location, then updates
2605  * the GPU page table mapping to the best location.
2606  *
2607  * If the preferred loc is accessible by faulting GPU, use preferred loc.
2608  * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu
2609  * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then
2610  *    if range actual loc is cpu, best_loc is cpu
2611  *    if vm fault gpu is on xgmi same hive of range actual loc gpu, best_loc is
2612  *    range actual loc.
2613  * Otherwise, GPU no access, best_loc is -1.
2614  *
2615  * Return:
2616  * -1 if the vm fault GPU has no access to the range
2617  * 0 for CPU (system memory), or the GPU id of the best restore location
2618  */
2619 static int32_t
2620 svm_range_best_restore_location(struct svm_range *prange,
2621 				struct kfd_node *node,
2622 				int32_t *gpuidx)
2623 {
2624 	struct kfd_node *bo_node, *preferred_node;
2625 	struct kfd_process *p;
2626 	uint32_t gpuid;
2627 	int r;
2628 
2629 	p = container_of(prange->svms, struct kfd_process, svms);
2630 
2631 	r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2632 	if (r < 0) {
2633 		pr_debug("failed to get gpuid from kgd\n");
2634 		return -1;
2635 	}
2636 
2637 	if (node->adev->flags & AMD_IS_APU)
2638 		return 0;
2639 
2640 	if (prange->preferred_loc == gpuid ||
2641 	    prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2642 		return prange->preferred_loc;
2643 	} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2644 		preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2645 		if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2646 			return prange->preferred_loc;
2647 		/* fall through */
2648 	}
2649 
2650 	if (test_bit(*gpuidx, prange->bitmap_access))
2651 		return gpuid;
2652 
2653 	if (test_bit(*gpuidx, prange->bitmap_aip)) {
2654 		if (!prange->actual_loc)
2655 			return 0;
2656 
2657 		bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2658 		if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2659 			return prange->actual_loc;
2660 		else
2661 			return 0;
2662 	}
2663 
2664 	return -1;
2665 }
2666 
2667 static int
2668 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2669 			       unsigned long *start, unsigned long *last,
2670 			       bool *is_heap_stack)
2671 {
2672 	struct vm_area_struct *vma;
2673 	struct interval_tree_node *node;
2674 	struct rb_node *rb_node;
2675 	unsigned long start_limit, end_limit;
2676 
2677 	vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2678 	if (!vma) {
2679 		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2680 		return -EFAULT;
2681 	}
2682 
2683 	*is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2684 
2685 	start_limit = max(vma->vm_start >> PAGE_SHIFT,
2686 		      (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2687 	end_limit = min(vma->vm_end >> PAGE_SHIFT,
2688 		    (unsigned long)ALIGN(addr + 1, 2UL << 8));
2689 	/* First range that starts after the fault address */
2690 	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2691 	if (node) {
2692 		end_limit = min(end_limit, node->start);
2693 		/* Last range that ends before the fault address */
2694 		rb_node = rb_prev(&node->rb);
2695 	} else {
2696 		/* Last range must end before addr because
2697 		 * there was no range after addr
2698 		 */
2699 		rb_node = rb_last(&p->svms.objects.rb_root);
2700 	}
2701 	if (rb_node) {
2702 		node = container_of(rb_node, struct interval_tree_node, rb);
2703 		if (node->last >= addr) {
2704 			WARN(1, "Overlap with prev node and page fault addr\n");
2705 			return -EFAULT;
2706 		}
2707 		start_limit = max(start_limit, node->last + 1);
2708 	}
2709 
2710 	*start = start_limit;
2711 	*last = end_limit - 1;
2712 
2713 	pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2714 		 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2715 		 *start, *last, *is_heap_stack);
2716 
2717 	return 0;
2718 }
2719 
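/* svm_range_check_vm_userptr - check for overlap with userptr mappings
 *
 * Walk the GPU VM mappings of every pdd and return -EADDRINUSE, with the
 * overlapping interval in bo_s/bo_l, if [start, last] intersects an existing
 * userptr BO.
 */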
2720 static int
2721 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2722 			   uint64_t *bo_s, uint64_t *bo_l)
2723 {
2724 	struct amdgpu_bo_va_mapping *mapping;
2725 	struct interval_tree_node *node;
2726 	struct amdgpu_bo *bo = NULL;
2727 	unsigned long userptr;
2728 	uint32_t i;
2729 	int r;
2730 
2731 	for (i = 0; i < p->n_pdds; i++) {
2732 		struct amdgpu_vm *vm;
2733 
2734 		if (!p->pdds[i]->drm_priv)
2735 			continue;
2736 
2737 		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2738 		r = amdgpu_bo_reserve(vm->root.bo, false);
2739 		if (r)
2740 			return r;
2741 
2742 		/* Check userptr by searching entire vm->va interval tree */
2743 		node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2744 		while (node) {
2745 			mapping = container_of((struct rb_node *)node,
2746 					       struct amdgpu_bo_va_mapping, rb);
2747 			bo = mapping->bo_va->base.bo;
2748 
2749 			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2750 							 start << PAGE_SHIFT,
2751 							 last << PAGE_SHIFT,
2752 							 &userptr)) {
2753 				node = interval_tree_iter_next(node, 0, ~0ULL);
2754 				continue;
2755 			}
2756 
2757 			pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2758 				 start, last);
2759 			if (bo_s && bo_l) {
2760 				*bo_s = userptr >> PAGE_SHIFT;
2761 				*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2762 			}
2763 			amdgpu_bo_unreserve(vm->root.bo);
2764 			return -EADDRINUSE;
2765 		}
2766 		amdgpu_bo_unreserve(vm->root.bo);
2767 	}
2768 	return 0;
2769 }
2770 
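/* svm_range_create_unregistered_range - create a range for a new fault address
 *
 * Build a new svm_range around the faulting address, bounded by the VMA,
 * neighbouring svm ranges and a 2MB alignment window. If the surrounding area
 * is already used by a BO or userptr mapping, fall back to a single page.
 */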
2771 static struct
2772 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2773 						struct kfd_process *p,
2774 						struct mm_struct *mm,
2775 						int64_t addr)
2776 {
2777 	struct svm_range *prange = NULL;
2778 	unsigned long start, last;
2779 	uint32_t gpuid, gpuidx;
2780 	bool is_heap_stack;
2781 	uint64_t bo_s = 0;
2782 	uint64_t bo_l = 0;
2783 	int r;
2784 
2785 	if (svm_range_get_range_boundaries(p, addr, &start, &last,
2786 					   &is_heap_stack))
2787 		return NULL;
2788 
2789 	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2790 	if (r != -EADDRINUSE)
2791 		r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2792 
2793 	if (r == -EADDRINUSE) {
2794 		if (addr >= bo_s && addr <= bo_l)
2795 			return NULL;
2796 
2797 		/* Create a one-page svm range if the 2MB range overlaps an existing mapping */
2798 		start = addr;
2799 		last = addr;
2800 	}
2801 
2802 	prange = svm_range_new(&p->svms, start, last, true);
2803 	if (!prange) {
2804 		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2805 		return NULL;
2806 	}
2807 	if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2808 		pr_debug("failed to get gpuid from kgd\n");
2809 		svm_range_free(prange, true);
2810 		return NULL;
2811 	}
2812 
2813 	if (is_heap_stack)
2814 		prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2815 
2816 	svm_range_add_to_svms(prange);
2817 	svm_range_add_notifier_locked(mm, prange);
2818 
2819 	return prange;
2820 }
2821 
2822 /* svm_range_skip_recover - decide if prange can be recovered
2823  * @prange: svm range structure
2824  *
2825  * The GPU vm retry fault handler skips recovering the range in these cases:
2826  * 1. prange is on the deferred list to be removed after unmap; it is a stale
2827  *    fault, the deferred list work will drain it before freeing the prange.
2828  * 2. prange is on the deferred list to add an interval notifier after split, or
2829  * 3. prange is a child range split from a parent prange; recover later after
2830  *    the interval notifier is added.
2831  *
2832  * Return: true to skip recover, false to recover
2833  */
2834 static bool svm_range_skip_recover(struct svm_range *prange)
2835 {
2836 	struct svm_range_list *svms = prange->svms;
2837 
2838 	spin_lock(&svms->deferred_list_lock);
2839 	if (list_empty(&prange->deferred_list) &&
2840 	    list_empty(&prange->child_list)) {
2841 		spin_unlock(&svms->deferred_list_lock);
2842 		return false;
2843 	}
2844 	spin_unlock(&svms->deferred_list_lock);
2845 
2846 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2847 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2848 			 svms, prange, prange->start, prange->last);
2849 		return true;
2850 	}
2851 	if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2852 	    prange->work_item.op == SVM_OP_ADD_RANGE) {
2853 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2854 			 svms, prange, prange->start, prange->last);
2855 		return true;
2856 	}
2857 	return false;
2858 }
2859 
2860 static void
2861 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2862 		      int32_t gpuidx)
2863 {
2864 	struct kfd_process_device *pdd;
2865 
2866 	/* fault is on different page of same range
2867 	 * or fault is skipped to recover later
2868 	 * or fault is on invalid virtual address
2869 	 */
2870 	if (gpuidx == MAX_GPU_INSTANCE) {
2871 		uint32_t gpuid;
2872 		int r;
2873 
2874 		r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
2875 		if (r < 0)
2876 			return;
2877 	}
2878 
2879 	/* fault is recovered
2880 	 * or fault cannot be recovered because the GPU has no access to the range
2881 	 */
2882 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2883 	if (pdd)
2884 		WRITE_ONCE(pdd->faults, pdd->faults + 1);
2885 }
2886 
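/* svm_fault_allowed - check that the VMA grants read (and, for write faults,
 * write) permission for the faulting access
 */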
2887 static bool
2888 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2889 {
2890 	unsigned long requested = VM_READ;
2891 
2892 	if (write_fault)
2893 		requested |= VM_WRITE;
2894 
2895 	pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2896 		vma->vm_flags);
2897 	return (vma->vm_flags & requested) == requested;
2898 }
2899 
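/* svm_range_restore_pages - handle a GPU retry (XNACK) page fault
 *
 * Find or create the svm_range containing the fault address, choose the best
 * restore location, migrate the pages there if needed, then validate and map
 * the granularity-aligned window around the fault to the GPU.
 */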
2900 int
2901 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2902 			uint32_t vmid, uint32_t node_id,
2903 			uint64_t addr, bool write_fault)
2904 {
2905 	unsigned long start, last, size;
2906 	struct mm_struct *mm = NULL;
2907 	struct svm_range_list *svms;
2908 	struct svm_range *prange;
2909 	struct kfd_process *p;
2910 	ktime_t timestamp = ktime_get_boottime();
2911 	struct kfd_node *node;
2912 	int32_t best_loc;
2913 	int32_t gpuidx = MAX_GPU_INSTANCE;
2914 	bool write_locked = false;
2915 	struct vm_area_struct *vma;
2916 	bool migration = false;
2917 	int r = 0;
2918 
2919 	if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
2920 		pr_debug("device does not support SVM\n");
2921 		return -EFAULT;
2922 	}
2923 
2924 	p = kfd_lookup_process_by_pasid(pasid);
2925 	if (!p) {
2926 		pr_debug("kfd process not found, pasid 0x%x\n", pasid);
2927 		return 0;
2928 	}
2929 	svms = &p->svms;
2930 
2931 	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2932 
2933 	if (atomic_read(&svms->drain_pagefaults)) {
2934 		pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2935 		r = 0;
2936 		goto out;
2937 	}
2938 
2939 	if (!p->xnack_enabled) {
2940 		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2941 		r = -EFAULT;
2942 		goto out;
2943 	}
2944 
2945 	/* p->lead_thread is available as kfd_process_wq_release flushes the work
2946 	 * before releasing task ref.
2947 	 */
2948 	mm = get_task_mm(p->lead_thread);
2949 	if (!mm) {
2950 		pr_debug("svms 0x%p failed to get mm\n", svms);
2951 		r = 0;
2952 		goto out;
2953 	}
2954 
2955 	node = kfd_node_by_irq_ids(adev, node_id, vmid);
2956 	if (!node) {
2957 		pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
2958 			 vmid);
2959 		r = -EFAULT;
2960 		goto out;
2961 	}
2962 	mmap_read_lock(mm);
2963 retry_write_locked:
2964 	mutex_lock(&svms->lock);
2965 	prange = svm_range_from_addr(svms, addr, NULL);
2966 	if (!prange) {
2967 		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2968 			 svms, addr);
2969 		if (!write_locked) {
2970 			/* Need the write lock to create new range with MMU notifier.
2971 			 * Also flush pending deferred work to make sure the interval
2972 			 * tree is up to date before we add a new range
2973 			 */
2974 			mutex_unlock(&svms->lock);
2975 			mmap_read_unlock(mm);
2976 			mmap_write_lock(mm);
2977 			write_locked = true;
2978 			goto retry_write_locked;
2979 		}
2980 		prange = svm_range_create_unregistered_range(node, p, mm, addr);
2981 		if (!prange) {
2982 			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2983 				 svms, addr);
2984 			mmap_write_downgrade(mm);
2985 			r = -EFAULT;
2986 			goto out_unlock_svms;
2987 		}
2988 	}
2989 	if (write_locked)
2990 		mmap_write_downgrade(mm);
2991 
2992 	mutex_lock(&prange->migrate_mutex);
2993 
2994 	if (svm_range_skip_recover(prange)) {
2995 		amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
2996 		r = 0;
2997 		goto out_unlock_range;
2998 	}
2999 
3000 	/* skip duplicate vm fault on different pages of same range */
3001 	if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
3002 				AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
3003 		pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
3004 			 svms, prange->start, prange->last);
3005 		r = 0;
3006 		goto out_unlock_range;
3007 	}
3008 
3009 	/* __do_munmap removed VMA, return success as we are handling stale
3010 	 * retry fault.
3011 	 */
3012 	vma = vma_lookup(mm, addr << PAGE_SHIFT);
3013 	if (!vma) {
3014 		pr_debug("address 0x%llx VMA is removed\n", addr);
3015 		r = 0;
3016 		goto out_unlock_range;
3017 	}
3018 
3019 	if (!svm_fault_allowed(vma, write_fault)) {
3020 		pr_debug("fault addr 0x%llx no %s permission\n", addr,
3021 			write_fault ? "write" : "read");
3022 		r = -EPERM;
3023 		goto out_unlock_range;
3024 	}
3025 
3026 	best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3027 	if (best_loc == -1) {
3028 		pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3029 			 svms, prange->start, prange->last);
3030 		r = -EACCES;
3031 		goto out_unlock_range;
3032 	}
3033 
3034 	pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3035 		 svms, prange->start, prange->last, best_loc,
3036 		 prange->actual_loc);
3037 
3038 	kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
3039 				       write_fault, timestamp);
3040 
3041 	/* Align migration range start and size to granularity size */
3042 	size = 1UL << prange->granularity;
3043 	start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
3044 	last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
3045 	if (prange->actual_loc != 0 || best_loc != 0) {
3046 		migration = true;
3047 
3048 		if (best_loc) {
3049 			r = svm_migrate_to_vram(prange, best_loc, start, last,
3050 					mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3051 			if (r) {
3052 				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
3053 					 r, addr);
3054 				/* Fallback to system memory if migration to
3055 				 * VRAM failed
3056 				 */
3057 				if (prange->actual_loc && prange->actual_loc != best_loc)
3058 					r = svm_migrate_vram_to_ram(prange, mm, start, last,
3059 						KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3060 				else
3061 					r = 0;
3062 			}
3063 		} else {
3064 			r = svm_migrate_vram_to_ram(prange, mm, start, last,
3065 					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3066 		}
3067 		if (r) {
3068 			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3069 				 r, svms, start, last);
3070 			goto out_unlock_range;
3071 		}
3072 	}
3073 
3074 	r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
3075 				       false, false);
3076 	if (r)
3077 		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3078 			 r, svms, start, last);
3079 
3080 	kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3081 				     migration);
3082 
3083 out_unlock_range:
3084 	mutex_unlock(&prange->migrate_mutex);
3085 out_unlock_svms:
3086 	mutex_unlock(&svms->lock);
3087 	mmap_read_unlock(mm);
3088 
3089 	svm_range_count_fault(node, p, gpuidx);
3090 
3091 	mmput(mm);
3092 out:
3093 	kfd_unref_process(p);
3094 
3095 	if (r == -EAGAIN) {
3096 		pr_debug("recover vm fault later\n");
3097 		amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3098 		r = 0;
3099 	}
3100 	return r;
3101 }
3102 
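/* svm_range_switch_xnack_reserve_mem - adjust memory accounting for XNACK mode
 *
 * Switching XNACK off means all svm ranges must stay resident, so reserve the
 * system memory limit for every existing range and its children; switching
 * XNACK on releases those reservations. Roll back the reservations on failure.
 */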
3103 int
3104 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3105 {
3106 	struct svm_range *prange, *pchild;
3107 	uint64_t reserved_size = 0;
3108 	uint64_t size;
3109 	int r = 0;
3110 
3111 	pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3112 
3113 	mutex_lock(&p->svms.lock);
3114 
3115 	list_for_each_entry(prange, &p->svms.list, list) {
3116 		svm_range_lock(prange);
3117 		list_for_each_entry(pchild, &prange->child_list, child_list) {
3118 			size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3119 			if (xnack_enabled) {
3120 				amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3121 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3122 			} else {
3123 				r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3124 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3125 				if (r)
3126 					goto out_unlock;
3127 				reserved_size += size;
3128 			}
3129 		}
3130 
3131 		size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3132 		if (xnack_enabled) {
3133 			amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3134 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3135 		} else {
3136 			r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3137 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3138 			if (r)
3139 				goto out_unlock;
3140 			reserved_size += size;
3141 		}
3142 out_unlock:
3143 		svm_range_unlock(prange);
3144 		if (r)
3145 			break;
3146 	}
3147 
3148 	if (r)
3149 		amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3150 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3151 	else
3152 		/* Changing the xnack mode must be done inside the svms lock, to avoid
3153 		 * racing with svm_range_deferred_list_work unreserving memory in parallel.
3154 		 */
3155 		p->xnack_enabled = xnack_enabled;
3156 
3157 	mutex_unlock(&p->svms.lock);
3158 	return r;
3159 }
3160 
3161 void svm_range_list_fini(struct kfd_process *p)
3162 {
3163 	struct svm_range *prange;
3164 	struct svm_range *next;
3165 
3166 	pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3167 
3168 	cancel_delayed_work_sync(&p->svms.restore_work);
3169 
3170 	/* Ensure list work is finished before process is destroyed */
3171 	flush_work(&p->svms.deferred_list_work);
3172 
3173 	/*
3174 	 * Ensure no retry fault comes in afterwards, as page fault handler will
3175 	 * not find kfd process and take mm lock to recover fault.
3176 	 */
3177 	atomic_inc(&p->svms.drain_pagefaults);
3178 	svm_range_drain_retry_fault(&p->svms);
3179 
3180 	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3181 		svm_range_unlink(prange);
3182 		svm_range_remove_notifier(prange);
3183 		svm_range_free(prange, true);
3184 	}
3185 
3186 	mutex_destroy(&p->svms.lock);
3187 
3188 	pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3189 }
3190 
3191 int svm_range_list_init(struct kfd_process *p)
3192 {
3193 	struct svm_range_list *svms = &p->svms;
3194 	int i;
3195 
3196 	svms->objects = RB_ROOT_CACHED;
3197 	mutex_init(&svms->lock);
3198 	INIT_LIST_HEAD(&svms->list);
3199 	atomic_set(&svms->evicted_ranges, 0);
3200 	atomic_set(&svms->drain_pagefaults, 0);
3201 	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3202 	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3203 	INIT_LIST_HEAD(&svms->deferred_range_list);
3204 	INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3205 	spin_lock_init(&svms->deferred_list_lock);
3206 
3207 	for (i = 0; i < p->n_pdds; i++)
3208 		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3209 			bitmap_set(svms->bitmap_supported, i, 1);
3210 
3211 	return 0;
3212 }
3213 
3214 /**
3215  * svm_range_check_vm - check if virtual address range mapped already
3216  * @p: current kfd_process
3217  * @start: range start address, in pages
3218  * @last: range last address, in pages
3219  * @bo_s: mapping start address in pages if address range already mapped
3220  * @bo_l: mapping last address in pages if address range already mapped
3221  *
3222  * The purpose is to avoid virtual address ranges already allocated by
3223  * kfd_ioctl_alloc_memory_of_gpu ioctl.
3224  * It looks for each pdd in the kfd_process.
3225  *
3226  * Context: Process context
3227  *
3228  * Return 0 - OK, if the range is not mapped.
3229  * Otherwise error code:
3230  * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3231  * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3232  * a signal. Release all buffer reservations and return to user-space.
3233  */
3234 static int
3235 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3236 		   uint64_t *bo_s, uint64_t *bo_l)
3237 {
3238 	struct amdgpu_bo_va_mapping *mapping;
3239 	struct interval_tree_node *node;
3240 	uint32_t i;
3241 	int r;
3242 
3243 	for (i = 0; i < p->n_pdds; i++) {
3244 		struct amdgpu_vm *vm;
3245 
3246 		if (!p->pdds[i]->drm_priv)
3247 			continue;
3248 
3249 		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3250 		r = amdgpu_bo_reserve(vm->root.bo, false);
3251 		if (r)
3252 			return r;
3253 
3254 		node = interval_tree_iter_first(&vm->va, start, last);
3255 		if (node) {
3256 			pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3257 				 start, last);
3258 			mapping = container_of((struct rb_node *)node,
3259 					       struct amdgpu_bo_va_mapping, rb);
3260 			if (bo_s && bo_l) {
3261 				*bo_s = mapping->start;
3262 				*bo_l = mapping->last;
3263 			}
3264 			amdgpu_bo_unreserve(vm->root.bo);
3265 			return -EADDRINUSE;
3266 		}
3267 		amdgpu_bo_unreserve(vm->root.bo);
3268 	}
3269 
3270 	return 0;
3271 }
3272 
3273 /**
3274  * svm_range_is_valid - check if virtual address range is valid
3275  * @p: current kfd_process
3276  * @start: range start address, in pages
3277  * @size: range size, in pages
3278  *
3279  * Valid virtual address range means it belongs to one or more VMAs
3280  *
3281  * Context: Process context
3282  *
3283  * Return:
3284  *  0 - OK, otherwise error code
3285  */
3286 static int
3287 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3288 {
3289 	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3290 	struct vm_area_struct *vma;
3291 	unsigned long end;
3292 	unsigned long start_unchg = start;
3293 
3294 	start <<= PAGE_SHIFT;
3295 	end = start + (size << PAGE_SHIFT);
3296 	do {
3297 		vma = vma_lookup(p->mm, start);
3298 		if (!vma || (vma->vm_flags & device_vma))
3299 			return -EFAULT;
3300 		start = min(end, vma->vm_end);
3301 	} while (start < end);
3302 
3303 	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3304 				  NULL);
3305 }
3306 
3307 /**
3308  * svm_range_best_prefetch_location - decide the best prefetch location
3309  * @prange: svm range structure
3310  *
3311  * For xnack off:
3312  * If the range maps to a single GPU, the best prefetch location is prefetch_loc,
3313  * which can be CPU or GPU.
3314  *
3315  * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch location
3316  * is the prefetch_loc GPU only if the mGPUs are connected on the same XGMI hive,
3317  * otherwise the best prefetch location is always CPU, because a GPU cannot have a
3318  * coherent mapping of another GPU's VRAM even with a large-BAR PCIe connection.
3319  *
3320  * For xnack on:
3321  * If range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
3322  * prefetch_loc; other GPU accesses will generate a vm fault and trigger migration.
3323  *
3324  * If range is ACCESS_IN_PLACE by mGPUs, only if mGPU connection on XGMI same
3325  * hive, the best prefetch location is prefetch_loc GPU, otherwise the best
3326  * prefetch location is always CPU.
3327  *
3328  * Context: Process context
3329  *
3330  * Return:
3331  * 0 for CPU (system memory), or the GPU id of the best prefetch location
3332  */
3333 static uint32_t
3334 svm_range_best_prefetch_location(struct svm_range *prange)
3335 {
3336 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3337 	uint32_t best_loc = prange->prefetch_loc;
3338 	struct kfd_process_device *pdd;
3339 	struct kfd_node *bo_node;
3340 	struct kfd_process *p;
3341 	uint32_t gpuidx;
3342 
3343 	p = container_of(prange->svms, struct kfd_process, svms);
3344 
3345 	if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3346 		goto out;
3347 
3348 	bo_node = svm_range_get_node_by_id(prange, best_loc);
3349 	if (!bo_node) {
3350 		WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3351 		best_loc = 0;
3352 		goto out;
3353 	}
3354 
3355 	if (bo_node->adev->flags & AMD_IS_APU) {
3356 		best_loc = 0;
3357 		goto out;
3358 	}
3359 
3360 	if (p->xnack_enabled)
3361 		bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3362 	else
3363 		bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3364 			  MAX_GPU_INSTANCE);
3365 
3366 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3367 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3368 		if (!pdd) {
3369 			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3370 			continue;
3371 		}
3372 
3373 		if (pdd->dev->adev == bo_node->adev)
3374 			continue;
3375 
3376 		if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3377 			best_loc = 0;
3378 			break;
3379 		}
3380 	}
3381 
3382 out:
3383 	pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3384 		 p->xnack_enabled, &p->svms, prange->start, prange->last,
3385 		 best_loc);
3386 
3387 	return best_loc;
3388 }
3389 
3390 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3391  * @mm: current process mm_struct
3392  * @prange: svm range structure
3393  * @migrated: output, true if migration is triggered
3394  *
3395  * If the range's prefetch_loc is a GPU and the actual loc is CPU (0), migrate
3396  * the range from ram to vram.
3397  * If the range's prefetch_loc is CPU (0) and the actual loc is a GPU, migrate
3398  * the range from vram to ram.
3399  *
3400  * If GPU vm fault retry is not enabled, migration interacts with the MMU
3401  * notifier and restore work:
3402  * 1. migrate_vma_setup invalidates pages, the MMU notifier callback
3403  *    svm_range_evict stops all queues and schedules the restore work
3404  * 2. svm_range_restore_work waits for the migration to finish because
3405  *    a. svm_range_validate_vram takes prange->migrate_mutex
3406  *    b. svm_range_validate_ram HMM get pages waits for the CPU fault handler to return
3407  * 3. the restore work updates the GPU mappings and resumes all queues.
3408  *
3409  * Context: Process context
3410  *
3411  * Return:
3412  * 0 - OK, otherwise - error code of migration
3413  */
3414 static int
3415 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3416 			    bool *migrated)
3417 {
3418 	uint32_t best_loc;
3419 	int r = 0;
3420 
3421 	*migrated = false;
3422 	best_loc = svm_range_best_prefetch_location(prange);
3423 
3424 	/* when best_loc is a gpu node and the same as prange->actual_loc
3425 	 * we still need to do the migration as prange->actual_loc != 0 does
3426 	 * not mean all pages in prange are in vram. hmm migrate will pick
3427 	 * up the right pages during migration.
3428 	 */
3429 	if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
3430 	    (best_loc == 0 && prange->actual_loc == 0))
3431 		return 0;
3432 
3433 	if (!best_loc) {
3434 		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
3435 					KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3436 		*migrated = !r;
3437 		return r;
3438 	}
3439 
3440 	r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
3441 				mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3442 	*migrated = !r;
3443 
3444 	return 0;
3445 }
3446 
3447 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3448 {
3449 	/* Dereferencing fence->svm_bo is safe here because the fence hasn't
3450 	 * signaled yet and we're under the protection of the fence->lock.
3451 	 * After the fence is signaled in svm_range_bo_release, we cannot get
3452 	 * here any more.
3453 	 *
3454 	 * Reference is dropped in svm_range_evict_svm_bo_worker.
3455 	 */
3456 	if (svm_bo_ref_unless_zero(fence->svm_bo)) {
3457 		WRITE_ONCE(fence->svm_bo->evicting, 1);
3458 		schedule_work(&fence->svm_bo->eviction_work);
3459 	}
3460 
3461 	return 0;
3462 }
3463 
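/**
 * svm_range_evict_svm_bo_worker - migrate all ranges of an svm_bo to sysram
 * @work: the eviction_work embedded in the svm_bo
 *
 * For every range still attached to the svm_bo, migrate its VRAM pages to
 * system memory (retrying a few times on partial migration) and detach the
 * range from the svm_bo. Once the range list is empty, signal the eviction
 * fence and drop the reference taken in svm_range_schedule_evict_svm_bo.
 */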
3464 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3465 {
3466 	struct svm_range_bo *svm_bo;
3467 	struct mm_struct *mm;
3468 	int r = 0;
3469 
3470 	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3471 
3472 	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3473 		mm = svm_bo->eviction_fence->mm;
3474 	} else {
3475 		svm_range_bo_unref(svm_bo);
3476 		return;
3477 	}
3478 
3479 	mmap_read_lock(mm);
3480 	spin_lock(&svm_bo->list_lock);
3481 	while (!list_empty(&svm_bo->range_list) && !r) {
3482 		struct svm_range *prange =
3483 				list_first_entry(&svm_bo->range_list,
3484 						struct svm_range, svm_bo_list);
3485 		int retries = 3;
3486 
3487 		list_del_init(&prange->svm_bo_list);
3488 		spin_unlock(&svm_bo->list_lock);
3489 
3490 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3491 			 prange->start, prange->last);
3492 
3493 		mutex_lock(&prange->migrate_mutex);
3494 		do {
3495 			/* migrate all vram pages in this prange to sys ram;
3496 			 * afterwards prange->actual_loc should be zero
3497 			 */
3498 			r = svm_migrate_vram_to_ram(prange, mm,
3499 					prange->start, prange->last,
3500 					KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3501 		} while (!r && prange->actual_loc && --retries);
3502 
3503 		if (!r && prange->actual_loc)
3504 			pr_info_once("Migration failed during eviction\n");
3505 
3506 		if (!prange->actual_loc) {
3507 			mutex_lock(&prange->lock);
3508 			prange->svm_bo = NULL;
3509 			mutex_unlock(&prange->lock);
3510 		}
3511 		mutex_unlock(&prange->migrate_mutex);
3512 
3513 		spin_lock(&svm_bo->list_lock);
3514 	}
3515 	spin_unlock(&svm_bo->list_lock);
3516 	mmap_read_unlock(mm);
3517 	mmput(mm);
3518 
3519 	dma_fence_signal(&svm_bo->eviction_fence->base);
3520 
3521 	/* This is the last reference to svm_bo, after svm_range_vram_node_free
3522 	 * has been called in svm_migrate_vram_to_ram
3523 	 */
3524 	WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3525 	svm_range_bo_unref(svm_bo);
3526 }
3527 
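/**
 * svm_range_set_attr - apply attributes to a range of virtual addresses
 * @p: the kfd_process
 * @mm: the process mm_struct
 * @start: start of the range in pages
 * @size: size of the range in pages
 * @nattr: number of attributes in @attrs
 * @attrs: attributes to apply
 *
 * Add new ranges and split or update existing ranges as needed, apply the
 * attributes as one transaction, then trigger prefetch migrations and
 * revalidate and map the affected ranges on the GPUs.
 *
 * Context: Process context. Takes and releases process_info->lock, svms->lock
 * and the mmap lock.
 *
 * Return: 0 on success, negative errno on failure
 */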
3528 static int
3529 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3530 		   uint64_t start, uint64_t size, uint32_t nattr,
3531 		   struct kfd_ioctl_svm_attribute *attrs)
3532 {
3533 	struct amdkfd_process_info *process_info = p->kgd_process_info;
3534 	struct list_head update_list;
3535 	struct list_head insert_list;
3536 	struct list_head remove_list;
3537 	struct list_head remap_list;
3538 	struct svm_range_list *svms;
3539 	struct svm_range *prange;
3540 	struct svm_range *next;
3541 	bool update_mapping = false;
3542 	bool flush_tlb;
3543 	int r, ret = 0;
3544 
3545 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3546 		 p->pasid, &p->svms, start, start + size - 1, size);
3547 
3548 	r = svm_range_check_attr(p, nattr, attrs);
3549 	if (r)
3550 		return r;
3551 
3552 	svms = &p->svms;
3553 
3554 	mutex_lock(&process_info->lock);
3555 
3556 	svm_range_list_lock_and_flush_work(svms, mm);
3557 
3558 	r = svm_range_is_valid(p, start, size);
3559 	if (r) {
3560 		pr_debug("invalid range r=%d\n", r);
3561 		mmap_write_unlock(mm);
3562 		goto out;
3563 	}
3564 
3565 	mutex_lock(&svms->lock);
3566 
3567 	/* Add new range and split existing ranges as needed */
3568 	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3569 			  &insert_list, &remove_list, &remap_list);
3570 	if (r) {
3571 		mutex_unlock(&svms->lock);
3572 		mmap_write_unlock(mm);
3573 		goto out;
3574 	}
3575 	/* Apply changes as a transaction */
3576 	list_for_each_entry_safe(prange, next, &insert_list, list) {
3577 		svm_range_add_to_svms(prange);
3578 		svm_range_add_notifier_locked(mm, prange);
3579 	}
3580 	list_for_each_entry(prange, &update_list, update_list) {
3581 		svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3582 		/* TODO: unmap ranges from GPU that lost access */
3583 	}
3584 	list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3585 		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3586 			 prange->svms, prange, prange->start,
3587 			 prange->last);
3588 		svm_range_unlink(prange);
3589 		svm_range_remove_notifier(prange);
3590 		svm_range_free(prange, false);
3591 	}
3592 
3593 	mmap_write_downgrade(mm);
3594 	/* Trigger migrations and revalidate and map to GPUs as needed. If
3595 	 * this fails we may be left with partially completed actions. There
3596 	 * is no clean way of rolling back to the previous state in such a
3597 	 * case because the rollback wouldn't be guaranteed to work either.
3598 	 */
3599 	list_for_each_entry(prange, &update_list, update_list) {
3600 		bool migrated;
3601 
3602 		mutex_lock(&prange->migrate_mutex);
3603 
3604 		r = svm_range_trigger_migration(mm, prange, &migrated);
3605 		if (r)
3606 			goto out_unlock_range;
3607 
3608 		if (migrated && (!p->xnack_enabled ||
3609 		    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3610 		    prange->mapped_to_gpu) {
3611 			pr_debug("restore_work will update mappings of GPUs\n");
3612 			mutex_unlock(&prange->migrate_mutex);
3613 			continue;
3614 		}
3615 
3616 		if (!migrated && !update_mapping) {
3617 			mutex_unlock(&prange->migrate_mutex);
3618 			continue;
3619 		}
3620 
3621 		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3622 
3623 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3624 					       MAX_GPU_INSTANCE, true, true, flush_tlb);
3625 		if (r)
3626 			pr_debug("failed %d to map svm range\n", r);
3627 
3628 out_unlock_range:
3629 		mutex_unlock(&prange->migrate_mutex);
3630 		if (r)
3631 			ret = r;
3632 	}
3633 
3634 	list_for_each_entry(prange, &remap_list, update_list) {
3635 		pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
3636 			 prange, prange->start, prange->last);
3637 		mutex_lock(&prange->migrate_mutex);
3638 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3639 					       MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
3640 		if (r)
3641 			pr_debug("failed %d on remap svm range\n", r);
3642 		mutex_unlock(&prange->migrate_mutex);
3643 		if (r)
3644 			ret = r;
3645 	}
3646 
3647 	dynamic_svm_range_dump(svms);
3648 
3649 	mutex_unlock(&svms->lock);
3650 	mmap_read_unlock(mm);
3651 out:
3652 	mutex_unlock(&process_info->lock);
3653 
3654 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3655 		 &p->svms, start, start + size - 1, r);
3656 
3657 	return ret ? ret : r;
3658 }
3659 
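/**
 * svm_range_get_attr - query attributes over a range of virtual addresses
 * @p: the kfd_process
 * @mm: the process mm_struct
 * @start: start of the range in pages
 * @size: size of the range in pages
 * @nattr: number of attributes in @attrs
 * @attrs: attribute types to query, values are filled in on return
 *
 * Walk all svm ranges overlapping the queried interval and report the
 * effective values: locations that differ between ranges are reported as
 * undefined, flags are accumulated with AND/OR, accessibility is the
 * intersection over all ranges, and granularity is the minimum.
 *
 * Return: 0 on success, negative errno on failure
 */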
3660 static int
3661 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3662 		   uint64_t start, uint64_t size, uint32_t nattr,
3663 		   struct kfd_ioctl_svm_attribute *attrs)
3664 {
3665 	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3666 	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3667 	bool get_preferred_loc = false;
3668 	bool get_prefetch_loc = false;
3669 	bool get_granularity = false;
3670 	bool get_accessible = false;
3671 	bool get_flags = false;
3672 	uint64_t last = start + size - 1UL;
3673 	uint8_t granularity = 0xff;
3674 	struct interval_tree_node *node;
3675 	struct svm_range_list *svms;
3676 	struct svm_range *prange;
3677 	uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3678 	uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3679 	uint32_t flags_and = 0xffffffff;
3680 	uint32_t flags_or = 0;
3681 	int gpuidx;
3682 	uint32_t i;
3683 	int r = 0;
3684 
3685 	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3686 		 start + size - 1, nattr);
3687 
3688 	/* Flush pending deferred work to avoid racing with deferred actions from
3689 	 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3690 	 * can still race with get_attr because we don't hold the mmap lock. But that
3691 	 * would be a race condition in the application anyway, and undefined
3692 	 * behaviour is acceptable in that case.
3693 	 */
3694 	flush_work(&p->svms.deferred_list_work);
3695 
3696 	mmap_read_lock(mm);
3697 	r = svm_range_is_valid(p, start, size);
3698 	mmap_read_unlock(mm);
3699 	if (r) {
3700 		pr_debug("invalid range r=%d\n", r);
3701 		return r;
3702 	}
3703 
3704 	for (i = 0; i < nattr; i++) {
3705 		switch (attrs[i].type) {
3706 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3707 			get_preferred_loc = true;
3708 			break;
3709 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3710 			get_prefetch_loc = true;
3711 			break;
3712 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3713 			get_accessible = true;
3714 			break;
3715 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3716 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3717 			get_flags = true;
3718 			break;
3719 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3720 			get_granularity = true;
3721 			break;
3722 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3723 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3724 			fallthrough;
3725 		default:
3726 			pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3727 			return -EINVAL;
3728 		}
3729 	}
3730 
3731 	svms = &p->svms;
3732 
3733 	mutex_lock(&svms->lock);
3734 
3735 	node = interval_tree_iter_first(&svms->objects, start, last);
3736 	if (!node) {
3737 		pr_debug("range attrs not found return default values\n");
3738 		svm_range_set_default_attributes(&location, &prefetch_loc,
3739 						 &granularity, &flags_and);
3740 		flags_or = flags_and;
3741 		if (p->xnack_enabled)
3742 			bitmap_copy(bitmap_access, svms->bitmap_supported,
3743 				    MAX_GPU_INSTANCE);
3744 		else
3745 			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3746 		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3747 		goto fill_values;
3748 	}
3749 	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3750 	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3751 
3752 	while (node) {
3753 		struct interval_tree_node *next;
3754 
3755 		prange = container_of(node, struct svm_range, it_node);
3756 		next = interval_tree_iter_next(node, start, last);
3757 
3758 		if (get_preferred_loc) {
3759 			if (prange->preferred_loc ==
3760 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3761 			    (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3762 			     location != prange->preferred_loc)) {
3763 				location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3764 				get_preferred_loc = false;
3765 			} else {
3766 				location = prange->preferred_loc;
3767 			}
3768 		}
3769 		if (get_prefetch_loc) {
3770 			if (prange->prefetch_loc ==
3771 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3772 			    (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3773 			     prefetch_loc != prange->prefetch_loc)) {
3774 				prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3775 				get_prefetch_loc = false;
3776 			} else {
3777 				prefetch_loc = prange->prefetch_loc;
3778 			}
3779 		}
3780 		if (get_accessible) {
3781 			bitmap_and(bitmap_access, bitmap_access,
3782 				   prange->bitmap_access, MAX_GPU_INSTANCE);
3783 			bitmap_and(bitmap_aip, bitmap_aip,
3784 				   prange->bitmap_aip, MAX_GPU_INSTANCE);
3785 		}
3786 		if (get_flags) {
3787 			flags_and &= prange->flags;
3788 			flags_or |= prange->flags;
3789 		}
3790 
3791 		if (get_granularity && prange->granularity < granularity)
3792 			granularity = prange->granularity;
3793 
3794 		node = next;
3795 	}
3796 fill_values:
3797 	mutex_unlock(&svms->lock);
3798 
3799 	for (i = 0; i < nattr; i++) {
3800 		switch (attrs[i].type) {
3801 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3802 			attrs[i].value = location;
3803 			break;
3804 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3805 			attrs[i].value = prefetch_loc;
3806 			break;
3807 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3808 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
3809 							       attrs[i].value);
3810 			if (gpuidx < 0) {
3811 				pr_debug("invalid gpuid %x\n", attrs[i].value);
3812 				return -EINVAL;
3813 			}
3814 			if (test_bit(gpuidx, bitmap_access))
3815 				attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3816 			else if (test_bit(gpuidx, bitmap_aip))
3817 				attrs[i].type =
3818 					KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3819 			else
3820 				attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3821 			break;
3822 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3823 			attrs[i].value = flags_and;
3824 			break;
3825 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3826 			attrs[i].value = ~flags_or;
3827 			break;
3828 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3829 			attrs[i].value = (uint32_t)granularity;
3830 			break;
3831 		}
3832 	}
3833 
3834 	return 0;
3835 }
3836 
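/**
 * kfd_criu_resume_svm - recreate SVM ranges during CRIU resume
 * @p: the kfd_process being resumed
 *
 * For each SVM range recorded by kfd_criu_restore_svm, sanitize the saved
 * attributes (replacing undefined prefetch locations and appending a
 * CLR_FLAGS attribute) and re-apply them with svm_range_set_attr. The saved
 * metadata is freed before returning.
 *
 * Return: 0 on success, negative errno on failure
 */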
3837 int kfd_criu_resume_svm(struct kfd_process *p)
3838 {
3839 	struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
3840 	int nattr_common = 4, nattr_accessibility = 1;
3841 	struct criu_svm_metadata *criu_svm_md = NULL;
3842 	struct svm_range_list *svms = &p->svms;
3843 	struct criu_svm_metadata *next = NULL;
3844 	uint32_t set_flags = 0xffffffff;
3845 	int i, j, num_attrs, ret = 0;
3846 	uint64_t set_attr_size;
3847 	struct mm_struct *mm;
3848 
3849 	if (list_empty(&svms->criu_svm_metadata_list)) {
3850 		pr_debug("No SVM data from CRIU restore stage 2\n");
3851 		return ret;
3852 	}
3853 
3854 	mm = get_task_mm(p->lead_thread);
3855 	if (!mm) {
3856 		pr_err("failed to get mm for the target process\n");
3857 		return -ESRCH;
3858 	}
3859 
3860 	num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
3861 
3862 	i = j = 0;
3863 	list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3864 		pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
3865 			 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
3866 
3867 		for (j = 0; j < num_attrs; j++) {
3868 			pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
3869 				 i, j, criu_svm_md->data.attrs[j].type,
3870 				 i, j, criu_svm_md->data.attrs[j].value);
3871 			switch (criu_svm_md->data.attrs[j].type) {
3872 			/* During the Checkpoint operation, the query for the
3873 			 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
3874 			 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
3875 			 * not used by the range which was checkpointed. Care
3876 			 * must be taken not to restore with an invalid value;
3877 			 * otherwise the gpuidx value will be invalid and
3878 			 * set_attr would eventually fail, so just replace those
3879 			 * with another dummy attribute such as
3880 			 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
3881 			 */
3882 			case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3883 				if (criu_svm_md->data.attrs[j].value ==
3884 				    KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
3885 					criu_svm_md->data.attrs[j].type =
3886 						KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3887 					criu_svm_md->data.attrs[j].value = 0;
3888 				}
3889 				break;
3890 			case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3891 				set_flags = criu_svm_md->data.attrs[j].value;
3892 				break;
3893 			default:
3894 				break;
3895 			}
3896 		}
3897 
3898 		/* CLR_FLAGS is not available via get_attr during checkpoint, but
3899 		 * it needs to be inserted before restoring the ranges, so
3900 		 * allocate extra space for it before calling set_attr.
3901 		 */
3902 		set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3903 						(num_attrs + 1);
3904 		set_attr_new = krealloc(set_attr, set_attr_size,
3905 					    GFP_KERNEL);
3906 		if (!set_attr_new) {
3907 			ret = -ENOMEM;
3908 			goto exit;
3909 		}
3910 		set_attr = set_attr_new;
3911 
3912 		memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
3913 					sizeof(struct kfd_ioctl_svm_attribute));
3914 		set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
3915 		set_attr[num_attrs].value = ~set_flags;
3916 
3917 		ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3918 					 criu_svm_md->data.size, num_attrs + 1,
3919 					 set_attr);
3920 		if (ret) {
3921 			pr_err("CRIU: failed to set range attributes\n");
3922 			goto exit;
3923 		}
3924 
3925 		i++;
3926 	}
3927 exit:
3928 	kfree(set_attr);
3929 	list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
3930 		pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
3931 						criu_svm_md->data.start_addr);
3932 		kfree(criu_svm_md);
3933 	}
3934 
3935 	mmput(mm);
3936 	return ret;
3937 
3938 }
3939 
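/**
 * kfd_criu_restore_svm - read one checkpointed SVM range object
 * @p: the target kfd_process
 * @user_priv_ptr: user pointer to the CRIU private data blob
 * @priv_data_offset: current offset into the blob, advanced on success
 * @max_priv_data_size: total size of the blob, used for bounds checking
 *
 * Copy one kfd_criu_svm_range_priv_data object from user space and queue it
 * on the criu_svm_metadata_list. The ranges are recreated later in
 * kfd_criu_resume_svm.
 *
 * Return: 0 on success, negative errno on failure
 */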
3940 int kfd_criu_restore_svm(struct kfd_process *p,
3941 			 uint8_t __user *user_priv_ptr,
3942 			 uint64_t *priv_data_offset,
3943 			 uint64_t max_priv_data_size)
3944 {
3945 	uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
3946 	int nattr_common = 4, nattr_accessibility = 1;
3947 	struct criu_svm_metadata *criu_svm_md = NULL;
3948 	struct svm_range_list *svms = &p->svms;
3949 	uint32_t num_devices;
3950 	int ret = 0;
3951 
3952 	num_devices = p->n_pdds;
3953 	/* Handle one SVM range object at a time. The number of gpus is
3954 	 * assumed to be the same on the restore node; this must be checked
3955 	 * while evaluating the topology earlier.
3956 	 */
3957 
3958 	svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
3959 		(nattr_common + nattr_accessibility * num_devices);
3960 	svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
3961 
3962 	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3963 								svm_attrs_size;
3964 
3965 	criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
3966 	if (!criu_svm_md) {
3967 		pr_err("failed to allocate memory to store svm metadata\n");
3968 		return -ENOMEM;
3969 	}
3970 	if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
3971 		ret = -EINVAL;
3972 		goto exit;
3973 	}
3974 
3975 	ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
3976 			     svm_priv_data_size);
3977 	if (ret) {
3978 		ret = -EFAULT;
3979 		goto exit;
3980 	}
3981 	*priv_data_offset += svm_priv_data_size;
3982 
3983 	list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
3984 
3985 	return 0;
3986 
3987 
3988 exit:
3989 	kfree(criu_svm_md);
3990 	return ret;
3991 }
3992 
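/**
 * svm_range_get_info - report SVM checkpoint size requirements
 * @p: the kfd_process
 * @num_svm_ranges: output, number of svm ranges in the process
 * @svm_priv_data_size: output, total private data size needed to checkpoint
 *                      all ranges
 *
 * Used during CRIU checkpoint to size the buffer that will hold the per-range
 * attributes written by kfd_criu_checkpoint_svm.
 *
 * Return: 0 on success, negative errno on failure
 */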
3993 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
3994 		       uint64_t *svm_priv_data_size)
3995 {
3996 	uint64_t total_size, accessibility_size, common_attr_size;
3997 	int nattr_common = 4, nattr_accessibility = 1;
3998 	int num_devices = p->n_pdds;
3999 	struct svm_range_list *svms;
4000 	struct svm_range *prange;
4001 	uint32_t count = 0;
4002 
4003 	*svm_priv_data_size = 0;
4004 
4005 	svms = &p->svms;
4006 	if (!svms)
4007 		return -EINVAL;
4008 
4009 	mutex_lock(&svms->lock);
4010 	list_for_each_entry(prange, &svms->list, list) {
4011 		pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
4012 			 prange, prange->start, prange->npages,
4013 			 prange->start + prange->npages - 1);
4014 		count++;
4015 	}
4016 	mutex_unlock(&svms->lock);
4017 
4018 	*num_svm_ranges = count;
4019 	/* Only the accessibility attributes need to be queried for each gpu
4020 	 * individually; the remaining ones span the entire process
4021 	 * regardless of the various gpu nodes. Of the remaining attributes,
4022 	 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
4023 	 *
4024 	 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
4025 	 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
4026 	 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
4027 	 * KFD_IOCTL_SVM_ATTR_GRANULARITY
4028 	 *
4029 	 * ** ACCESSIBILITY ATTRIBUTES **
4030 	 * (Considered as one, type is altered during query, value is gpuid)
4031 	 * KFD_IOCTL_SVM_ATTR_ACCESS
4032 	 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
4033 	 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
4034 	 */
4035 	if (*num_svm_ranges > 0) {
4036 		common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4037 			nattr_common;
4038 		accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
4039 			nattr_accessibility * num_devices;
4040 
4041 		total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4042 			common_attr_size + accessibility_size;
4043 
4044 		*svm_priv_data_size = *num_svm_ranges * total_size;
4045 	}
4046 
4047 	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
4048 		 *svm_priv_data_size);
4049 	return 0;
4050 }
4051 
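/**
 * kfd_criu_checkpoint_svm - write SVM range attributes to the CRIU blob
 * @p: the kfd_process being checkpointed
 * @user_priv_data: user pointer to the CRIU private data blob
 * @priv_data_offset: current offset into the blob, advanced for each range
 *
 * For every svm range in the process, query the common and per-GPU
 * accessibility attributes with svm_range_get_attr and copy the resulting
 * kfd_criu_svm_range_priv_data object to user space.
 *
 * Return: 0 on success, negative errno on failure
 */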
4052 int kfd_criu_checkpoint_svm(struct kfd_process *p,
4053 			    uint8_t __user *user_priv_data,
4054 			    uint64_t *priv_data_offset)
4055 {
4056 	struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
4057 	struct kfd_ioctl_svm_attribute *query_attr = NULL;
4058 	uint64_t svm_priv_data_size, query_attr_size = 0;
4059 	int index, nattr_common = 4, ret = 0;
4060 	struct svm_range_list *svms;
4061 	int num_devices = p->n_pdds;
4062 	struct svm_range *prange;
4063 	struct mm_struct *mm;
4064 
4065 	svms = &p->svms;
4066 	if (!svms)
4067 		return -EINVAL;
4068 
4069 	mm = get_task_mm(p->lead_thread);
4070 	if (!mm) {
4071 		pr_err("failed to get mm for the target process\n");
4072 		return -ESRCH;
4073 	}
4074 
4075 	query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4076 				(nattr_common + num_devices);
4077 
4078 	query_attr = kzalloc(query_attr_size, GFP_KERNEL);
4079 	if (!query_attr) {
4080 		ret = -ENOMEM;
4081 		goto exit;
4082 	}
4083 
4084 	query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
4085 	query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
4086 	query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4087 	query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
4088 
4089 	for (index = 0; index < num_devices; index++) {
4090 		struct kfd_process_device *pdd = p->pdds[index];
4091 
4092 		query_attr[index + nattr_common].type =
4093 			KFD_IOCTL_SVM_ATTR_ACCESS;
4094 		query_attr[index + nattr_common].value = pdd->user_gpu_id;
4095 	}
4096 
4097 	svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
4098 
4099 	svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
4100 	if (!svm_priv) {
4101 		ret = -ENOMEM;
4102 		goto exit_query;
4103 	}
4104 
4105 	index = 0;
4106 	list_for_each_entry(prange, &svms->list, list) {
4107 
4108 		svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
4109 		svm_priv->start_addr = prange->start;
4110 		svm_priv->size = prange->npages;
4111 		memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4112 		pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4113 			 prange, prange->start, prange->npages,
4114 			 prange->start + prange->npages - 1,
4115 			 prange->npages * PAGE_SIZE);
4116 
4117 		ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4118 					 svm_priv->size,
4119 					 (nattr_common + num_devices),
4120 					 svm_priv->attrs);
4121 		if (ret) {
4122 			pr_err("CRIU: failed to obtain range attributes\n");
4123 			goto exit_priv;
4124 		}
4125 
4126 		if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4127 				 svm_priv_data_size)) {
4128 			pr_err("Failed to copy svm priv to user\n");
4129 			ret = -EFAULT;
4130 			goto exit_priv;
4131 		}
4132 
4133 		*priv_data_offset += svm_priv_data_size;
4134 
4135 	}
4136 
4137 
4138 exit_priv:
4139 	kfree(svm_priv);
4140 exit_query:
4141 	kfree(query_attr);
4142 exit:
4143 	mmput(mm);
4144 	return ret;
4145 }
4146 
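/**
 * svm_ioctl - entry point for the KFD SVM ioctl
 * @p: the calling kfd_process
 * @op: KFD_IOCTL_SVM_OP_SET_ATTR or KFD_IOCTL_SVM_OP_GET_ATTR
 * @start: start address of the range in bytes
 * @size: size of the range in bytes
 * @nattrs: number of attributes
 * @attrs: attribute array to apply or fill in
 *
 * Convert the byte-based address and size to pages and dispatch to
 * svm_range_set_attr or svm_range_get_attr.
 *
 * Return: 0 on success, error code on failure
 */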
4147 int
4148 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4149 	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4150 {
4151 	struct mm_struct *mm = current->mm;
4152 	int r;
4153 
4154 	start >>= PAGE_SHIFT;
4155 	size >>= PAGE_SHIFT;
4156 
4157 	switch (op) {
4158 	case KFD_IOCTL_SVM_OP_SET_ATTR:
4159 		r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4160 		break;
4161 	case KFD_IOCTL_SVM_OP_GET_ATTR:
4162 		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
4163 		break;
4164 	default:
4165 		r = -EINVAL;
4166 		break;
4167 	}
4168 
4169 	return r;
4170 }
4171