xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_svm.c (revision 8b971ce0cbc71a10f1d19d2bb6f3dc5c6f07d9d9)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2020-2021 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include <linux/dynamic_debug.h>
27 #include <drm/ttm/ttm_tt.h>
28 #include <drm/drm_exec.h>
29 
30 #include "amdgpu_sync.h"
31 #include "amdgpu_object.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_hmm.h"
34 #include "amdgpu.h"
35 #include "amdgpu_xgmi.h"
36 #include "kfd_priv.h"
37 #include "kfd_svm.h"
38 #include "kfd_migrate.h"
39 #include "kfd_smi_events.h"
40 
41 #ifdef dev_fmt
42 #undef dev_fmt
43 #endif
44 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
45 
46 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
47 
48 /* Long enough to ensure no retry fault comes after svm range is restored and
49  * page table is updated.
50  */
51 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	(2UL * NSEC_PER_MSEC)
52 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
53 #define dynamic_svm_range_dump(svms) \
54 	_dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
55 #else
56 #define dynamic_svm_range_dump(svms) \
57 	do { if (0) svm_range_debug_dump(svms); } while (0)
58 #endif
59 
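/*
 * Illustrative sketch (not part of the driver): with CONFIG_DYNAMIC_DEBUG,
 * the dump is compiled in but stays dormant until a call site is enabled
 * through dynamic debug, e.g. for the call in svm_range_set_attr():
 *
 *	echo 'func svm_range_set_attr +p' > \
 *		/sys/kernel/debug/dynamic_debug/control
 *
 * after which dynamic_svm_range_dump(&p->svms) actually invokes
 * svm_range_debug_dump().
 */
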
60 /* A giant svm range is split into smaller ranges based on this value. It is
61  * the minimum of 1/32 of the VRAM size across all dGPUs/APUs, clamped
62  * between 2MB and 1GB and aligned to a power-of-two multiple of 2MB.
63  */
64 static uint64_t max_svm_range_pages;
65 
66 struct criu_svm_metadata {
67 	struct list_head list;
68 	struct kfd_criu_svm_range_priv_data data;
69 };
70 
71 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
72 static bool
73 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
74 				    const struct mmu_notifier_range *range,
75 				    unsigned long cur_seq);
76 static int
77 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
78 		   uint64_t *bo_s, uint64_t *bo_l);
79 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
80 	.invalidate = svm_range_cpu_invalidate_pagetables,
81 };
82 
83 /**
84  * svm_range_unlink - unlink svm_range from lists and interval tree
85  * @prange: svm range structure to be removed
86  *
87  * Remove the svm_range from the svms and svm_bo lists and the svms
88  * interval tree.
89  *
90  * Context: The caller must hold svms->lock
91  */
92 static void svm_range_unlink(struct svm_range *prange)
93 {
94 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
95 		 prange, prange->start, prange->last);
96 
97 	if (prange->svm_bo) {
98 		spin_lock(&prange->svm_bo->list_lock);
99 		list_del(&prange->svm_bo_list);
100 		spin_unlock(&prange->svm_bo->list_lock);
101 	}
102 
103 	list_del(&prange->list);
104 	if (prange->it_node.start != 0 && prange->it_node.last != 0)
105 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
106 }
107 
108 static void
109 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
110 {
111 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
112 		 prange, prange->start, prange->last);
113 
114 	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
115 				     prange->start << PAGE_SHIFT,
116 				     prange->npages << PAGE_SHIFT,
117 				     &svm_range_mn_ops);
118 }
119 
120 /**
121  * svm_range_add_to_svms - add svm range to svms
122  * @prange: svm range structure to be added
123  *
124  * Add the svm range to svms interval tree and link list
125  *
126  * Context: The caller must hold svms->lock
127  */
128 static void svm_range_add_to_svms(struct svm_range *prange)
129 {
130 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
131 		 prange, prange->start, prange->last);
132 
133 	list_move_tail(&prange->list, &prange->svms->list);
134 	prange->it_node.start = prange->start;
135 	prange->it_node.last = prange->last;
136 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
137 }
138 
139 static void svm_range_remove_notifier(struct svm_range *prange)
140 {
141 	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
142 		 prange->svms, prange,
143 		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
144 		 prange->notifier.interval_tree.last >> PAGE_SHIFT);
145 
146 	if (prange->notifier.interval_tree.start != 0 &&
147 	    prange->notifier.interval_tree.last != 0)
148 		mmu_interval_notifier_remove(&prange->notifier);
149 }
150 
151 static bool
152 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
153 {
154 	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
155 	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
156 }
157 
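/*
 * Sketch of the dma_addr encoding assumed here (it mirrors the code in
 * svm_range_dma_map_dev() below): a system memory entry holds a plain DMA
 * address, while a VRAM entry stores the device address with the
 * SVM_RANGE_VRAM_DOMAIN bit set:
 *
 *	bool is_vram = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
 *	dma_addr_t addr = dma_addr[i] & ~SVM_RANGE_VRAM_DOMAIN;
 */
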
158 static int
159 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
160 		      unsigned long offset, unsigned long npages,
161 		      unsigned long *hmm_pfns, uint32_t gpuidx)
162 {
163 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
164 	dma_addr_t *addr = prange->dma_addr[gpuidx];
165 	struct device *dev = adev->dev;
166 	struct page *page;
167 	int i, r;
168 
169 	if (!addr) {
170 		addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
171 		if (!addr)
172 			return -ENOMEM;
173 		prange->dma_addr[gpuidx] = addr;
174 	}
175 
176 	addr += offset;
177 	for (i = 0; i < npages; i++) {
178 		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
179 			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
180 
181 		page = hmm_pfn_to_page(hmm_pfns[i]);
182 		if (is_zone_device_page(page)) {
183 			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
184 
185 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
186 				   bo_adev->vm_manager.vram_base_offset -
187 				   bo_adev->kfd.pgmap.range.start;
188 			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
189 			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
190 			continue;
191 		}
192 		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
193 		r = dma_mapping_error(dev, addr[i]);
194 		if (r) {
195 			dev_err(dev, "failed %d dma_map_page\n", r);
196 			return r;
197 		}
198 		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
199 				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
200 	}
201 
202 	return 0;
203 }
204 
205 static int
206 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
207 		  unsigned long offset, unsigned long npages,
208 		  unsigned long *hmm_pfns)
209 {
210 	struct kfd_process *p;
211 	uint32_t gpuidx;
212 	int r = 0;
213 
214 	p = container_of(prange->svms, struct kfd_process, svms);
215 
216 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
217 		struct kfd_process_device *pdd;
218 
219 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
220 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
221 		if (!pdd) {
222 			pr_debug("failed to find device idx %d\n", gpuidx);
223 			return -EINVAL;
224 		}
225 
226 		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
227 					  hmm_pfns, gpuidx);
228 		if (r)
229 			break;
230 	}
231 
232 	return r;
233 }
234 
235 void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
236 			 unsigned long offset, unsigned long npages)
237 {
238 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
239 	int i;
240 
241 	if (!dma_addr)
242 		return;
243 
244 	for (i = offset; i < offset + npages; i++) {
245 		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
246 			continue;
247 		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
248 		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
249 		dma_addr[i] = 0;
250 	}
251 }
252 
253 void svm_range_dma_unmap(struct svm_range *prange)
254 {
255 	struct kfd_process_device *pdd;
256 	dma_addr_t *dma_addr;
257 	struct device *dev;
258 	struct kfd_process *p;
259 	uint32_t gpuidx;
260 
261 	p = container_of(prange->svms, struct kfd_process, svms);
262 
263 	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
264 		dma_addr = prange->dma_addr[gpuidx];
265 		if (!dma_addr)
266 			continue;
267 
268 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
269 		if (!pdd) {
270 			pr_debug("failed to find device idx %d\n", gpuidx);
271 			continue;
272 		}
273 		dev = &pdd->dev->adev->pdev->dev;
274 
275 		svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
276 	}
277 }
278 
279 static void svm_range_free(struct svm_range *prange, bool do_unmap)
280 {
281 	uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
282 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
283 	uint32_t gpuidx;
284 
285 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
286 		 prange->start, prange->last);
287 
288 	svm_range_vram_node_free(prange);
289 	if (do_unmap)
290 		svm_range_dma_unmap(prange);
291 
292 	if (do_unmap && !p->xnack_enabled) {
293 		pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
294 		amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
295 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
296 	}
297 
298 	/* free dma_addr array for each gpu */
299 	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
300 		if (prange->dma_addr[gpuidx]) {
301 			kvfree(prange->dma_addr[gpuidx]);
302 			prange->dma_addr[gpuidx] = NULL;
303 		}
304 	}
305 
306 	mutex_destroy(&prange->lock);
307 	mutex_destroy(&prange->migrate_mutex);
308 	kfree(prange);
309 }
310 
311 static void
312 svm_range_set_default_attributes(struct svm_range_list *svms, int32_t *location,
313 				 int32_t *prefetch_loc, uint8_t *granularity,
314 				 uint32_t *flags)
315 {
316 	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
317 	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
318 	*granularity = svms->default_granularity;
319 	*flags =
320 		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
321 }
322 
323 static struct
324 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
325 			 uint64_t last, bool update_mem_usage)
326 {
327 	uint64_t size = last - start + 1;
328 	struct svm_range *prange;
329 	struct kfd_process *p;
330 
331 	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
332 	if (!prange)
333 		return NULL;
334 
335 	p = container_of(svms, struct kfd_process, svms);
336 	if (!p->xnack_enabled && update_mem_usage &&
337 	    amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
338 				    KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
339 		pr_info("SVM mapping failed, exceeds resident system memory limit\n");
340 		kfree(prange);
341 		return NULL;
342 	}
343 	prange->npages = size;
344 	prange->svms = svms;
345 	prange->start = start;
346 	prange->last = last;
347 	INIT_LIST_HEAD(&prange->list);
348 	INIT_LIST_HEAD(&prange->update_list);
349 	INIT_LIST_HEAD(&prange->svm_bo_list);
350 	INIT_LIST_HEAD(&prange->deferred_list);
351 	INIT_LIST_HEAD(&prange->child_list);
352 	atomic_set(&prange->invalid, 0);
353 	prange->validate_timestamp = 0;
354 	prange->vram_pages = 0;
355 	mutex_init(&prange->migrate_mutex);
356 	mutex_init(&prange->lock);
357 
358 	if (p->xnack_enabled)
359 		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
360 			    MAX_GPU_INSTANCE);
361 
362 	svm_range_set_default_attributes(svms, &prange->preferred_loc,
363 					 &prange->prefetch_loc,
364 					 &prange->granularity, &prange->flags);
365 
366 	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
367 
368 	return prange;
369 }
370 
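/*
 * Minimal usage sketch (illustrative only; real callers also link the new
 * range into the svms lists under svms->lock):
 *
 *	struct svm_range *prange;
 *
 *	prange = svm_range_new(svms, start, last, true);
 *	if (!prange)
 *		return -ENOMEM;
 *	...
 *	svm_range_free(prange, true);
 */
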
371 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
372 {
373 	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
374 		return false;
375 
376 	return true;
377 }
378 
379 static void svm_range_bo_release(struct kref *kref)
380 {
381 	struct svm_range_bo *svm_bo;
382 
383 	svm_bo = container_of(kref, struct svm_range_bo, kref);
384 	pr_debug("svm_bo 0x%p\n", svm_bo);
385 
386 	spin_lock(&svm_bo->list_lock);
387 	while (!list_empty(&svm_bo->range_list)) {
388 		struct svm_range *prange =
389 				list_first_entry(&svm_bo->range_list,
390 						struct svm_range, svm_bo_list);
391 		/* list_del_init tells a concurrent svm_range_vram_node_new when
392 		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
393 		 */
394 		list_del_init(&prange->svm_bo_list);
395 		spin_unlock(&svm_bo->list_lock);
396 
397 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
398 			 prange->start, prange->last);
399 		mutex_lock(&prange->lock);
400 		prange->svm_bo = NULL;
401 		/* prange should not hold vram page now */
402 		WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
403 		mutex_unlock(&prange->lock);
404 
405 		spin_lock(&svm_bo->list_lock);
406 	}
407 	spin_unlock(&svm_bo->list_lock);
408 
409 	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
410 		struct kfd_process_device *pdd;
411 		struct kfd_process *p;
412 		struct mm_struct *mm;
413 
414 		mm = svm_bo->eviction_fence->mm;
415 		/*
416 		 * A forked child process takes a reference on the svm_bo device pages,
417 		 * so the svm_bo may be released after the parent process is gone.
418 		 */
419 		p = kfd_lookup_process_by_mm(mm);
420 		if (p) {
421 			pdd = kfd_get_process_device_data(svm_bo->node, p);
422 			if (pdd)
423 				atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
424 			kfd_unref_process(p);
425 		}
426 		mmput(mm);
427 	}
428 
429 	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
430 		/* We're not in the eviction worker. Signal the fence. */
431 		dma_fence_signal(&svm_bo->eviction_fence->base);
432 	dma_fence_put(&svm_bo->eviction_fence->base);
433 	amdgpu_bo_unref(&svm_bo->bo);
434 	kfree(svm_bo);
435 }
436 
437 static void svm_range_bo_wq_release(struct work_struct *work)
438 {
439 	struct svm_range_bo *svm_bo;
440 
441 	svm_bo = container_of(work, struct svm_range_bo, release_work);
442 	svm_range_bo_release(&svm_bo->kref);
443 }
444 
445 static void svm_range_bo_release_async(struct kref *kref)
446 {
447 	struct svm_range_bo *svm_bo;
448 
449 	svm_bo = container_of(kref, struct svm_range_bo, kref);
450 	pr_debug("svm_bo 0x%p\n", svm_bo);
451 	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
452 	schedule_work(&svm_bo->release_work);
453 }
454 
455 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
456 {
457 	kref_put(&svm_bo->kref, svm_range_bo_release_async);
458 }
459 
460 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
461 {
462 	if (svm_bo)
463 		kref_put(&svm_bo->kref, svm_range_bo_release);
464 }
465 
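/*
 * Reference-counting sketch (illustrative): svm_bo references are taken
 * with svm_range_bo_ref()/svm_bo_ref_unless_zero() and dropped with
 * svm_range_bo_unref(); svm_range_bo_unref_async() defers the final
 * release to a worker for callers that must not block:
 *
 *	if (svm_bo_ref_unless_zero(svm_bo)) {
 *		...
 *		svm_range_bo_unref(svm_bo);
 *	}
 */
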
466 static bool
467 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
468 {
469 	mutex_lock(&prange->lock);
470 	if (!prange->svm_bo) {
471 		mutex_unlock(&prange->lock);
472 		return false;
473 	}
474 	if (prange->ttm_res) {
475 		/* We still have a reference, all is well */
476 		mutex_unlock(&prange->lock);
477 		return true;
478 	}
479 	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
480 		/*
481 		 * Migrate from GPU to GPU, remove range from source svm_bo->node
482 		 * range list, and return false to allocate svm_bo from destination
483 		 * node.
484 		 */
485 		if (prange->svm_bo->node != node) {
486 			mutex_unlock(&prange->lock);
487 
488 			spin_lock(&prange->svm_bo->list_lock);
489 			list_del_init(&prange->svm_bo_list);
490 			spin_unlock(&prange->svm_bo->list_lock);
491 
492 			svm_range_bo_unref(prange->svm_bo);
493 			return false;
494 		}
495 		if (READ_ONCE(prange->svm_bo->evicting)) {
496 			struct dma_fence *f;
497 			struct svm_range_bo *svm_bo;
498 			/* The BO is getting evicted,
499 			 * we need to get a new one
500 			 */
501 			mutex_unlock(&prange->lock);
502 			svm_bo = prange->svm_bo;
503 			f = dma_fence_get(&svm_bo->eviction_fence->base);
504 			svm_range_bo_unref(prange->svm_bo);
505 			/* wait for the fence to avoid long spin-loop
506 			 * at list_empty_careful
507 			 */
508 			dma_fence_wait(f, false);
509 			dma_fence_put(f);
510 		} else {
511 			/* The BO was still around and we got
512 			 * a new reference to it
513 			 */
514 			mutex_unlock(&prange->lock);
515 			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
516 				 prange->svms, prange->start, prange->last);
517 
518 			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
519 			return true;
520 		}
521 
522 	} else {
523 		mutex_unlock(&prange->lock);
524 	}
525 
526 	/* We need a new svm_bo. Spin-loop to wait for concurrent
527 	 * svm_range_bo_release to finish removing this range from
528 	 * its range list and set prange->svm_bo to null. After this,
529 	 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
530 	 */
531 	while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
532 		cond_resched();
533 
534 	return false;
535 }
536 
537 static struct svm_range_bo *svm_range_bo_new(void)
538 {
539 	struct svm_range_bo *svm_bo;
540 
541 	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
542 	if (!svm_bo)
543 		return NULL;
544 
545 	kref_init(&svm_bo->kref);
546 	INIT_LIST_HEAD(&svm_bo->range_list);
547 	spin_lock_init(&svm_bo->list_lock);
548 
549 	return svm_bo;
550 }
551 
552 int
553 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
554 			bool clear)
555 {
556 	struct kfd_process_device *pdd;
557 	struct amdgpu_bo_param bp;
558 	struct svm_range_bo *svm_bo;
559 	struct amdgpu_bo_user *ubo;
560 	struct amdgpu_bo *bo;
561 	struct kfd_process *p;
562 	struct mm_struct *mm;
563 	int r;
564 
565 	p = container_of(prange->svms, struct kfd_process, svms);
566 	pr_debug("process pid: %d svms 0x%p [0x%lx 0x%lx]\n",
567 		 p->lead_thread->pid, prange->svms,
568 		 prange->start, prange->last);
569 
570 	if (svm_range_validate_svm_bo(node, prange))
571 		return 0;
572 
573 	svm_bo = svm_range_bo_new();
574 	if (!svm_bo) {
575 		pr_debug("failed to alloc svm bo\n");
576 		return -ENOMEM;
577 	}
578 	mm = get_task_mm(p->lead_thread);
579 	if (!mm) {
580 		pr_debug("failed to get mm\n");
581 		kfree(svm_bo);
582 		return -ESRCH;
583 	}
584 	svm_bo->node = node;
585 	svm_bo->eviction_fence =
586 		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
587 					   mm,
588 					   svm_bo);
589 	mmput(mm);
590 	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
591 	svm_bo->evicting = 0;
592 	memset(&bp, 0, sizeof(bp));
593 	bp.size = prange->npages * PAGE_SIZE;
594 	bp.byte_align = PAGE_SIZE;
595 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
596 	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
597 	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
598 	bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
599 	bp.type = ttm_bo_type_device;
600 	bp.resv = NULL;
601 	if (node->xcp)
602 		bp.xcp_id_plus1 = node->xcp->id + 1;
603 
604 	r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
605 	if (r) {
606 		pr_debug("failed %d to create bo\n", r);
607 		goto create_bo_failed;
608 	}
609 	bo = &ubo->bo;
610 
611 	pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
612 		 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
613 		 bp.xcp_id_plus1 - 1);
614 
615 	r = amdgpu_bo_reserve(bo, true);
616 	if (r) {
617 		pr_debug("failed %d to reserve bo\n", r);
618 		goto reserve_bo_failed;
619 	}
620 
621 	if (clear) {
622 		r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
623 		if (r) {
624 			pr_debug("failed %d to sync bo\n", r);
625 			amdgpu_bo_unreserve(bo);
626 			goto reserve_bo_failed;
627 		}
628 	}
629 
630 	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
631 	if (r) {
632 		pr_debug("failed %d to reserve bo\n", r);
633 		amdgpu_bo_unreserve(bo);
634 		goto reserve_bo_failed;
635 	}
636 	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
637 
638 	amdgpu_bo_unreserve(bo);
639 
640 	svm_bo->bo = bo;
641 	prange->svm_bo = svm_bo;
642 	prange->ttm_res = bo->tbo.resource;
643 	prange->offset = 0;
644 
645 	spin_lock(&svm_bo->list_lock);
646 	list_add(&prange->svm_bo_list, &svm_bo->range_list);
647 	spin_unlock(&svm_bo->list_lock);
648 
649 	pdd = svm_range_get_pdd_by_node(prange, node);
650 	if (pdd)
651 		atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
652 
653 	return 0;
654 
655 reserve_bo_failed:
656 	amdgpu_bo_unref(&bo);
657 create_bo_failed:
658 	dma_fence_put(&svm_bo->eviction_fence->base);
659 	kfree(svm_bo);
660 	prange->ttm_res = NULL;
661 
662 	return r;
663 }
664 
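/*
 * Usage sketch (illustrative): migration code allocates the VRAM backing
 * store before moving pages, and releases it on error or when the range
 * leaves VRAM:
 *
 *	r = svm_range_vram_node_new(node, prange, true);
 *	if (r)
 *		return r;
 *	...
 *	svm_range_vram_node_free(prange);
 */
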
665 void svm_range_vram_node_free(struct svm_range *prange)
666 {
667 	/* serialize prange->svm_bo unref */
668 	mutex_lock(&prange->lock);
669 	/* prange->svm_bo has not been unreferenced yet */
670 	if (prange->ttm_res) {
671 		prange->ttm_res = NULL;
672 		mutex_unlock(&prange->lock);
673 		svm_range_bo_unref(prange->svm_bo);
674 	} else
675 		mutex_unlock(&prange->lock);
676 }
677 
678 struct kfd_node *
679 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
680 {
681 	struct kfd_process *p;
682 	struct kfd_process_device *pdd;
683 
684 	p = container_of(prange->svms, struct kfd_process, svms);
685 	pdd = kfd_process_device_data_by_id(p, gpu_id);
686 	if (!pdd) {
687 		pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
688 		return NULL;
689 	}
690 
691 	return pdd->dev;
692 }
693 
694 struct kfd_process_device *
695 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
696 {
697 	struct kfd_process *p;
698 
699 	p = container_of(prange->svms, struct kfd_process, svms);
700 
701 	return kfd_get_process_device_data(node, p);
702 }
703 
704 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
705 {
706 	struct ttm_operation_ctx ctx = { false, false };
707 
708 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
709 
710 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
711 }
712 
713 static int
714 svm_range_check_attr(struct kfd_process *p,
715 		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
716 {
717 	uint32_t i;
718 
719 	for (i = 0; i < nattr; i++) {
720 		uint32_t val = attrs[i].value;
721 		int gpuidx = MAX_GPU_INSTANCE;
722 
723 		switch (attrs[i].type) {
724 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
725 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
726 			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
727 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
728 			break;
729 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
730 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
731 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
732 			break;
733 		case KFD_IOCTL_SVM_ATTR_ACCESS:
734 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
735 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
736 			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
737 			break;
738 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
739 			break;
740 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
741 			break;
742 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
743 			break;
744 		default:
745 			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
746 			return -EINVAL;
747 		}
748 
749 		if (gpuidx < 0) {
750 			pr_debug("no GPU 0x%x found\n", val);
751 			return -EINVAL;
752 		} else if (gpuidx < MAX_GPU_INSTANCE &&
753 			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
754 			pr_debug("GPU 0x%x not supported\n", val);
755 			return -EINVAL;
756 		}
757 	}
758 
759 	return 0;
760 }
761 
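/*
 * Illustrative attribute array as svm_range_check_attr() expects it (the
 * values are made up):
 *
 *	struct kfd_ioctl_svm_attribute attrs[] = {
 *		{ .type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
 *		  .value = KFD_IOCTL_SVM_LOCATION_SYSMEM },
 *		{ .type = KFD_IOCTL_SVM_ATTR_SET_FLAGS,
 *		  .value = KFD_IOCTL_SVM_FLAG_HOST_ACCESS },
 *	};
 */
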
762 static void
763 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
764 		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
765 		      bool *update_mapping)
766 {
767 	uint32_t i;
768 	int gpuidx;
769 
770 	for (i = 0; i < nattr; i++) {
771 		switch (attrs[i].type) {
772 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
773 			prange->preferred_loc = attrs[i].value;
774 			break;
775 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
776 			prange->prefetch_loc = attrs[i].value;
777 			break;
778 		case KFD_IOCTL_SVM_ATTR_ACCESS:
779 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
780 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
781 			if (!p->xnack_enabled)
782 				*update_mapping = true;
783 
784 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
785 							       attrs[i].value);
786 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
787 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
788 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
789 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
790 				bitmap_set(prange->bitmap_access, gpuidx, 1);
791 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
792 			} else {
793 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
794 				bitmap_set(prange->bitmap_aip, gpuidx, 1);
795 			}
796 			break;
797 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
798 			*update_mapping = true;
799 			prange->flags |= attrs[i].value;
800 			break;
801 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
802 			*update_mapping = true;
803 			prange->flags &= ~attrs[i].value;
804 			break;
805 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
806 			prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
807 			break;
808 		default:
809 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
810 		}
811 	}
812 }
813 
814 static bool
815 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
816 			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
817 {
818 	uint32_t i;
819 	int gpuidx;
820 
821 	for (i = 0; i < nattr; i++) {
822 		switch (attrs[i].type) {
823 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
824 			if (prange->preferred_loc != attrs[i].value)
825 				return false;
826 			break;
827 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
828 			/* Prefetch should always trigger a migration even
829 			 * if the value of the attribute didn't change.
830 			 */
831 			return false;
832 		case KFD_IOCTL_SVM_ATTR_ACCESS:
833 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
834 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
835 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
836 							       attrs[i].value);
837 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
838 				if (test_bit(gpuidx, prange->bitmap_access) ||
839 				    test_bit(gpuidx, prange->bitmap_aip))
840 					return false;
841 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
842 				if (!test_bit(gpuidx, prange->bitmap_access))
843 					return false;
844 			} else {
845 				if (!test_bit(gpuidx, prange->bitmap_aip))
846 					return false;
847 			}
848 			break;
849 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
850 			if ((prange->flags & attrs[i].value) != attrs[i].value)
851 				return false;
852 			break;
853 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
854 			if ((prange->flags & attrs[i].value) != 0)
855 				return false;
856 			break;
857 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
858 			if (prange->granularity != attrs[i].value)
859 				return false;
860 			break;
861 		default:
862 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
863 		}
864 	}
865 
866 	return true;
867 }
868 
869 /**
870  * svm_range_debug_dump - print all range information from svms
871  * @svms: svm range list header
872  *
873  * Debug output of svm range start, end and actual location from the svms
874  * interval tree and linked list.
875  *
876  * Context: The caller must hold svms->lock
877  */
878 static void svm_range_debug_dump(struct svm_range_list *svms)
879 {
880 	struct interval_tree_node *node;
881 	struct svm_range *prange;
882 
883 	pr_debug("dump svms 0x%p list\n", svms);
884 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
885 
886 	list_for_each_entry(prange, &svms->list, list) {
887 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
888 			 prange, prange->start, prange->npages,
889 			 prange->start + prange->npages - 1,
890 			 prange->actual_loc);
891 	}
892 
893 	pr_debug("dump svms 0x%p interval tree\n", svms);
894 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
895 	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
896 	while (node) {
897 		prange = container_of(node, struct svm_range, it_node);
898 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
899 			 prange, prange->start, prange->npages,
900 			 prange->start + prange->npages - 1,
901 			 prange->actual_loc);
902 		node = interval_tree_iter_next(node, 0, ~0ULL);
903 	}
904 }
905 
906 static void *
907 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
908 		     uint64_t offset, uint64_t *vram_pages)
909 {
910 	unsigned char *src = (unsigned char *)psrc + offset;
911 	unsigned char *dst;
912 	uint64_t i;
913 
914 	dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
915 	if (!dst)
916 		return NULL;
917 
918 	if (!vram_pages) {
919 		memcpy(dst, src, num_elements * size);
920 		return (void *)dst;
921 	}
922 
923 	*vram_pages = 0;
924 	for (i = 0; i < num_elements; i++) {
925 		dma_addr_t *temp;
926 		temp = (dma_addr_t *)dst + i;
927 		*temp = *((dma_addr_t *)src + i);
928 		if (*temp & SVM_RANGE_VRAM_DOMAIN)
929 			(*vram_pages)++;
930 	}
931 
932 	return (void *)dst;
933 }
934 
935 static int
936 svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
937 {
938 	int i;
939 
940 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
941 		if (!src->dma_addr[i])
942 			continue;
943 		dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
944 					sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
945 		if (!dst->dma_addr[i])
946 			return -ENOMEM;
947 	}
948 
949 	return 0;
950 }
951 
952 static int
953 svm_range_split_array(void *ppnew, void *ppold, size_t size,
954 		      uint64_t old_start, uint64_t old_n,
955 		      uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
956 {
957 	unsigned char *new, *old, *pold;
958 	uint64_t d;
959 
960 	if (!ppold)
961 		return 0;
962 	pold = *(unsigned char **)ppold;
963 	if (!pold)
964 		return 0;
965 
966 	d = (new_start - old_start) * size;
967 	/* get dma addr array for new range and calculate its vram page number */
968 	new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
969 	if (!new)
970 		return -ENOMEM;
971 	d = (new_start == old_start) ? new_n * size : 0;
972 	old = svm_range_copy_array(pold, size, old_n, d, NULL);
973 	if (!old) {
974 		kvfree(new);
975 		return -ENOMEM;
976 	}
977 	kvfree(pold);
978 	*(void **)ppold = old;
979 	*(void **)ppnew = new;
980 
981 	return 0;
982 }
983 
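/*
 * Worked example (illustrative): splitting an old array that covers pages
 * [0x100, 0x1ff] so the old range keeps [0x100, 0x17f] and the new range
 * takes [0x180, 0x1ff] gives d = (0x180 - 0x100) * size for the new copy
 * (entries 0x80..0xff of the old array) and d = 0 for the retained copy
 * (entries 0x00..0x7f).
 */
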
984 static int
985 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
986 		      uint64_t start, uint64_t last)
987 {
988 	uint64_t npages = last - start + 1;
989 	int i, r;
990 
991 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
992 		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
993 					  sizeof(*old->dma_addr[i]), old->start,
994 					  npages, new->start, new->npages,
995 					  old->actual_loc ? &new->vram_pages : NULL);
996 		if (r)
997 			return r;
998 	}
999 	if (old->actual_loc)
1000 		old->vram_pages -= new->vram_pages;
1001 
1002 	return 0;
1003 }
1004 
1005 static int
1006 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
1007 		      uint64_t start, uint64_t last)
1008 {
1009 	uint64_t npages = last - start + 1;
1010 
1011 	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
1012 		 new->svms, new, new->start, start, last);
1013 
1014 	if (new->start == old->start) {
1015 		new->offset = old->offset;
1016 		old->offset += new->npages;
1017 	} else {
1018 		new->offset = old->offset + npages;
1019 	}
1020 
1021 	new->svm_bo = svm_range_bo_ref(old->svm_bo);
1022 	new->ttm_res = old->ttm_res;
1023 
1024 	spin_lock(&new->svm_bo->list_lock);
1025 	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1026 	spin_unlock(&new->svm_bo->list_lock);
1027 
1028 	return 0;
1029 }
1030 
1031 /**
1032  * svm_range_split_adjust - split range and adjust
1033  *
1034  * @new: new range
1035  * @old: the old range
1036  * @start: the old range adjust to start address in pages
1037  * @last: the old range adjust to last address in pages
1038  *
1039  * Copy system memory dma_addr or vram ttm_res from the old range to the new
1040  * range, from new_start up to size new->npages. The remaining old range runs
1041  * from start to last.
1042  *
1043  * Return:
1044  * 0 - OK, -ENOMEM - out of memory
1045  */
1046 static int
1047 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
1048 		      uint64_t start, uint64_t last)
1049 {
1050 	int r;
1051 
1052 	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1053 		 new->svms, new->start, old->start, old->last, start, last);
1054 
1055 	if (new->start < old->start ||
1056 	    new->last > old->last) {
1057 		WARN_ONCE(1, "invalid new range start or last\n");
1058 		return -EINVAL;
1059 	}
1060 
1061 	r = svm_range_split_pages(new, old, start, last);
1062 	if (r)
1063 		return r;
1064 
1065 	if (old->actual_loc && old->ttm_res) {
1066 		r = svm_range_split_nodes(new, old, start, last);
1067 		if (r)
1068 			return r;
1069 	}
1070 
1071 	old->npages = last - start + 1;
1072 	old->start = start;
1073 	old->last = last;
1074 	new->flags = old->flags;
1075 	new->preferred_loc = old->preferred_loc;
1076 	new->prefetch_loc = old->prefetch_loc;
1077 	new->actual_loc = old->actual_loc;
1078 	new->granularity = old->granularity;
1079 	new->mapped_to_gpu = old->mapped_to_gpu;
1080 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1081 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1082 	atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
1083 
1084 	return 0;
1085 }
1086 
1087 /**
1088  * svm_range_split - split a range in 2 ranges
1089  *
1090  * @prange: the svm range to split
1091  * @start: the remaining range start address in pages
1092  * @last: the remaining range last address in pages
1093  * @new: the result new range generated
1094  *
1095  * Two cases only:
1096  * case 1: if start == prange->start
1097  *         prange ==> prange[start, last]
1098  *         new range [last + 1, prange->last]
1099  *
1100  * case 2: if last == prange->last
1101  *         prange ==> prange[start, last]
1102  *         new range [prange->start, start - 1]
1103  *
1104  * Return:
1105  * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1106  */
1107 static int
1108 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1109 		struct svm_range **new)
1110 {
1111 	uint64_t old_start = prange->start;
1112 	uint64_t old_last = prange->last;
1113 	struct svm_range_list *svms;
1114 	int r = 0;
1115 
1116 	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1117 		 old_start, old_last, start, last);
1118 
1119 	if (old_start != start && old_last != last)
1120 		return -EINVAL;
1121 	if (start < old_start || last > old_last)
1122 		return -EINVAL;
1123 
1124 	svms = prange->svms;
1125 	if (old_start == start)
1126 		*new = svm_range_new(svms, last + 1, old_last, false);
1127 	else
1128 		*new = svm_range_new(svms, old_start, start - 1, false);
1129 	if (!*new)
1130 		return -ENOMEM;
1131 
1132 	r = svm_range_split_adjust(*new, prange, start, last);
1133 	if (r) {
1134 		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1135 			 r, old_start, old_last, start, last);
1136 		svm_range_free(*new, false);
1137 		*new = NULL;
1138 	}
1139 
1140 	return r;
1141 }
1142 
1143 static int
1144 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
1145 		     struct list_head *insert_list, struct list_head *remap_list)
1146 {
1147 	unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
1148 	unsigned long start_align = ALIGN(prange->start, 512);
1149 	bool huge_page_mapping = last_align_down > start_align;
1150 	struct svm_range *tail = NULL;
1151 	int r;
1152 
1153 	r = svm_range_split(prange, prange->start, new_last, &tail);
1154 
1155 	if (r)
1156 		return r;
1157 
1158 	list_add(&tail->list, insert_list);
1159 
1160 	if (huge_page_mapping && tail->start > start_align &&
1161 	    tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512)))
1162 		list_add(&tail->update_list, remap_list);
1163 
1164 	return 0;
1165 }
1166 
1167 static int
1168 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
1169 		     struct list_head *insert_list, struct list_head *remap_list)
1170 {
1171 	unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
1172 	unsigned long start_align = ALIGN(prange->start, 512);
1173 	bool huge_page_mapping = last_align_down > start_align;
1174 	struct svm_range *head = NULL;
1175 	int r;
1176 
1177 	r = svm_range_split(prange, new_start, prange->last, &head);
1178 
1179 	if (r)
1180 		return r;
1181 
1182 	list_add(&head->list, insert_list);
1183 
1184 	if (huge_page_mapping && head->last + 1 > start_align &&
1185 	    head->last + 1 < last_align_down && (!IS_ALIGNED(head->last, 512)))
1186 		list_add(&head->update_list, remap_list);
1187 
1188 	return 0;
1189 }
1190 
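/*
 * Note on the 512-page alignment above (illustrative numbers): with 4KB
 * base pages, 512 pages equal one 2MB huge-page PTE:
 *
 *	512 * 4096 == 2 * 1024 * 1024
 *
 * A head or tail left unaligned inside a formerly huge-page mapped region
 * must be remapped with smaller PTEs, hence remap_list.
 */
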
1191 static void
1192 svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op)
1193 {
1194 	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1195 		 pchild, pchild->start, pchild->last, prange, op);
1196 
1197 	pchild->work_item.mm = NULL;
1198 	pchild->work_item.op = op;
1199 	list_add_tail(&pchild->child_list, &prange->child_list);
1200 }
1201 
1202 static bool
1203 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1204 {
1205 	return (node_a->adev == node_b->adev ||
1206 		amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
1207 }
1208 
1209 static uint64_t
1210 svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
1211 			struct svm_range *prange, int domain)
1212 {
1213 	struct kfd_node *bo_node;
1214 	uint32_t flags = prange->flags;
1215 	uint32_t mapping_flags = 0;
1216 	uint32_t gc_ip_version = KFD_GC_VERSION(node);
1217 	uint64_t pte_flags;
1218 	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1219 	bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
1220 	bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
1221 	unsigned int mtype_local;
1222 
1223 	if (domain == SVM_RANGE_VRAM_DOMAIN)
1224 		bo_node = prange->svm_bo->node;
1225 
1226 	switch (gc_ip_version) {
1227 	case IP_VERSION(9, 4, 1):
1228 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1229 			if (bo_node == node) {
1230 				mapping_flags |= coherent ?
1231 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1232 			} else {
1233 				mapping_flags |= coherent ?
1234 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1235 				if (svm_nodes_in_same_hive(node, bo_node))
1236 					snoop = true;
1237 			}
1238 		} else {
1239 			mapping_flags |= coherent ?
1240 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1241 		}
1242 		break;
1243 	case IP_VERSION(9, 4, 2):
1244 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1245 			if (bo_node == node) {
1246 				mapping_flags |= coherent ?
1247 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1248 				if (node->adev->gmc.xgmi.connected_to_cpu)
1249 					snoop = true;
1250 			} else {
1251 				mapping_flags |= coherent ?
1252 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1253 				if (svm_nodes_in_same_hive(node, bo_node))
1254 					snoop = true;
1255 			}
1256 		} else {
1257 			mapping_flags |= coherent ?
1258 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1259 		}
1260 		break;
1261 	case IP_VERSION(9, 4, 3):
1262 	case IP_VERSION(9, 4, 4):
1263 	case IP_VERSION(9, 5, 0):
1264 		if (ext_coherent)
1265 			mtype_local = AMDGPU_VM_MTYPE_CC;
1266 		else
1267 			mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1268 				amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1269 		snoop = true;
1270 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1271 			/* local HBM region close to partition */
1272 			if (bo_node->adev == node->adev &&
1273 			    (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
1274 				mapping_flags |= mtype_local;
1275 			/* local HBM region far from partition or remote XGMI GPU
1276 			 * with regular system scope coherence
1277 			 */
1278 			else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
1279 				mapping_flags |= AMDGPU_VM_MTYPE_NC;
1280 			/* PCIe P2P on GPUs pre-9.5.0 */
1281 			else if (gc_ip_version < IP_VERSION(9, 5, 0) &&
1282 				 !svm_nodes_in_same_hive(bo_node, node))
1283 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
1284 			/* Other remote memory */
1285 			else
1286 				mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1287 		/* system memory accessed by the APU */
1288 		} else if (node->adev->flags & AMD_IS_APU) {
1289 			/* On NUMA systems, locality is determined per-page
1290 			 * in amdgpu_gmc_override_vm_pte_flags
1291 			 */
1292 			if (num_possible_nodes() <= 1)
1293 				mapping_flags |= mtype_local;
1294 			else
1295 				mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1296 		/* system memory accessed by the dGPU */
1297 		} else {
1298 			if (gc_ip_version < IP_VERSION(9, 5, 0) || ext_coherent)
1299 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
1300 			else
1301 				mapping_flags |= AMDGPU_VM_MTYPE_NC;
1302 		}
1303 		break;
1304 	case IP_VERSION(12, 0, 0):
1305 	case IP_VERSION(12, 0, 1):
1306 		mapping_flags |= AMDGPU_VM_MTYPE_NC;
1307 		break;
1308 	default:
1309 		mapping_flags |= coherent ?
1310 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1311 	}
1312 
1313 	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1314 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1315 
1316 	pte_flags = AMDGPU_PTE_VALID;
1317 	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1318 	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1319 	if (gc_ip_version >= IP_VERSION(12, 0, 0))
1320 		pte_flags |= AMDGPU_PTE_IS_PTE;
1321 
1322 	amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
1323 	pte_flags |= AMDGPU_PTE_READABLE;
1324 	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
1325 		pte_flags |= AMDGPU_PTE_WRITEABLE;
1326 
1327 	if ((gc_ip_version == IP_VERSION(12, 1, 0)) &&
1328 	    node->adev->have_atomics_support)
1329 		pte_flags |= AMDGPU_PTE_BUS_ATOMICS;
1330 
1331 	return pte_flags;
1332 }
1333 
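/*
 * Example outcome (illustrative): on GFX 9.4.1, mapping another GPU's VRAM
 * within the same XGMI hive with the COHERENT flag selects MTYPE_UC and
 * snoop, so pte_flags ends up with AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
 * AMDGPU_PTE_READABLE, plus AMDGPU_PTE_WRITEABLE unless
 * KFD_IOCTL_SVM_FLAG_GPU_RO is set.
 */
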
1334 static int
1335 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1336 			 uint64_t start, uint64_t last,
1337 			 struct dma_fence **fence)
1338 {
1339 	uint64_t init_pte_value = adev->gmc.init_pte_flags;
1340 
1341 	pr_debug("[0x%llx 0x%llx]\n", start, last);
1342 
1343 	return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
1344 				      last, init_pte_value, 0, 0, NULL, NULL,
1345 				      fence);
1346 }
1347 
1348 static int
1349 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1350 			  unsigned long last, uint32_t trigger)
1351 {
1352 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1353 	struct kfd_process_device *pdd;
1354 	struct dma_fence *fence = NULL;
1355 	struct kfd_process *p;
1356 	uint32_t gpuidx;
1357 	int r = 0;
1358 
1359 	if (!prange->mapped_to_gpu) {
1360 		pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1361 			 prange, prange->start, prange->last);
1362 		return 0;
1363 	}
1364 
1365 	if (prange->start == start && prange->last == last) {
1366 		pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1367 		prange->mapped_to_gpu = false;
1368 	}
1369 
1370 	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1371 		  MAX_GPU_INSTANCE);
1372 	p = container_of(prange->svms, struct kfd_process, svms);
1373 
1374 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1375 		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1376 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1377 		if (!pdd) {
1378 			pr_debug("failed to find device idx %d\n", gpuidx);
1379 			return -EINVAL;
1380 		}
1381 
1382 		kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1383 					     start, last, trigger);
1384 
1385 		r = svm_range_unmap_from_gpu(pdd->dev->adev,
1386 					     drm_priv_to_vm(pdd->drm_priv),
1387 					     start, last, &fence);
1388 		if (r)
1389 			break;
1390 
1391 		if (fence) {
1392 			r = dma_fence_wait(fence, false);
1393 			dma_fence_put(fence);
1394 			fence = NULL;
1395 			if (r)
1396 				break;
1397 		}
1398 		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
1399 	}
1400 
1401 	return r;
1402 }
1403 
1404 static int
1405 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1406 		     unsigned long offset, unsigned long npages, bool readonly,
1407 		     dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1408 		     struct dma_fence **fence, bool flush_tlb)
1409 {
1410 	struct amdgpu_device *adev = pdd->dev->adev;
1411 	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1412 	uint64_t pte_flags;
1413 	unsigned long last_start;
1414 	int last_domain;
1415 	int r = 0;
1416 	int64_t i, j;
1417 
1418 	last_start = prange->start + offset;
1419 
1420 	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1421 		 last_start, last_start + npages - 1, readonly);
1422 
1423 	for (i = offset; i < offset + npages; i++) {
1424 		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1425 		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1426 
1427 		/* Collect all pages in the same address range and memory domain
1428 		 * that can be mapped with a single call to update mapping.
1429 		 */
1430 		if (i < offset + npages - 1 &&
1431 		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1432 			continue;
1433 
1434 		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1435 			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1436 
1437 		pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
1438 		if (readonly)
1439 			pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1440 
1441 		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1442 			 prange->svms, last_start, prange->start + i,
1443 			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1444 			 pte_flags);
1445 
1446 		/* For dGPU mode, the same vm_manager allocates VRAM for different
1447 		 * memory partitions based on fpfn/lpfn, so we should use the same
1448 		 * vm_manager.vram_base_offset regardless of the memory partition.
1449 		 */
1450 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
1451 					   NULL, last_start, prange->start + i,
1452 					   pte_flags,
1453 					   (last_start - prange->start) << PAGE_SHIFT,
1454 					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1455 					   NULL, dma_addr, &vm->last_update);
1456 
1457 		for (j = last_start - prange->start; j <= i; j++)
1458 			dma_addr[j] |= last_domain;
1459 
1460 		if (r) {
1461 			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1462 			goto out;
1463 		}
1464 		last_start = prange->start + i + 1;
1465 	}
1466 
1467 	r = amdgpu_vm_update_pdes(adev, vm, false);
1468 	if (r) {
1469 		pr_debug("failed %d to update directories 0x%lx\n", r,
1470 			 prange->start);
1471 		goto out;
1472 	}
1473 
1474 	if (fence)
1475 		*fence = dma_fence_get(vm->last_update);
1476 
1477 out:
1478 	return r;
1479 }
1480 
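/*
 * Batching example (illustrative): for per-page domains S S V V S over
 * pages 0..4, the loop above issues three amdgpu_vm_update_range() calls,
 * covering pages [0, 1] (system), [2, 3] (VRAM) and [4, 4] (system).
 */
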
1481 static int
1482 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1483 		      unsigned long npages, bool readonly,
1484 		      unsigned long *bitmap, bool wait, bool flush_tlb)
1485 {
1486 	struct kfd_process_device *pdd;
1487 	struct amdgpu_device *bo_adev = NULL;
1488 	struct kfd_process *p;
1489 	struct dma_fence *fence = NULL;
1490 	uint32_t gpuidx;
1491 	int r = 0;
1492 
1493 	if (prange->svm_bo && prange->ttm_res)
1494 		bo_adev = prange->svm_bo->node->adev;
1495 
1496 	p = container_of(prange->svms, struct kfd_process, svms);
1497 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1498 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1499 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1500 		if (!pdd) {
1501 			pr_debug("failed to find device idx %d\n", gpuidx);
1502 			return -EINVAL;
1503 		}
1504 
1505 		pdd = kfd_bind_process_to_device(pdd->dev, p);
1506 		if (IS_ERR(pdd))
1507 			return -EINVAL;
1508 
1509 		if (bo_adev && pdd->dev->adev != bo_adev &&
1510 		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1511 			pr_debug("cannot map to device idx %d\n", gpuidx);
1512 			continue;
1513 		}
1514 
1515 		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1516 					 prange->dma_addr[gpuidx],
1517 					 bo_adev, wait ? &fence : NULL,
1518 					 flush_tlb);
1519 		if (r)
1520 			break;
1521 
1522 		if (fence) {
1523 			r = dma_fence_wait(fence, false);
1524 			dma_fence_put(fence);
1525 			fence = NULL;
1526 			if (r) {
1527 				pr_debug("failed %d to dma fence wait\n", r);
1528 				break;
1529 			}
1530 		}
1531 
1532 		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1533 	}
1534 
1535 	return r;
1536 }
1537 
1538 struct svm_validate_context {
1539 	struct kfd_process *process;
1540 	struct svm_range *prange;
1541 	bool intr;
1542 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1543 	struct drm_exec exec;
1544 };
1545 
1546 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
1547 {
1548 	struct kfd_process_device *pdd;
1549 	struct amdgpu_vm *vm;
1550 	uint32_t gpuidx;
1551 	int r;
1552 
1553 	drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0, 0);
1554 	drm_exec_until_all_locked(&ctx->exec) {
1555 		for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1556 			pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1557 			if (!pdd) {
1558 				pr_debug("failed to find device idx %d\n", gpuidx);
1559 				r = -EINVAL;
1560 				goto unreserve_out;
1561 			}
1562 			vm = drm_priv_to_vm(pdd->drm_priv);
1563 
1564 			r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1565 			drm_exec_retry_on_contention(&ctx->exec);
1566 			if (unlikely(r)) {
1567 				pr_debug("failed %d to reserve bo\n", r);
1568 				goto unreserve_out;
1569 			}
1570 		}
1571 	}
1572 
1573 	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1574 		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1575 		if (!pdd) {
1576 			pr_debug("failed to find device idx %d\n", gpuidx);
1577 			r = -EINVAL;
1578 			goto unreserve_out;
1579 		}
1580 
1581 		r = amdgpu_vm_validate(pdd->dev->adev,
1582 				       drm_priv_to_vm(pdd->drm_priv), NULL,
1583 				       svm_range_bo_validate, NULL);
1584 		if (r) {
1585 			pr_debug("failed %d validate pt bos\n", r);
1586 			goto unreserve_out;
1587 		}
1588 	}
1589 
1590 	return 0;
1591 
1592 unreserve_out:
1593 	drm_exec_fini(&ctx->exec);
1594 	return r;
1595 }
1596 
1597 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1598 {
1599 	drm_exec_fini(&ctx->exec);
1600 }
1601 
1602 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1603 {
1604 	struct kfd_process_device *pdd;
1605 
1606 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1607 	if (!pdd)
1608 		return NULL;
1609 
1610 	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1611 }
1612 
1613 /*
1614  * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1615  *
1616  * To prevent concurrent destruction or change of range attributes, the
1617  * svm_read_lock must be held. The caller must not hold the svm_write_lock
1618  * because that would block concurrent evictions and lead to deadlocks. To
1619  * serialize concurrent migrations or validations of the same range, the
1620  * prange->migrate_mutex must be held.
1621  *
1622  * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1623  * eviction fence).
1624  *
1625  * The following sequence ensures race-free validation and GPU mapping:
1626  *
1627  * 1. Reserve page table (and SVM BO if range is in VRAM)
1628  * 2. hmm_range_fault to get page addresses (if system memory)
1629  * 3. DMA-map pages (if system memory)
1630  * 4-a. Take notifier lock
1631  * 4-b. Check that pages still valid (mmu_interval_read_retry)
1632  * 4-c. Check that the range was not split or otherwise invalidated
1633  * 4-d. Update GPU page table
1634  * 4-e. Release notifier lock
1635  * 5. Release page table (and SVM BO) reservation
1636  */
1637 static int svm_range_validate_and_map(struct mm_struct *mm,
1638 				      unsigned long map_start, unsigned long map_last,
1639 				      struct svm_range *prange, int32_t gpuidx,
1640 				      bool intr, bool wait, bool flush_tlb)
1641 {
1642 	struct svm_validate_context *ctx;
1643 	unsigned long start, end, addr;
1644 	struct kfd_process *p;
1645 	void *owner;
1646 	int32_t idx;
1647 	int r = 0;
1648 
1649 	ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
1650 	if (!ctx)
1651 		return -ENOMEM;
1652 	ctx->process = container_of(prange->svms, struct kfd_process, svms);
1653 	ctx->prange = prange;
1654 	ctx->intr = intr;
1655 
1656 	if (gpuidx < MAX_GPU_INSTANCE) {
1657 		bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1658 		bitmap_set(ctx->bitmap, gpuidx, 1);
1659 	} else if (ctx->process->xnack_enabled) {
1660 		bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1661 
1662 		/* If the range is prefetched to a GPU, or a GPU retry fault
1663 		 * migrates it to a GPU that has the ACCESS attribute for the
1664 		 * range, create the mapping on that GPU.
1665 		 */
1666 		if (prange->actual_loc) {
1667 			gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1668 							prange->actual_loc);
1669 			if (gpuidx < 0) {
1670 				WARN_ONCE(1, "failed get device by id 0x%x\n",
1671 					 prange->actual_loc);
1672 				r = -EINVAL;
1673 				goto free_ctx;
1674 			}
1675 			if (test_bit(gpuidx, prange->bitmap_access))
1676 				bitmap_set(ctx->bitmap, gpuidx, 1);
1677 		}
1678 
1679 		/*
1680 		 * If prange is already mapped or with always mapped flag,
1681 		 * update mapping on GPUs with ACCESS attribute
1682 		 */
1683 		if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1684 			if (prange->mapped_to_gpu ||
1685 			    prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1686 				bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1687 		}
1688 	} else {
1689 		bitmap_or(ctx->bitmap, prange->bitmap_access,
1690 			  prange->bitmap_aip, MAX_GPU_INSTANCE);
1691 	}
1692 
1693 	if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1694 		r = 0;
1695 		goto free_ctx;
1696 	}
1697 
1698 	if (prange->actual_loc && !prange->ttm_res) {
1699 		/* This should never happen. actual_loc gets set by
1700 		 * svm_migrate_ram_to_vram after allocating a BO.
1701 		 */
1702 		WARN_ONCE(1, "VRAM BO missing during validation\n");
1703 		r = -EINVAL;
1704 		goto free_ctx;
1705 	}
1706 
1707 	r = svm_range_reserve_bos(ctx, intr);
1708 	if (r)
1709 		goto free_ctx;
1710 
1711 	p = container_of(prange->svms, struct kfd_process, svms);
1712 	owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1713 						MAX_GPU_INSTANCE));
1714 	for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1715 		if (kfd_svm_page_owner(p, idx) != owner) {
1716 			owner = NULL;
1717 			break;
1718 		}
1719 	}
1720 
1721 	start = map_start << PAGE_SHIFT;
1722 	end = (map_last + 1) << PAGE_SHIFT;
1723 	for (addr = start; !r && addr < end; ) {
1724 		struct amdgpu_hmm_range *range = NULL;
1725 		unsigned long map_start_vma;
1726 		unsigned long map_last_vma;
1727 		struct vm_area_struct *vma;
1728 		unsigned long next = 0;
1729 		unsigned long offset;
1730 		unsigned long npages;
1731 		bool readonly;
1732 
1733 		vma = vma_lookup(mm, addr);
1734 		if (vma) {
1735 			readonly = !(vma->vm_flags & VM_WRITE);
1736 
1737 			next = min(vma->vm_end, end);
1738 			npages = (next - addr) >> PAGE_SHIFT;
1739 			/* HMM requires at least READ permission. With PROT_NONE,
1740 			 * unmap the memory; if it's not already mapped, this is a no-op.
1741 			 * If PROT_WRITE is provided without READ, warn first, then unmap.
1742 			 */
1743 			if (!(vma->vm_flags & VM_READ)) {
1744 				unsigned long e, s;
1745 
1746 				svm_range_lock(prange);
1747 				if (vma->vm_flags & VM_WRITE)
1748 					pr_debug("VM_WRITE without VM_READ is not supported\n");
1749 				s = max(start, prange->start);
1750 				e = min(end, prange->last);
1751 				if (e >= s)
1752 					r = svm_range_unmap_from_gpus(prange, s, e,
1753 						       KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
1754 				svm_range_unlock(prange);
1755 				/* If unmap returns non-zero, we'll bail on the next for loop
1756 				 * iteration, so just leave r and continue
1757 				 */
1758 				addr = next;
1759 				continue;
1760 			}
1761 
1762 			WRITE_ONCE(p->svms.faulting_task, current);
1763 			range = amdgpu_hmm_range_alloc(NULL);
1764 			if (likely(range))
1765 				r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1766 							       readonly, owner, range);
1767 			else
1768 				r = -ENOMEM;
1769 			WRITE_ONCE(p->svms.faulting_task, NULL);
1770 			if (r)
1771 				pr_debug("failed %d to get svm range pages\n", r);
1772 		} else {
1773 			r = -EFAULT;
1774 		}
1775 
1776 		if (!r) {
1777 			offset = (addr >> PAGE_SHIFT) - prange->start;
1778 			r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1779 					      range->hmm_range.hmm_pfns);
1780 			if (r)
1781 				pr_debug("failed %d to dma map range\n", r);
1782 		}
1783 
1784 		svm_range_lock(prange);
1785 
1786 		/* If HMM updated the range while we worked on it, the pages must
1787 		 * be validated again. Override the return value with -EAGAIN only
1788 		 * if the prior steps were successful.
1789 		 */
1790 		if (range && !amdgpu_hmm_range_valid(range) && !r) {
1791 			pr_debug("hmm update the range, need validate again\n");
1792 			r = -EAGAIN;
1793 		}
1794 
1795 		/* Free the hmm range */
1796 		amdgpu_hmm_range_free(range);
1797 
1798 		if (!r && !list_empty(&prange->child_list)) {
1799 			pr_debug("range split by unmap in parallel, validate again\n");
1800 			r = -EAGAIN;
1801 		}
1802 
1803 		if (!r) {
1804 			map_start_vma = max(map_start, prange->start + offset);
1805 			map_last_vma = min(map_last, prange->start + offset + npages - 1);
1806 			if (map_start_vma <= map_last_vma) {
1807 				offset = map_start_vma - prange->start;
1808 				npages = map_last_vma - map_start_vma + 1;
1809 				r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1810 							  ctx->bitmap, wait, flush_tlb);
1811 			}
1812 		}
1813 
1814 		if (!r && next == end)
1815 			prange->mapped_to_gpu = true;
1816 
1817 		svm_range_unlock(prange);
1818 
1819 		addr = next;
1820 	}
1821 
1822 	svm_range_unreserve_bos(ctx);
1823 	if (!r)
1824 		prange->validate_timestamp = ktime_get_boottime();
1825 
1826 free_ctx:
1827 	kfree(ctx);
1828 
1829 	return r;
1830 }
1831 
1832 /**
1833  * svm_range_list_lock_and_flush_work - flush pending deferred work
1834  *
1835  * @svms: the svm range list
1836  * @mm: the mm structure
1837  *
1838  * Context: Returns with mmap write lock held, pending deferred work flushed
1839  *
1840  */
1841 void
1842 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1843 				   struct mm_struct *mm)
1844 {
1845 retry_flush_work:
1846 	flush_work(&svms->deferred_list_work);
1847 	mmap_write_lock(mm);
1848 
1849 	if (list_empty(&svms->deferred_range_list))
1850 		return;
1851 	mmap_write_unlock(mm);
1852 	pr_debug("retry flush\n");
1853 	goto retry_flush_work;
1854 }
1855 
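/* svm_range_restore_work - delayed work to revalidate evicted ranges
 * @work: the embedded delayed_work in svm_range_list
 *
 * Runs AMDGPU_SVM_RANGE_RESTORE_DELAY_MS after the first eviction. Walks the
 * range list, revalidates and remaps every range with a non-zero invalid
 * counter, then resumes the user queues with kgd2kfd_resume_mm. If anything
 * fails, or a new invalidation raced in, the work reschedules itself.
 */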
1856 static void svm_range_restore_work(struct work_struct *work)
1857 {
1858 	struct delayed_work *dwork = to_delayed_work(work);
1859 	struct amdkfd_process_info *process_info;
1860 	struct svm_range_list *svms;
1861 	struct svm_range *prange;
1862 	struct kfd_process *p;
1863 	struct mm_struct *mm;
1864 	int evicted_ranges;
1865 	int invalid;
1866 	int r;
1867 
1868 	svms = container_of(dwork, struct svm_range_list, restore_work);
1869 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1870 	if (!evicted_ranges)
1871 		return;
1872 
1873 	pr_debug("restore svm ranges\n");
1874 
1875 	p = container_of(svms, struct kfd_process, svms);
1876 	process_info = p->kgd_process_info;
1877 
1878 	/* Keep mm reference when svm_range_validate_and_map ranges */
1879 	mm = get_task_mm(p->lead_thread);
1880 	if (!mm) {
1881 		pr_debug("svms 0x%p process mm gone\n", svms);
1882 		return;
1883 	}
1884 
1885 	mutex_lock(&process_info->lock);
1886 	svm_range_list_lock_and_flush_work(svms, mm);
1887 	mutex_lock(&svms->lock);
1888 
1889 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1890 
1891 	list_for_each_entry(prange, &svms->list, list) {
1892 		invalid = atomic_read(&prange->invalid);
1893 		if (!invalid)
1894 			continue;
1895 
1896 		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1897 			 prange->svms, prange, prange->start, prange->last,
1898 			 invalid);
1899 
1900 		/*
1901 		 * If the range is migrating, wait for the migration to finish.
1902 		 */
1903 		mutex_lock(&prange->migrate_mutex);
1904 
1905 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
1906 					       MAX_GPU_INSTANCE, false, true, false);
1907 		if (r)
1908 			pr_debug("failed %d to map 0x%lx to gpus\n", r,
1909 				 prange->start);
1910 
1911 		mutex_unlock(&prange->migrate_mutex);
1912 		if (r)
1913 			goto out_reschedule;
1914 
1915 		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1916 			goto out_reschedule;
1917 	}
1918 
1919 	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1920 	    evicted_ranges)
1921 		goto out_reschedule;
1922 
1923 	evicted_ranges = 0;
1924 
1925 	r = kgd2kfd_resume_mm(mm);
1926 	if (r) {
1927 		/* No recovery from this failure. Probably the CP is
1928 		 * hanging. No point trying again.
1929 		 */
1930 		pr_debug("failed %d to resume KFD\n", r);
1931 	}
1932 
1933 	pr_debug("restore svm ranges successfully\n");
1934 
1935 out_reschedule:
1936 	mutex_unlock(&svms->lock);
1937 	mmap_write_unlock(mm);
1938 	mutex_unlock(&process_info->lock);
1939 
1940 	/* If validation failed, reschedule another attempt */
1941 	if (evicted_ranges) {
1942 		pr_debug("reschedule to restore svm range\n");
1943 		queue_delayed_work(system_freezable_wq, &svms->restore_work,
1944 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1945 
1946 		kfd_smi_event_queue_restore_rescheduled(mm);
1947 	}
1948 	mmput(mm);
1949 }
1950 
1951 /**
1952  * svm_range_evict - evict svm range
1953  * @prange: svm range structure
1954  * @mm: current process mm_struct
1955  * @start: first page of the range to evict, in pages
1956  * @last: last page of the range to evict, in pages
1957  * @event: mmu notifier event when range is evicted or migrated
1958  *
1959  * Stop all queues of the process to ensure the GPU doesn't access the
1960  * memory, then return to let the CPU evict the buffer and proceed with the
1961  * CPU page table update.
1962  *
1963  * No lock is needed to sync CPU page table invalidation with GPU execution.
1964  * If invalidation happens while the restore work is running, the restore work
1965  * restarts to pick up the latest CPU page mapping, then starts the queues.
1966  */
1967 static int
1968 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1969 		unsigned long start, unsigned long last,
1970 		enum mmu_notifier_event event)
1971 {
1972 	struct svm_range_list *svms = prange->svms;
1973 	struct svm_range *pchild;
1974 	struct kfd_process *p;
1975 	int r = 0;
1976 
1977 	p = container_of(svms, struct kfd_process, svms);
1978 
1979 	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1980 		 svms, prange->start, prange->last, start, last);
1981 
1982 	if (!p->xnack_enabled ||
1983 	    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1984 		int evicted_ranges;
1985 		bool mapped = prange->mapped_to_gpu;
1986 
1987 		list_for_each_entry(pchild, &prange->child_list, child_list) {
1988 			if (!pchild->mapped_to_gpu)
1989 				continue;
1990 			mapped = true;
1991 			mutex_lock_nested(&pchild->lock, 1);
1992 			if (pchild->start <= last && pchild->last >= start) {
1993 				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1994 					 pchild->start, pchild->last);
1995 				atomic_inc(&pchild->invalid);
1996 			}
1997 			mutex_unlock(&pchild->lock);
1998 		}
1999 
2000 		if (!mapped)
2001 			return r;
2002 
2003 		if (prange->start <= last && prange->last >= start)
2004 			atomic_inc(&prange->invalid);
2005 
2006 		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
2007 		if (evicted_ranges != 1)
2008 			return r;
2009 
2010 		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
2011 			 prange->svms, prange->start, prange->last);
2012 
2013 		/* First eviction, stop the queues */
2014 		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2015 		if (r)
2016 			pr_debug("failed to quiesce KFD\n");
2017 
2018 		pr_debug("schedule to restore svm %p ranges\n", svms);
2019 		queue_delayed_work(system_freezable_wq, &svms->restore_work,
2020 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
2021 	} else {
2022 		unsigned long s, l;
2023 		uint32_t trigger;
2024 
2025 		if (event == MMU_NOTIFY_MIGRATE)
2026 			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
2027 		else
2028 			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
2029 
2030 		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
2031 			 prange->svms, start, last);
2032 		list_for_each_entry(pchild, &prange->child_list, child_list) {
2033 			mutex_lock_nested(&pchild->lock, 1);
2034 			s = max(start, pchild->start);
2035 			l = min(last, pchild->last);
2036 			if (l >= s)
2037 				svm_range_unmap_from_gpus(pchild, s, l, trigger);
2038 			mutex_unlock(&pchild->lock);
2039 		}
2040 		s = max(start, prange->start);
2041 		l = min(last, prange->last);
2042 		if (l >= s)
2043 			svm_range_unmap_from_gpus(prange, s, l, trigger);
2044 	}
2045 
2046 	return r;
2047 }
2048 
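/* svm_range_clone - duplicate an svm range for a transactional update
 * @old: the range to clone
 *
 * Copies attributes and DMA addresses into a new range covering the same
 * interval. A VRAM-backed range shares the svm_bo with the original (taking
 * a reference) instead of copying the backing memory, so cloning is cheap
 * and the original stays valid until the update commits.
 */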
2049 static struct svm_range *svm_range_clone(struct svm_range *old)
2050 {
2051 	struct svm_range *new;
2052 
2053 	new = svm_range_new(old->svms, old->start, old->last, false);
2054 	if (!new)
2055 		return NULL;
2056 	if (svm_range_copy_dma_addrs(new, old)) {
2057 		svm_range_free(new, false);
2058 		return NULL;
2059 	}
2060 	if (old->svm_bo) {
2061 		new->ttm_res = old->ttm_res;
2062 		new->offset = old->offset;
2063 		new->svm_bo = svm_range_bo_ref(old->svm_bo);
2064 		spin_lock(&new->svm_bo->list_lock);
2065 		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
2066 		spin_unlock(&new->svm_bo->list_lock);
2067 	}
2068 	new->flags = old->flags;
2069 	new->preferred_loc = old->preferred_loc;
2070 	new->prefetch_loc = old->prefetch_loc;
2071 	new->actual_loc = old->actual_loc;
2072 	new->granularity = old->granularity;
2073 	new->mapped_to_gpu = old->mapped_to_gpu;
2074 	new->vram_pages = old->vram_pages;
2075 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
2076 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
2077 	atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
2078 
2079 	return new;
2080 }
2081 
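/* svm_range_set_max_pages - update the global limit on svm range size
 * @adev: the device that was just initialized
 *
 * KFD_XCP_MEMORY_SIZE() >> 17 is 1/32 of the partition memory in 4KiB pages
 * (>> 5 for the 1/32 fraction, >> 12 for the page size). The result is
 * clamped between 2^9 pages (2MB) and 2^18 pages (1GB) and rounded down to a
 * power of two. The lock-free cmpxchg loop keeps max_svm_range_pages at the
 * minimum across all GPUs when devices are initialized concurrently.
 */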
2082 void svm_range_set_max_pages(struct amdgpu_device *adev)
2083 {
2084 	uint64_t max_pages;
2085 	uint64_t pages, _pages;
2086 	uint64_t min_pages = 0;
2087 	int i, id;
2088 
2089 	for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
2090 		if (adev->kfd.dev->nodes[i]->xcp)
2091 			id = adev->kfd.dev->nodes[i]->xcp->id;
2092 		else
2093 			id = -1;
2094 		pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
2095 		pages = clamp(pages, 1ULL << 9, 1ULL << 18);
2096 		pages = rounddown_pow_of_two(pages);
2097 		min_pages = min_not_zero(min_pages, pages);
2098 	}
2099 
2100 	do {
2101 		max_pages = READ_ONCE(max_svm_range_pages);
2102 		_pages = min_not_zero(max_pages, min_pages);
2103 	} while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2104 }
2105 
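/* svm_range_split_new - create new ranges in max_pages sized chunks
 *
 * Covers [start, last] with new ranges that never cross a max_pages-aligned
 * boundary, so no single range exceeds max_svm_range_pages. New ranges are
 * added to both insert_list and update_list.
 */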
2106 static int
2107 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2108 		    uint64_t max_pages, struct list_head *insert_list,
2109 		    struct list_head *update_list)
2110 {
2111 	struct svm_range *prange;
2112 	uint64_t l;
2113 
2114 	pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
2115 		 max_pages, start, last);
2116 
2117 	while (last >= start) {
2118 		l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2119 
2120 		prange = svm_range_new(svms, start, l, true);
2121 		if (!prange)
2122 			return -ENOMEM;
2123 		list_add(&prange->list, insert_list);
2124 		list_add(&prange->update_list, update_list);
2125 
2126 		start = l + 1;
2127 	}
2128 	return 0;
2129 }
2130 
2131 /**
2132  * svm_range_add - add svm range and handle overlap
2133  * @p: the process to add this range to
2134  * @start: range start address, in pages
2135  * @size: range size, in pages
2136  * @nattr: number of attributes
2137  * @attrs: array of attributes
2138  * @update_list: output, the ranges that need validation and GPU mapping update
2139  * @insert_list: output, the ranges that need to be inserted into svms
2140  * @remove_list: output, the ranges that are replaced and must be removed from svms
2141  * @remap_list: output, the unaligned svm ranges that need remapping
2142  *
2143  * Check if the virtual address range has overlap with any existing ranges,
2144  * split partly overlapping ranges and add new ranges in the gaps. All changes
2145  * should be applied to the range_list and interval tree transactionally. If
2146  * any range split or allocation fails, the entire update fails. Therefore any
2147  * existing overlapping svm_ranges are cloned and the original svm_ranges left
2148  * unchanged.
2149  *
2150  * If the transaction succeeds, the caller can update and insert clones and
2151  * new ranges, then free the originals.
2152  *
2153  * Otherwise the caller can free the clones and new ranges, while the old
2154  * svm_ranges remain unchanged.
2155  *
2156  * Context: Process context, caller must hold svms->lock
2157  *
2158  * Return:
2159  * 0 - OK, otherwise error code
2160  */
2161 static int
2162 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2163 	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2164 	      struct list_head *update_list, struct list_head *insert_list,
2165 	      struct list_head *remove_list, struct list_head *remap_list)
2166 {
2167 	unsigned long last = start + size - 1UL;
2168 	struct svm_range_list *svms = &p->svms;
2169 	struct interval_tree_node *node;
2170 	struct svm_range *prange;
2171 	struct svm_range *tmp;
2172 	struct list_head new_list;
2173 	int r = 0;
2174 
2175 	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2176 
2177 	INIT_LIST_HEAD(update_list);
2178 	INIT_LIST_HEAD(insert_list);
2179 	INIT_LIST_HEAD(remove_list);
2180 	INIT_LIST_HEAD(&new_list);
2181 	INIT_LIST_HEAD(remap_list);
2182 
2183 	node = interval_tree_iter_first(&svms->objects, start, last);
2184 	while (node) {
2185 		struct interval_tree_node *next;
2186 		unsigned long next_start;
2187 
2188 		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2189 			 node->last);
2190 
2191 		prange = container_of(node, struct svm_range, it_node);
2192 		next = interval_tree_iter_next(node, start, last);
2193 		next_start = min(node->last, last) + 1;
2194 
2195 		if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2196 		    prange->mapped_to_gpu) {
2197 			/* nothing to do */
2198 		} else if (node->start < start || node->last > last) {
2199 			/* node intersects the update range and its attributes
2200 			 * will change. Clone and split it, apply updates only
2201 			 * to the overlapping part
2202 			 */
2203 			struct svm_range *old = prange;
2204 
2205 			prange = svm_range_clone(old);
2206 			if (!prange) {
2207 				r = -ENOMEM;
2208 				goto out;
2209 			}
2210 
2211 			list_add(&old->update_list, remove_list);
2212 			list_add(&prange->list, insert_list);
2213 			list_add(&prange->update_list, update_list);
2214 
2215 			if (node->start < start) {
2216 				pr_debug("change old range start\n");
2217 				r = svm_range_split_head(prange, start,
2218 							 insert_list, remap_list);
2219 				if (r)
2220 					goto out;
2221 			}
2222 			if (node->last > last) {
2223 				pr_debug("change old range last\n");
2224 				r = svm_range_split_tail(prange, last,
2225 							 insert_list, remap_list);
2226 				if (r)
2227 					goto out;
2228 			}
2229 		} else {
2230 			/* The node is contained within start..last,
2231 			 * just update it
2232 			 */
2233 			list_add(&prange->update_list, update_list);
2234 		}
2235 
2236 		/* insert a new node if needed */
2237 		if (node->start > start) {
2238 			r = svm_range_split_new(svms, start, node->start - 1,
2239 						READ_ONCE(max_svm_range_pages),
2240 						&new_list, update_list);
2241 			if (r)
2242 				goto out;
2243 		}
2244 
2245 		node = next;
2246 		start = next_start;
2247 	}
2248 
2249 	/* add a final range at the end if needed */
2250 	if (start <= last)
2251 		r = svm_range_split_new(svms, start, last,
2252 					READ_ONCE(max_svm_range_pages),
2253 					&new_list, update_list);
2254 
2255 out:
2256 	if (r) {
2257 		list_for_each_entry_safe(prange, tmp, insert_list, list)
2258 			svm_range_free(prange, false);
2259 		list_for_each_entry_safe(prange, tmp, &new_list, list)
2260 			svm_range_free(prange, true);
2261 	} else {
2262 		list_splice(&new_list, insert_list);
2263 	}
2264 
2265 	return r;
2266 }
2267 
2268 static void
2269 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2270 					    struct svm_range *prange)
2271 {
2272 	unsigned long start;
2273 	unsigned long last;
2274 
2275 	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2276 	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2277 
2278 	if (prange->start == start && prange->last == last)
2279 		return;
2280 
2281 	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2282 		  prange->svms, prange, start, last, prange->start,
2283 		  prange->last);
2284 
2285 	if (start != 0 && last != 0) {
2286 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
2287 		svm_range_remove_notifier(prange);
2288 	}
2289 	prange->it_node.start = prange->start;
2290 	prange->it_node.last = prange->last;
2291 
2292 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
2293 	svm_range_add_notifier_locked(mm, prange);
2294 }
2295 
2296 static void
2297 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2298 			 struct mm_struct *mm)
2299 {
2300 	switch (prange->work_item.op) {
2301 	case SVM_OP_NULL:
2302 		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2303 			 svms, prange, prange->start, prange->last);
2304 		break;
2305 	case SVM_OP_UNMAP_RANGE:
2306 		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2307 			 svms, prange, prange->start, prange->last);
2308 		svm_range_unlink(prange);
2309 		svm_range_remove_notifier(prange);
2310 		svm_range_free(prange, true);
2311 		break;
2312 	case SVM_OP_UPDATE_RANGE_NOTIFIER:
2313 		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2314 			 svms, prange, prange->start, prange->last);
2315 		svm_range_update_notifier_and_interval_tree(mm, prange);
2316 		break;
2317 	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2318 		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2319 			 svms, prange, prange->start, prange->last);
2320 		svm_range_update_notifier_and_interval_tree(mm, prange);
2321 		/* TODO: implement deferred validation and mapping */
2322 		break;
2323 	case SVM_OP_ADD_RANGE:
2324 		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2325 			 prange->start, prange->last);
2326 		svm_range_add_to_svms(prange);
2327 		svm_range_add_notifier_locked(mm, prange);
2328 		break;
2329 	case SVM_OP_ADD_RANGE_AND_MAP:
2330 		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2331 			 prange, prange->start, prange->last);
2332 		svm_range_add_to_svms(prange);
2333 		svm_range_add_notifier_locked(mm, prange);
2334 		/* TODO: implement deferred validation and mapping */
2335 		break;
2336 	default:
2337 		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2338 			 prange->work_item.op);
2339 	}
2340 }
2341 
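/* svm_range_drain_retry_fault - wait until pending retry faults are processed
 *
 * For each SVM-capable GPU, checkpoint the IH ring and wait until the
 * interrupt handler has processed past that point, so stale retry faults for
 * ranges being freed are drained. With the retry CAM enabled this is the
 * main ih ring plus the soft ring; otherwise it is the ih1 ring.
 */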
2342 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2343 {
2344 	struct kfd_process_device *pdd;
2345 	struct kfd_process *p;
2346 	uint32_t i;
2347 
2348 	p = container_of(svms, struct kfd_process, svms);
2349 
2350 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2351 		pdd = p->pdds[i];
2352 		if (!pdd)
2353 			continue;
2354 
2355 		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2356 
2357 		amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2358 				pdd->dev->adev->irq.retry_cam_enabled ?
2359 				&pdd->dev->adev->irq.ih :
2360 				&pdd->dev->adev->irq.ih1);
2361 
2362 		if (pdd->dev->adev->irq.retry_cam_enabled)
2363 			amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2364 				&pdd->dev->adev->irq.ih_soft);
2365 
2366 
2368 	}
2369 }
2370 
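/* svm_range_deferred_list_work - process deferred range list operations
 *
 * Pops ranges off the deferred list and applies their work_item.op with the
 * mmap write lock, svms->lock and migrate_mutex held, in that order. Child
 * ranges created by splits are handled before their parent.
 */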
2371 static void svm_range_deferred_list_work(struct work_struct *work)
2372 {
2373 	struct svm_range_list *svms;
2374 	struct svm_range *prange;
2375 	struct mm_struct *mm;
2376 
2377 	svms = container_of(work, struct svm_range_list, deferred_list_work);
2378 	pr_debug("enter svms 0x%p\n", svms);
2379 
2380 	spin_lock(&svms->deferred_list_lock);
2381 	while (!list_empty(&svms->deferred_range_list)) {
2382 		prange = list_first_entry(&svms->deferred_range_list,
2383 					  struct svm_range, deferred_list);
2384 		spin_unlock(&svms->deferred_list_lock);
2385 
2386 		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2387 			 prange->start, prange->last, prange->work_item.op);
2388 
2389 		mm = prange->work_item.mm;
2390 
2391 		mmap_write_lock(mm);
2392 
2393 		/* Removal from deferred_list must happen inside the mmap write
2394 		 * lock, to avoid two races:
2395 		 * 1. unmap_from_cpu may change work_item.op and add the range
2396 		 *    to deferred_list again, causing a use-after-free bug.
2397 		 * 2. svm_range_list_lock_and_flush_work may take the mmap write
2398 		 *    lock and continue because deferred_list is empty, while the
2399 		 *    deferred_list work is actually waiting for the mmap lock.
2400 		 */
2401 		spin_lock(&svms->deferred_list_lock);
2402 		list_del_init(&prange->deferred_list);
2403 		spin_unlock(&svms->deferred_list_lock);
2404 
2405 		mutex_lock(&svms->lock);
2406 		mutex_lock(&prange->migrate_mutex);
2407 		while (!list_empty(&prange->child_list)) {
2408 			struct svm_range *pchild;
2409 
2410 			pchild = list_first_entry(&prange->child_list,
2411 						struct svm_range, child_list);
2412 			pr_debug("child prange 0x%p op %d\n", pchild,
2413 				 pchild->work_item.op);
2414 			list_del_init(&pchild->child_list);
2415 			svm_range_handle_list_op(svms, pchild, mm);
2416 		}
2417 		mutex_unlock(&prange->migrate_mutex);
2418 
2419 		svm_range_handle_list_op(svms, prange, mm);
2420 		mutex_unlock(&svms->lock);
2421 		mmap_write_unlock(mm);
2422 
2423 		/* Pairs with mmget in svm_range_add_list_work. If this drops the
2424 		 * last mm refcount, schedule release work to avoid circular locking.
2425 		 */
2426 		mmput_async(mm);
2427 
2428 		spin_lock(&svms->deferred_list_lock);
2429 	}
2430 	spin_unlock(&svms->deferred_list_lock);
2431 	pr_debug("exit svms 0x%p\n", svms);
2432 }
2433 
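/* svm_range_add_list_work - queue a range operation for the deferred worker
 *
 * If the range is already queued, only update the pending op; an existing
 * SVM_OP_UNMAP_RANGE is never overwritten since the range is about to be
 * freed. Otherwise take an mm reference (paired with mmput_async in
 * svm_range_deferred_list_work) and append the range to the deferred list.
 */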
2434 void
2435 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2436 			struct mm_struct *mm, enum svm_work_list_ops op)
2437 {
2438 	spin_lock(&svms->deferred_list_lock);
2439 	/* if prange is on the deferred list */
2440 	if (!list_empty(&prange->deferred_list)) {
2441 		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2442 		WARN_ONCE(prange->work_item.mm != mm, "mismatched mm\n");
2443 		if (op != SVM_OP_NULL &&
2444 		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
2445 			prange->work_item.op = op;
2446 	} else {
2447 		/* Pairs with mmput in deferred_list_work.
2448 		 * If process is exiting and mm is gone, don't update mmu notifier.
2449 		 */
2450 		if (mmget_not_zero(mm)) {
2451 			prange->work_item.mm = mm;
2452 			prange->work_item.op = op;
2453 			list_add_tail(&prange->deferred_list,
2454 				      &prange->svms->deferred_range_list);
2455 			pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2456 				 prange, prange->start, prange->last, op);
2457 		}
2458 	}
2459 	spin_unlock(&svms->deferred_list_lock);
2460 }
2461 
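/* Schedule the deferred list worker only if there is pending work */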
2462 void schedule_deferred_list_work(struct svm_range_list *svms)
2463 {
2464 	spin_lock(&svms->deferred_list_lock);
2465 	if (!list_empty(&svms->deferred_range_list))
2466 		schedule_work(&svms->deferred_list_work);
2467 	spin_unlock(&svms->deferred_list_lock);
2468 }
2469 
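/* svm_range_unmap_split - split a range around a CPU unmap
 *
 * Splits prange so the piece overlapping [start, last] is freed later with
 * SVM_OP_UNMAP_RANGE, while pieces that survive the unmap stay in the parent
 * or are re-added as children with SVM_OP_ADD_RANGE.
 */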
2470 static void
2471 svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start,
2472 		      unsigned long last)
2473 {
2474 	struct svm_range *head;
2475 	struct svm_range *tail;
2476 
2477 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2478 		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2479 			 prange->start, prange->last);
2480 		return;
2481 	}
2482 	if (start > prange->last || last < prange->start)
2483 		return;
2484 
2485 	head = tail = prange;
2486 	if (start > prange->start)
2487 		svm_range_split(prange, prange->start, start - 1, &tail);
2488 	if (last < tail->last)
2489 		svm_range_split(tail, last + 1, tail->last, &head);
2490 
2491 	if (head != prange && tail != prange) {
2492 		svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
2493 		svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE);
2494 	} else if (tail != prange) {
2495 		svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE);
2496 	} else if (head != prange) {
2497 		svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
2498 	} else if (parent != prange) {
2499 		prange->work_item.op = SVM_OP_UNMAP_RANGE;
2500 	}
2501 }
2502 
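/* svm_range_unmap_from_cpu - handle MMU_NOTIFY_UNMAP from the CPU
 *
 * Quiesces the queues if a queue buffer is being freed, records an IH
 * checkpoint timestamp per GPU so retry faults already in flight at unmap
 * time can be dropped, unmaps [start, last] from all GPUs, splits partially
 * unmapped ranges, and queues deferred work to remove the range or update
 * its notifier.
 */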
2503 static void
2504 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2505 			 unsigned long start, unsigned long last)
2506 {
2507 	uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2508 	struct svm_range_list *svms;
2509 	struct svm_range *pchild;
2510 	struct kfd_process *p;
2511 	unsigned long s, l;
2512 	bool unmap_parent;
2513 	uint32_t i;
2514 
2515 	if (atomic_read(&prange->queue_refcount)) {
2516 		int r;
2517 
2518 		pr_warn("Freeing queue vital buffer 0x%lx, queue evicted\n",
2519 			prange->start << PAGE_SHIFT);
2520 		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2521 		if (r)
2522 			pr_debug("failed %d to quiesce KFD queues\n", r);
2523 	}
2524 
2525 	p = kfd_lookup_process_by_mm(mm);
2526 	if (!p)
2527 		return;
2528 	svms = &p->svms;
2529 
2530 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2531 		 prange, prange->start, prange->last, start, last);
2532 
2533 	/* Calculate timestamps used to decide which page faults need to be
2534 	 * dropped or handled before unmapping pages from the GPU VM
2535 	 */
2536 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2537 		struct kfd_process_device *pdd;
2538 		struct amdgpu_device *adev;
2539 		struct amdgpu_ih_ring *ih;
2540 		uint32_t checkpoint_wptr;
2541 
2542 		pdd = p->pdds[i];
2543 		if (!pdd)
2544 			continue;
2545 
2546 		adev = pdd->dev->adev;
2547 
2548 		/* Check and drain the ih1 ring if the retry CAM is not available */
2549 		if (adev->irq.ih1.ring_size) {
2550 			ih = &adev->irq.ih1;
2551 			checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2552 			if (ih->rptr != checkpoint_wptr) {
2553 				svms->checkpoint_ts[i] =
2554 					amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2555 				continue;
2556 			}
2557 		}
2558 
2559 		/* check if dev->irq.ih_soft is not empty */
2560 		ih = &adev->irq.ih_soft;
2561 		checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2562 		if (ih->rptr != checkpoint_wptr)
2563 			svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2564 	}
2565 
2566 	unmap_parent = start <= prange->start && last >= prange->last;
2567 
2568 	list_for_each_entry(pchild, &prange->child_list, child_list) {
2569 		mutex_lock_nested(&pchild->lock, 1);
2570 		s = max(start, pchild->start);
2571 		l = min(last, pchild->last);
2572 		if (l >= s)
2573 			svm_range_unmap_from_gpus(pchild, s, l, trigger);
2574 		svm_range_unmap_split(prange, pchild, start, last);
2575 		mutex_unlock(&pchild->lock);
2576 	}
2577 	s = max(start, prange->start);
2578 	l = min(last, prange->last);
2579 	if (l >= s)
2580 		svm_range_unmap_from_gpus(prange, s, l, trigger);
2581 	svm_range_unmap_split(prange, prange, start, last);
2582 
2583 	if (unmap_parent)
2584 		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2585 	else
2586 		svm_range_add_list_work(svms, prange, mm,
2587 					SVM_OP_UPDATE_RANGE_NOTIFIER);
2588 	schedule_deferred_list_work(svms);
2589 
2590 	kfd_unref_process(p);
2591 }
2592 
2593 /**
2594  * svm_range_cpu_invalidate_pagetables - interval notifier callback
2595  * @mni: mmu_interval_notifier struct
2596  * @range: mmu_notifier_range struct
2597  * @cur_seq: value to pass to mmu_interval_set_seq()
2598  *
2599  * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
2600  * otherwise it comes from migration or the CPU page invalidation callback.
2601  *
2602  * For an unmap event, unmap the range from GPUs, remove the prange from svms
2603  * in a delayed work thread, and split the prange if only part of it is unmapped.
2604  *
2605  * For an invalidation event, if GPU retry fault is not enabled, evict the
2606  * queues, then schedule svm_range_restore_work to update the GPU mapping and
2607  * resume the queues. If GPU retry fault is enabled, unmap the svm range from
2608  * the GPU; the retry fault will update the GPU mapping to recover.
2609  *
2610  * Context: mmap lock, notifier_invalidate_start lock are held
2611  *          for invalidate event, prange lock is held if this is from migration
2612  */
2613 static bool
2614 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2615 				    const struct mmu_notifier_range *range,
2616 				    unsigned long cur_seq)
2617 {
2618 	struct svm_range *prange;
2619 	unsigned long start;
2620 	unsigned long last;
2621 
2622 	if (range->event == MMU_NOTIFY_RELEASE)
2623 		return true;
2624 
2625 	start = mni->interval_tree.start;
2626 	last = mni->interval_tree.last;
2627 	start = max(start, range->start) >> PAGE_SHIFT;
2628 	last = min(last, range->end - 1) >> PAGE_SHIFT;
2629 	pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2630 		 start, last, range->start >> PAGE_SHIFT,
2631 		 (range->end - 1) >> PAGE_SHIFT,
2632 		 mni->interval_tree.start >> PAGE_SHIFT,
2633 		 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2634 
2635 	prange = container_of(mni, struct svm_range, notifier);
2636 
2637 	svm_range_lock(prange);
2638 	mmu_interval_set_seq(mni, cur_seq);
2639 
2640 	switch (range->event) {
2641 	case MMU_NOTIFY_UNMAP:
2642 		svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2643 		break;
2644 	default:
2645 		svm_range_evict(prange, mni->mm, start, last, range->event);
2646 		break;
2647 	}
2648 
2649 	svm_range_unlock(prange);
2650 
2651 	return true;
2652 }
2653 
2654 /**
2655  * svm_range_from_addr - find svm range from fault address
2656  * @svms: svm range list header
2657  * @addr: address to search range interval tree, in pages
2658  * @parent: parent range if range is on child list
2659  *
2660  * Context: The caller must hold svms->lock
2661  *
2662  * Return: the svm_range found or NULL
2663  */
2664 struct svm_range *
2665 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2666 		    struct svm_range **parent)
2667 {
2668 	struct interval_tree_node *node;
2669 	struct svm_range *prange;
2670 	struct svm_range *pchild;
2671 
2672 	node = interval_tree_iter_first(&svms->objects, addr, addr);
2673 	if (!node)
2674 		return NULL;
2675 
2676 	prange = container_of(node, struct svm_range, it_node);
2677 	pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2678 		 addr, prange->start, prange->last, node->start, node->last);
2679 
2680 	if (addr >= prange->start && addr <= prange->last) {
2681 		if (parent)
2682 			*parent = prange;
2683 		return prange;
2684 	}
2685 	list_for_each_entry(pchild, &prange->child_list, child_list)
2686 		if (addr >= pchild->start && addr <= pchild->last) {
2687 			pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2688 				 addr, pchild->start, pchild->last);
2689 			if (parent)
2690 				*parent = prange;
2691 			return pchild;
2692 		}
2693 
2694 	return NULL;
2695 }
2696 
2697 /* svm_range_best_restore_location - decide the best fault restore location
2698  * @prange: svm range structure
2699  * @node: the GPU node on which the vm fault happened
2700  * @gpuidx: output, the index of the faulting GPU
2701  *
2702  * This is only called when xnack is on, to decide the best location to
2703  * restore the range mapping after a GPU vm fault. The caller uses the best
2704  * location to migrate the range if the actual location differs, then updates
2705  * the GPU page table mapping to the best location.
2706  *
2707  * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
2708  * If the faulting gpu idx is set in the range ACCESSIBLE bitmap, best_loc is
2709  * the faulting gpu. If the faulting gpu idx is set in the ACCESSIBLE_IN_PLACE
2710  * bitmap, then: if the range actual loc is cpu, best_loc is cpu; if the
2711  * faulting gpu is in the same xgmi hive as the actual loc gpu, best_loc is
2712  * the range actual loc.
2713  * Otherwise the GPU has no access and best_loc is -1.
2714  *
2715  * Return:
2716  * -1 if the faulting GPU has no access, otherwise 0 for CPU or the GPU id
2717  */
2718 static int32_t
2719 svm_range_best_restore_location(struct svm_range *prange,
2720 				struct kfd_node *node,
2721 				int32_t *gpuidx)
2722 {
2723 	struct kfd_node *bo_node, *preferred_node;
2724 	struct kfd_process *p;
2725 	uint32_t gpuid;
2726 	int r;
2727 
2728 	p = container_of(prange->svms, struct kfd_process, svms);
2729 
2730 	r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2731 	if (r < 0) {
2732 		pr_debug("failed to get gpuid from kgd\n");
2733 		return -1;
2734 	}
2735 
2736 	if (node->adev->apu_prefer_gtt)
2737 		return 0;
2738 
2739 	if (prange->preferred_loc == gpuid ||
2740 	    prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2741 		return prange->preferred_loc;
2742 	} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2743 		preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2744 		if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2745 			return prange->preferred_loc;
2746 		/* fall through */
2747 	}
2748 
2749 	if (test_bit(*gpuidx, prange->bitmap_access))
2750 		return gpuid;
2751 
2752 	if (test_bit(*gpuidx, prange->bitmap_aip)) {
2753 		if (!prange->actual_loc)
2754 			return 0;
2755 
2756 		bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2757 		if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2758 			return prange->actual_loc;
2759 		else
2760 			return 0;
2761 	}
2762 
2763 	return -1;
2764 }
2765 
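/* svm_range_get_range_boundaries - choose boundaries for a new fault range
 *
 * Expands the faulting address to the largest interval that stays inside its
 * VMA, is aligned to the default granularity, and does not overlap any
 * existing svm range. Also reports whether the VMA is the initial heap or
 * stack, which makes system memory the preferred location.
 */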
2766 static int
2767 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2768 			       unsigned long *start, unsigned long *last,
2769 			       bool *is_heap_stack)
2770 {
2771 	struct vm_area_struct *vma;
2772 	struct interval_tree_node *node;
2773 	struct rb_node *rb_node;
2774 	unsigned long start_limit, end_limit;
2775 
2776 	vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2777 	if (!vma) {
2778 		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2779 		return -EFAULT;
2780 	}
2781 
2782 	*is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2783 
2784 	start_limit = max(vma->vm_start >> PAGE_SHIFT,
2785 		      (unsigned long)ALIGN_DOWN(addr, 1UL << p->svms.default_granularity));
2786 	end_limit = min(vma->vm_end >> PAGE_SHIFT,
2787 		    (unsigned long)ALIGN(addr + 1, 1UL << p->svms.default_granularity));
2788 
2789 	/* First range that starts after the fault address */
2790 	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2791 	if (node) {
2792 		end_limit = min(end_limit, node->start);
2793 		/* Last range that ends before the fault address */
2794 		rb_node = rb_prev(&node->rb);
2795 	} else {
2796 		/* Last range must end before addr because
2797 		 * there was no range after addr
2798 		 */
2799 		rb_node = rb_last(&p->svms.objects.rb_root);
2800 	}
2801 	if (rb_node) {
2802 		node = container_of(rb_node, struct interval_tree_node, rb);
2803 		if (node->last >= addr) {
2804 			WARN(1, "Overlap with prev node and page fault addr\n");
2805 			return -EFAULT;
2806 		}
2807 		start_limit = max(start_limit, node->last + 1);
2808 	}
2809 
2810 	*start = start_limit;
2811 	*last = end_limit - 1;
2812 
2813 	pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2814 		 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2815 		 *start, *last, *is_heap_stack);
2816 
2817 	return 0;
2818 }
2819 
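/* svm_range_check_vm_userptr - check for overlap with userptr mappings
 *
 * Counterpart of svm_range_check_vm for userptr BOs: the vm->va interval
 * tree is keyed by GPU virtual address, so the whole tree is walked and each
 * BO is checked for a CPU address overlap with [start, last]. Returns
 * -EADDRINUSE and the conflicting interval in @bo_s/@bo_l if one is found.
 */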
2820 static int
2821 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2822 			   uint64_t *bo_s, uint64_t *bo_l)
2823 {
2824 	struct amdgpu_bo_va_mapping *mapping;
2825 	struct interval_tree_node *node;
2826 	struct amdgpu_bo *bo = NULL;
2827 	unsigned long userptr;
2828 	uint32_t i;
2829 	int r;
2830 
2831 	for (i = 0; i < p->n_pdds; i++) {
2832 		struct amdgpu_vm *vm;
2833 
2834 		if (!p->pdds[i]->drm_priv)
2835 			continue;
2836 
2837 		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2838 		r = amdgpu_bo_reserve(vm->root.bo, false);
2839 		if (r)
2840 			return r;
2841 
2842 		/* Check userptr by searching entire vm->va interval tree */
2843 		node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2844 		while (node) {
2845 			mapping = container_of((struct rb_node *)node,
2846 					       struct amdgpu_bo_va_mapping, rb);
2847 			bo = mapping->bo_va->base.bo;
2848 
2849 			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2850 							 start << PAGE_SHIFT,
2851 							 last << PAGE_SHIFT,
2852 							 &userptr)) {
2853 				node = interval_tree_iter_next(node, 0, ~0ULL);
2854 				continue;
2855 			}
2856 
2857 			pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2858 				 start, last);
2859 			if (bo_s && bo_l) {
2860 				*bo_s = userptr >> PAGE_SHIFT;
2861 				*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2862 			}
2863 			amdgpu_bo_unreserve(vm->root.bo);
2864 			return -EADDRINUSE;
2865 		}
2866 		amdgpu_bo_unreserve(vm->root.bo);
2867 	}
2868 	return 0;
2869 }
2870 
2871 static struct
2872 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2873 						struct kfd_process *p,
2874 						struct mm_struct *mm,
2875 						int64_t addr)
2876 {
2877 	struct svm_range *prange = NULL;
2878 	unsigned long start, last;
2879 	uint32_t gpuid, gpuidx;
2880 	bool is_heap_stack;
2881 	uint64_t bo_s = 0;
2882 	uint64_t bo_l = 0;
2883 	int r;
2884 
2885 	if (svm_range_get_range_boundaries(p, addr, &start, &last,
2886 					   &is_heap_stack))
2887 		return NULL;
2888 
2889 	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2890 	if (r != -EADDRINUSE)
2891 		r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2892 
2893 	if (r == -EADDRINUSE) {
2894 		if (addr >= bo_s && addr <= bo_l)
2895 			return NULL;
2896 
2897 		/* Create a one-page svm range if the 2MB range overlaps an existing mapping */
2898 		start = addr;
2899 		last = addr;
2900 	}
2901 
2902 	prange = svm_range_new(&p->svms, start, last, true);
2903 	if (!prange) {
2904 		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2905 		return NULL;
2906 	}
2907 	if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2908 		pr_debug("failed to get gpuid from kgd\n");
2909 		svm_range_free(prange, true);
2910 		return NULL;
2911 	}
2912 
2913 	if (is_heap_stack)
2914 		prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2915 
2916 	svm_range_add_to_svms(prange);
2917 	svm_range_add_notifier_locked(mm, prange);
2918 
2919 	return prange;
2920 }
2921 
2922 /* svm_range_skip_recover - decide if prange can be recovered
2923  * @prange: svm range structure
2924  *
2925  * The GPU vm retry fault handler skips recovering the range in these cases:
2926  * 1. prange is on the deferred list to be removed after unmap; it is a stale
2927  *    fault, and the deferred list work drains it before freeing the prange.
2928  * 2. prange is on the deferred list to add an interval notifier after split.
2929  * 3. prange is a child range split from a parent prange; recover it later,
2930  *    after the interval notifier is added.
2931  *
2932  * Return: true to skip recover, false to recover
2933  */
2934 static bool svm_range_skip_recover(struct svm_range *prange)
2935 {
2936 	struct svm_range_list *svms = prange->svms;
2937 
2938 	spin_lock(&svms->deferred_list_lock);
2939 	if (list_empty(&prange->deferred_list) &&
2940 	    list_empty(&prange->child_list)) {
2941 		spin_unlock(&svms->deferred_list_lock);
2942 		return false;
2943 	}
2944 	spin_unlock(&svms->deferred_list_lock);
2945 
2946 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2947 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2948 			 svms, prange, prange->start, prange->last);
2949 		return true;
2950 	}
2951 	if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2952 	    prange->work_item.op == SVM_OP_ADD_RANGE) {
2953 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2954 			 svms, prange, prange->start, prange->last);
2955 		return true;
2956 	}
2957 	return false;
2958 }
2959 
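/* svm_range_count_fault - account a GPU vm fault against the device
 *
 * If the caller did not resolve the faulting GPU (gpuidx == MAX_GPU_INSTANCE),
 * resolve it here, then increment the per-device fault counter. Faults are
 * counted whether or not they could be recovered.
 */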
2960 static void
2961 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2962 		      int32_t gpuidx)
2963 {
2964 	struct kfd_process_device *pdd;
2965 
2966 	/* fault is on a different page of the same range,
2967 	 * or fault recovery is skipped until later,
2968 	 * or fault is on an invalid virtual address
2969 	 */
2970 	if (gpuidx == MAX_GPU_INSTANCE) {
2971 		uint32_t gpuid;
2972 		int r;
2973 
2974 		r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
2975 		if (r < 0)
2976 			return;
2977 	}
2978 
2979 	/* fault is recovered,
2980 	 * or fault cannot be recovered because the GPU has no access to the range
2981 	 */
2982 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2983 	if (pdd)
2984 		WRITE_ONCE(pdd->faults, pdd->faults + 1);
2985 }
2986 
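/* svm_fault_allowed - check VMA permissions against the fault type
 *
 * A fault is only recoverable if the VMA grants VM_READ, plus VM_WRITE for a
 * write fault.
 */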
2987 static bool
2988 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2989 {
2990 	unsigned long requested = VM_READ;
2991 
2992 	if (write_fault)
2993 		requested |= VM_WRITE;
2994 
2995 	pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2996 		vma->vm_flags);
2997 	return (vma->vm_flags & requested) == requested;
2998 }
2999 
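/* svm_range_restore_pages - recover a GPU retry vm fault
 *
 * Entry point for retry fault recovery on XNACK-enabled processes: look up
 * the process and faulting node, drop faults older than the per-GPU IH
 * checkpoint, find the svm range (creating one for valid but unregistered
 * addresses), choose the best restore location, migrate the pages there if
 * needed, then validate and map the range on the faulting GPU. An -EAGAIN
 * result is turned into 0 after removing the fault from the GMC filter, so
 * the hardware simply retries.
 */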
3000 int
3001 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
3002 			uint32_t vmid, uint32_t node_id,
3003 			uint64_t addr, uint64_t ts, bool write_fault)
3004 {
3005 	unsigned long start, last, size;
3006 	struct mm_struct *mm = NULL;
3007 	struct svm_range_list *svms;
3008 	struct svm_range *prange;
3009 	struct kfd_process *p;
3010 	ktime_t timestamp = ktime_get_boottime();
3011 	struct kfd_node *node;
3012 	int32_t best_loc;
3013 	int32_t gpuid, gpuidx = MAX_GPU_INSTANCE;
3014 	bool write_locked = false;
3015 	struct vm_area_struct *vma;
3016 	bool migration = false;
3017 	int r = 0;
3018 
3019 	if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
3020 		pr_debug("device does not support SVM\n");
3021 		return -EFAULT;
3022 	}
3023 
3024 	p = kfd_lookup_process_by_pasid(pasid, NULL);
3025 	if (!p) {
3026 		pr_debug("kfd process not founded pasid 0x%x\n", pasid);
3027 		return 0;
3028 	}
3029 	svms = &p->svms;
3030 
3031 	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
3032 
3033 	if (atomic_read(&svms->drain_pagefaults)) {
3034 		pr_debug("page fault handling disabled, drop fault 0x%llx\n", addr);
3035 		r = 0;
3036 		goto out;
3037 	}
3038 
3039 	node = kfd_node_by_irq_ids(adev, node_id, vmid);
3040 	if (!node) {
3041 		pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
3042 			 vmid);
3043 		r = -EFAULT;
3044 		goto out;
3045 	}
3046 
3047 	if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
3048 		pr_debug("failed to get gpuid/gpuidex for node_id: %d\n", node_id);
3049 		r = -EFAULT;
3050 		goto out;
3051 	}
3052 
3053 	if (!p->xnack_enabled) {
3054 		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
3055 		r = -EFAULT;
3056 		goto out;
3057 	}
3058 
3059 	/* p->lead_thread is available as kfd_process_wq_release flushes the work
3060 	 * before releasing the task ref.
3061 	 */
3062 	mm = get_task_mm(p->lead_thread);
3063 	if (!mm) {
3064 		pr_debug("svms 0x%p failed to get mm\n", svms);
3065 		r = 0;
3066 		goto out;
3067 	}
3068 
3069 	mmap_read_lock(mm);
3070 retry_write_locked:
3071 	mutex_lock(&svms->lock);
3072 
3073 	/* check if this page fault time stamp is before svms->checkpoint_ts */
3074 	if (svms->checkpoint_ts[gpuidx] != 0) {
3075 		if (amdgpu_ih_ts_after_or_equal(ts,  svms->checkpoint_ts[gpuidx])) {
3076 			pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
3077 			if (write_locked)
3078 				mmap_write_downgrade(mm);
3079 			r = -EAGAIN;
3080 			goto out_unlock_svms;
3081 		} else {
3082 			/* ts is after svms->checkpoint_ts now; reset svms->checkpoint_ts
3083 			 * to zero to avoid a later ts wrap-around causing a wrong comparison
3084 			 */
3085 			svms->checkpoint_ts[gpuidx] = 0;
3086 		}
3087 	}
3088 
3089 	prange = svm_range_from_addr(svms, addr, NULL);
3090 	if (!prange) {
3091 		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
3092 			 svms, addr);
3093 		if (!write_locked) {
3094 			/* Need the write lock to create new range with MMU notifier.
3095 			 * Also flush pending deferred work to make sure the interval
3096 			 * tree is up to date before we add a new range
3097 			 */
3098 			mutex_unlock(&svms->lock);
3099 			mmap_read_unlock(mm);
3100 			mmap_write_lock(mm);
3101 			write_locked = true;
3102 			goto retry_write_locked;
3103 		}
3104 		prange = svm_range_create_unregistered_range(node, p, mm, addr);
3105 		if (!prange) {
3106 			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
3107 				 svms, addr);
3108 			mmap_write_downgrade(mm);
3109 			r = -EFAULT;
3110 			goto out_unlock_svms;
3111 		}
3112 	}
3113 	if (write_locked)
3114 		mmap_write_downgrade(mm);
3115 
3116 	mutex_lock(&prange->migrate_mutex);
3117 
3118 	if (svm_range_skip_recover(prange)) {
3119 		amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3120 		r = 0;
3121 		goto out_unlock_range;
3122 	}
3123 
3124 	/* skip duplicate vm fault on different pages of same range */
3125 	if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
3126 				AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
3127 		pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
3128 			 svms, prange->start, prange->last);
3129 		r = 0;
3130 		goto out_unlock_range;
3131 	}
3132 
3133 	/* __do_munmap removed the VMA; return success since we are handling a
3134 	 * stale retry fault.
3135 	 */
3136 	vma = vma_lookup(mm, addr << PAGE_SHIFT);
3137 	if (!vma) {
3138 		pr_debug("address 0x%llx VMA is removed\n", addr);
3139 		r = 0;
3140 		goto out_unlock_range;
3141 	}
3142 
3143 	if (!svm_fault_allowed(vma, write_fault)) {
3144 		pr_debug("fault addr 0x%llx no %s permission\n", addr,
3145 			write_fault ? "write" : "read");
3146 		r = -EPERM;
3147 		goto out_unlock_range;
3148 	}
3149 
3150 	best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3151 	if (best_loc == -1) {
3152 		pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3153 			 svms, prange->start, prange->last);
3154 		r = -EACCES;
3155 		goto out_unlock_range;
3156 	}
3157 
3158 	pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3159 		 svms, prange->start, prange->last, best_loc,
3160 		 prange->actual_loc);
3161 
3162 	kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
3163 				       write_fault, timestamp);
3164 
3165 	/* Align migration range start and size to granularity size */
3166 	size = 1UL << prange->granularity;
3167 	start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
3168 	last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
3169 	if (prange->actual_loc != 0 || best_loc != 0) {
3170 		if (best_loc) {
3171 			r = svm_migrate_to_vram(prange, best_loc, start, last,
3172 					mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3173 			if (r) {
3174 				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
3175 					 r, addr);
3176 				/* Fallback to system memory if migration to
3177 				 * VRAM failed
3178 				 */
3179 				if (prange->actual_loc && prange->actual_loc != best_loc)
3180 					r = svm_migrate_vram_to_ram(prange, mm, start, last,
3181 						KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3182 				else
3183 					r = 0;
3184 			}
3185 		} else {
3186 			r = svm_migrate_vram_to_ram(prange, mm, start, last,
3187 					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3188 		}
3189 		if (r) {
3190 			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3191 				 r, svms, start, last);
3192 			goto out_migrate_fail;
3193 		} else {
3194 			migration = true;
3195 		}
3196 	}
3197 
3198 	r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
3199 				       false, false);
3200 	if (r)
3201 		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3202 			 r, svms, start, last);
3203 
3204 out_migrate_fail:
3205 	kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3206 				     migration);
3207 
3208 out_unlock_range:
3209 	mutex_unlock(&prange->migrate_mutex);
3210 out_unlock_svms:
3211 	mutex_unlock(&svms->lock);
3212 	mmap_read_unlock(mm);
3213 
3214 	if (r != -EAGAIN)
3215 		svm_range_count_fault(node, p, gpuidx);
3216 
3217 	mmput(mm);
3218 out:
3219 	kfd_unref_process(p);
3220 
3221 	if (r == -EAGAIN) {
3222 		pr_debug("recover vm fault later\n");
3223 		amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3224 		r = 0;
3225 	}
3226 	return r;
3227 }
3228 
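/* svm_range_switch_xnack_reserve_mem - adjust memory accounting for xnack mode
 *
 * With xnack off, svm range memory must stay resident and is therefore
 * counted against the memory limit like userptr allocations; with xnack on
 * it can be paged. Switching modes (un)reserves the accounting for every
 * range and child range, rolling back partial reservations on failure.
 */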
3229 int
3230 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3231 {
3232 	struct svm_range *prange, *pchild;
3233 	uint64_t reserved_size = 0;
3234 	uint64_t size;
3235 	int r = 0;
3236 
3237 	pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3238 
3239 	mutex_lock(&p->svms.lock);
3240 
3241 	list_for_each_entry(prange, &p->svms.list, list) {
3242 		svm_range_lock(prange);
3243 		list_for_each_entry(pchild, &prange->child_list, child_list) {
3244 			size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3245 			if (xnack_enabled) {
3246 				amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3247 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3248 			} else {
3249 				r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3250 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3251 				if (r)
3252 					goto out_unlock;
3253 				reserved_size += size;
3254 			}
3255 		}
3256 
3257 		size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3258 		if (xnack_enabled) {
3259 			amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3260 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3261 		} else {
3262 			r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3263 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3264 			if (r)
3265 				goto out_unlock;
3266 			reserved_size += size;
3267 		}
3268 out_unlock:
3269 		svm_range_unlock(prange);
3270 		if (r)
3271 			break;
3272 	}
3273 
3274 	if (r)
3275 		amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3276 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3277 	else
3278 		/* Changing the xnack mode must happen inside the svms lock, to avoid
3279 		 * racing with svm_range_deferred_list_work unreserving memory in parallel.
3280 		 */
3281 		p->xnack_enabled = xnack_enabled;
3282 
3283 	mutex_unlock(&p->svms.lock);
3284 	return r;
3285 }
3286 
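/* svm_range_list_fini - tear down the svm range list on process exit
 *
 * Cancel the restore work and flush the deferred list work, then disable
 * page fault handling and drain pending retry faults, so no fault handler
 * can race with freeing the ranges below.
 */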
3287 void svm_range_list_fini(struct kfd_process *p)
3288 {
3289 	struct svm_range *prange;
3290 	struct svm_range *next;
3291 
3292 	pr_debug("process pid %d svms 0x%p\n", p->lead_thread->pid,
3293 		 &p->svms);
3294 
3295 	cancel_delayed_work_sync(&p->svms.restore_work);
3296 
3297 	/* Ensure list work is finished before process is destroyed */
3298 	flush_work(&p->svms.deferred_list_work);
3299 
3300 	/*
3301 	 * Ensure no retry fault comes in afterwards, as the page fault handler
3302 	 * will not find the kfd process and take the mm lock to recover faults.
3303 	 * Stop kfd page fault handling, then wait for pending faults to drain.
3304 	 */
3305 	atomic_set(&p->svms.drain_pagefaults, 1);
3306 	svm_range_drain_retry_fault(&p->svms);
3307 
3308 	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3309 		svm_range_unlink(prange);
3310 		svm_range_remove_notifier(prange);
3311 		svm_range_free(prange, true);
3312 	}
3313 
3314 	mutex_destroy(&p->svms.lock);
3315 
3316 	pr_debug("process pid %d svms 0x%p done\n",
3317 		p->lead_thread->pid, &p->svms);
3318 }
3319 
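/* svm_range_list_init - initialize per-process svm state
 *
 * Sets up the interval tree, work items, lists and locks, marks which GPUs
 * support SVM, and derives the default granularity from the module
 * parameter, capped at 0x1B.
 */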
3320 int svm_range_list_init(struct kfd_process *p)
3321 {
3322 	struct svm_range_list *svms = &p->svms;
3323 	int i;
3324 
3325 	svms->objects = RB_ROOT_CACHED;
3326 	mutex_init(&svms->lock);
3327 	INIT_LIST_HEAD(&svms->list);
3328 	atomic_set(&svms->evicted_ranges, 0);
3329 	atomic_set(&svms->drain_pagefaults, 0);
3330 	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3331 	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3332 	INIT_LIST_HEAD(&svms->deferred_range_list);
3333 	INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3334 	spin_lock_init(&svms->deferred_list_lock);
3335 
3336 	for (i = 0; i < p->n_pdds; i++)
3337 		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3338 			bitmap_set(svms->bitmap_supported, i, 1);
3339 
3340 	 /* Value of default granularity cannot exceed 0x1B, the
3341 	  * number of pages supported by a 4-level paging table
3342 	  */
3343 	svms->default_granularity = min_t(u8, amdgpu_svm_default_granularity, 0x1B);
3344 	pr_debug("Default SVM Granularity to use: %d\n", svms->default_granularity);
3345 
3346 	return 0;
3347 }
3348 
3349 /**
3350  * svm_range_check_vm - check if virtual address range mapped already
3351  * @p: current kfd_process
3352  * @start: range start address, in pages
3353  * @last: range last address, in pages
3354  * @bo_s: mapping start address in pages if address range already mapped
3355  * @bo_l: mapping last address in pages if address range already mapped
3356  *
3357  * The purpose is to avoid virtual address ranges already allocated by the
3358  * kfd_ioctl_alloc_memory_of_gpu ioctl.
3359  * It checks each pdd in the kfd_process.
3360  *
3361  * Context: Process context
3362  *
3363  * Return 0 - OK, if the range is not mapped.
3364  * Otherwise error code:
3365  * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3366  * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3367  * a signal. Release all buffer reservations and return to user-space.
3368  */
3369 static int
3370 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3371 		   uint64_t *bo_s, uint64_t *bo_l)
3372 {
3373 	struct amdgpu_bo_va_mapping *mapping;
3374 	struct interval_tree_node *node;
3375 	uint32_t i;
3376 	int r;
3377 
3378 	for (i = 0; i < p->n_pdds; i++) {
3379 		struct amdgpu_vm *vm;
3380 
3381 		if (!p->pdds[i]->drm_priv)
3382 			continue;
3383 
3384 		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3385 		r = amdgpu_bo_reserve(vm->root.bo, false);
3386 		if (r)
3387 			return r;
3388 
3389 		node = interval_tree_iter_first(&vm->va, start, last);
3390 		if (node) {
3391 			pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3392 				 start, last);
3393 			mapping = container_of((struct rb_node *)node,
3394 					       struct amdgpu_bo_va_mapping, rb);
3395 			if (bo_s && bo_l) {
3396 				*bo_s = mapping->start;
3397 				*bo_l = mapping->last;
3398 			}
3399 			amdgpu_bo_unreserve(vm->root.bo);
3400 			return -EADDRINUSE;
3401 		}
3402 		amdgpu_bo_unreserve(vm->root.bo);
3403 	}
3404 
3405 	return 0;
3406 }
3407 
3408 /**
3409  * svm_range_is_valid - check if virtual address range is valid
3410  * @p: current kfd_process
3411  * @start: range start address, in pages
3412  * @size: range size, in pages
3413  *
3414  * Valid virtual address range means it belongs to one or more VMAs
3415  *
3416  * Context: Process context
3417  *
3418  * Return:
3419  *  0 - OK, otherwise error code
3420  */
3421 static int
3422 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3423 {
3424 	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3425 	struct vm_area_struct *vma;
3426 	unsigned long end;
3427 	unsigned long start_unchg = start;
3428 
3429 	start <<= PAGE_SHIFT;
3430 	end = start + (size << PAGE_SHIFT);
3431 	do {
3432 		vma = vma_lookup(p->mm, start);
3433 		if (!vma || (vma->vm_flags & device_vma))
3434 			return -EFAULT;
3435 		start = min(end, vma->vm_end);
3436 	} while (start < end);
3437 
3438 	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3439 				  NULL);
3440 }
3441 
3442 /**
3443  * svm_range_best_prefetch_location - decide the best prefetch location
3444  * @prange: svm range structure
3445  *
3446  * For xnack off:
3447  * If the range maps to a single GPU, the best prefetch location is
3448  * prefetch_loc, which can be CPU or GPU.
3449  *
3450  * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch
3451  * location is the prefetch_loc GPU only if the mGPUs are in the same XGMI
3452  * hive; otherwise it is always CPU, because a GPU cannot coherently map
3453  * another GPU's VRAM even with a large-BAR PCIe connection.
3454  *
3455  * For xnack on:
3456  * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
3457  * prefetch_loc; access from another GPU raises a vm fault and triggers migration.
3458  *
3459  * If the range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is the
3460  * prefetch_loc GPU only if the mGPUs are connected on the same XGMI hive;
3461  * otherwise it is always CPU.
3462  *
3463  * Context: Process context
3464  *
3465  * Return:
3466  * 0 (meaning CPU) or the GPU id of the best prefetch location
3467  */
3468 static uint32_t
3469 svm_range_best_prefetch_location(struct svm_range *prange)
3470 {
3471 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3472 	uint32_t best_loc = prange->prefetch_loc;
3473 	struct kfd_process_device *pdd;
3474 	struct kfd_node *bo_node;
3475 	struct kfd_process *p;
3476 	uint32_t gpuidx;
3477 
3478 	p = container_of(prange->svms, struct kfd_process, svms);
3479 
3480 	if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3481 		goto out;
3482 
3483 	bo_node = svm_range_get_node_by_id(prange, best_loc);
3484 	if (!bo_node) {
3485 		WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3486 		best_loc = 0;
3487 		goto out;
3488 	}
3489 
3490 	if (bo_node->adev->apu_prefer_gtt) {
3491 		best_loc = 0;
3492 		goto out;
3493 	}
3494 
3495 	if (p->xnack_enabled)
3496 		bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3497 	else
3498 		bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3499 			  MAX_GPU_INSTANCE);
3500 
3501 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3502 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3503 		if (!pdd) {
3504 			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3505 			continue;
3506 		}
3507 
3508 		if (pdd->dev->adev == bo_node->adev)
3509 			continue;
3510 
3511 		if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3512 			best_loc = 0;
3513 			break;
3514 		}
3515 	}
3516 
3517 out:
3518 	pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3519 		 p->xnack_enabled, &p->svms, prange->start, prange->last,
3520 		 best_loc);
3521 
3522 	return best_loc;
3523 }
3524 
3525 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3526  * @mm: current process mm_struct
3527  * @prange: svm range structure
3528  * @migrated: output, true if migration is triggered
3529  *
3530  * If the range prefetch_loc is a GPU and the actual loc is cpu 0, migrate
3531  * the range from ram to vram.
3532  * If the range prefetch_loc is cpu 0 and the actual loc is a GPU, migrate
3533  * the range from vram to ram.
3534  *
3535  * If GPU vm fault retry is not enabled, migration interacts with the MMU
3536  * notifier and the restore work:
3537  * 1. migrate_vma_setup invalidates pages; the MMU notifier callback
3538  *    svm_range_evict stops all queues and schedules the restore work
3539  * 2. svm_range_restore_work waits for the migration to finish via
3540  *    a. svm_range_validate_vram taking prange->migrate_mutex
3541  *    b. svm_range_validate_ram's HMM get pages waiting for the CPU fault handler
3542  * 3. the restore work updates the GPU mappings and resumes all queues.
3543  *
3544  * Context: Process context
3545  *
3546  * Return:
3547  * 0 - OK, otherwise the error code of the vram to ram migration (a failed
3548  * prefetch to vram is not propagated; the range still maps from sys ram).
3548  */
3549 static int
3550 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3551 			    bool *migrated)
3552 {
3553 	uint32_t best_loc;
3554 	int r = 0;
3555 
3556 	*migrated = false;
3557 	best_loc = svm_range_best_prefetch_location(prange);
3558 
3559 	/* When best_loc is a gpu node and the same as prange->actual_loc, we
3560 	 * still need to do the migration, as prange->actual_loc != 0 does not
3561 	 * mean all pages in prange are in vram. hmm migrate will pick up the
3562 	 * right pages during migration.
3563 	 */
3564 	if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
3565 	    (best_loc == 0 && prange->actual_loc == 0))
3566 		return 0;
3567 
3568 	if (!best_loc) {
3569 		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
3570 					KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3571 		*migrated = !r;
3572 		return r;
3573 	}
3574 
3575 	r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
3576 				mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3577 	*migrated = !r;
3578 
3579 	return 0;
3580 }
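
/*
 * Decision summary of svm_range_trigger_migration above, derived from the
 * code, for reference:
 *
 *   best_loc UNDEFINED                 -> no migration, return 0
 *   best_loc == 0 && actual_loc == 0   -> already in sys ram, return 0
 *   best_loc == 0 && actual_loc != 0   -> migrate vram to ram, return r
 *   best_loc is a GPU                  -> migrate ram to vram; a failure is
 *                                         not propagated because the range
 *                                         can still be mapped from sys ram
 */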
3581 
3582 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3583 {
3584 	/* Dereferencing fence->svm_bo is safe here because the fence hasn't
3585 	 * signaled yet and we're under the protection of the fence->lock.
3586 	 * After the fence is signaled in svm_range_bo_release, we cannot get
3587 	 * here any more.
3588 	 *
3589 	 * Reference is dropped in svm_range_evict_svm_bo_worker.
3590 	 */
3591 	if (svm_bo_ref_unless_zero(fence->svm_bo)) {
3592 		WRITE_ONCE(fence->svm_bo->evicting, 1);
3593 		schedule_work(&fence->svm_bo->eviction_work);
3594 	}
3595 
3596 	return 0;
3597 }
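
/*
 * A minimal sketch of the take-a-reference-unless-freeing pattern that
 * svm_bo_ref_unless_zero() relies on, assuming only <linux/kref.h>. The
 * helper below is hypothetical and shown for illustration.
 */
static bool example_ref_unless_zero(struct kref *kref)
{
	/* Succeeds only if the refcount was not already zero, i.e. the
	 * object was not in the middle of being released.
	 */
	return kref_get_unless_zero(kref);
}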
3598 
3599 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3600 {
3601 	struct svm_range_bo *svm_bo;
3602 	struct mm_struct *mm;
3603 	int r = 0;
3604 
3605 	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3606 
3607 	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3608 		mm = svm_bo->eviction_fence->mm;
3609 	} else {
3610 		svm_range_bo_unref(svm_bo);
3611 		return;
3612 	}
3613 
3614 	mmap_read_lock(mm);
3615 	spin_lock(&svm_bo->list_lock);
3616 	while (!list_empty(&svm_bo->range_list) && !r) {
3617 		struct svm_range *prange =
3618 				list_first_entry(&svm_bo->range_list,
3619 						struct svm_range, svm_bo_list);
3620 		int retries = 3;
3621 
3622 		list_del_init(&prange->svm_bo_list);
3623 		spin_unlock(&svm_bo->list_lock);
3624 
3625 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3626 			 prange->start, prange->last);
3627 
3628 		mutex_lock(&prange->migrate_mutex);
3629 		do {
3630 			/* Migrate all vram pages in this prange to sys ram;
3631 			 * after that, prange->actual_loc should be zero.
3632 			 */
3633 			r = svm_migrate_vram_to_ram(prange, mm,
3634 					prange->start, prange->last,
3635 					KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3636 		} while (!r && prange->actual_loc && --retries);
3637 
3638 		if (!r && prange->actual_loc)
3639 			pr_info_once("Migration failed during eviction\n");
3640 
3641 		if (!prange->actual_loc) {
3642 			mutex_lock(&prange->lock);
3643 			prange->svm_bo = NULL;
3644 			mutex_unlock(&prange->lock);
3645 		}
3646 		mutex_unlock(&prange->migrate_mutex);
3647 
3648 		spin_lock(&svm_bo->list_lock);
3649 	}
3650 	spin_unlock(&svm_bo->list_lock);
3651 	mmap_read_unlock(mm);
3652 	mmput(mm);
3653 
3654 	dma_fence_signal(&svm_bo->eviction_fence->base);
3655 
3656 	/* This is the last reference to svm_bo, after svm_range_vram_node_free
3657 	 * has been called in svm_migrate_vram_to_ram
3658 	 */
3659 	WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3660 	svm_range_bo_unref(svm_bo);
3661 }
3662 
3663 static int
3664 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3665 		   uint64_t start, uint64_t size, uint32_t nattr,
3666 		   struct kfd_ioctl_svm_attribute *attrs)
3667 {
3668 	struct amdkfd_process_info *process_info = p->kgd_process_info;
3669 	struct list_head update_list;
3670 	struct list_head insert_list;
3671 	struct list_head remove_list;
3672 	struct list_head remap_list;
3673 	struct svm_range_list *svms;
3674 	struct svm_range *prange;
3675 	struct svm_range *next;
3676 	bool update_mapping = false;
3677 	bool flush_tlb;
3678 	int r, ret = 0;
3679 
3680 	pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3681 		 p->lead_thread->pid, &p->svms, start, start + size - 1, size);
3682 
3683 	r = svm_range_check_attr(p, nattr, attrs);
3684 	if (r)
3685 		return r;
3686 
3687 	svms = &p->svms;
3688 
3689 	mutex_lock(&process_info->lock);
3690 
3691 	svm_range_list_lock_and_flush_work(svms, mm);
3692 
3693 	r = svm_range_is_valid(p, start, size);
3694 	if (r) {
3695 		pr_debug("invalid range r=%d\n", r);
3696 		mmap_write_unlock(mm);
3697 		goto out;
3698 	}
3699 
3700 	mutex_lock(&svms->lock);
3701 
3702 	/* Add new range and split existing ranges as needed */
3703 	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3704 			  &insert_list, &remove_list, &remap_list);
3705 	if (r) {
3706 		mutex_unlock(&svms->lock);
3707 		mmap_write_unlock(mm);
3708 		goto out;
3709 	}
3710 	/* Apply changes as a transaction */
3711 	list_for_each_entry_safe(prange, next, &insert_list, list) {
3712 		svm_range_add_to_svms(prange);
3713 		svm_range_add_notifier_locked(mm, prange);
3714 	}
3715 	list_for_each_entry(prange, &update_list, update_list) {
3716 		svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3717 		/* TODO: unmap ranges from GPU that lost access */
3718 	}
3719 	update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
3720 
3721 	list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3722 		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3723 			 prange->svms, prange, prange->start,
3724 			 prange->last);
3725 		svm_range_unlink(prange);
3726 		svm_range_remove_notifier(prange);
3727 		svm_range_free(prange, false);
3728 	}
3729 
3730 	mmap_write_downgrade(mm);
3731 	/* Trigger migrations and revalidate and map to GPUs as needed. If
3732 	 * this fails we may be left with partially completed actions. There
3733 	 * is no clean way of rolling back to the previous state in such a
3734 	 * case because the rollback wouldn't be guaranteed to work either.
3735 	 */
3736 	list_for_each_entry(prange, &update_list, update_list) {
3737 		bool migrated;
3738 
3739 		mutex_lock(&prange->migrate_mutex);
3740 
3741 		r = svm_range_trigger_migration(mm, prange, &migrated);
3742 		if (r)
3743 			goto out_unlock_range;
3744 
3745 		if (migrated && (!p->xnack_enabled ||
3746 		    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3747 		    prange->mapped_to_gpu) {
3748 			pr_debug("restore_work will update mappings of GPUs\n");
3749 			mutex_unlock(&prange->migrate_mutex);
3750 			continue;
3751 		}
3752 
3753 		if (!migrated && !update_mapping) {
3754 			mutex_unlock(&prange->migrate_mutex);
3755 			continue;
3756 		}
3757 
3758 		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3759 
3760 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3761 					       MAX_GPU_INSTANCE, true, true, flush_tlb);
3762 		if (r)
3763 			pr_debug("failed %d to map svm range\n", r);
3764 
3765 out_unlock_range:
3766 		mutex_unlock(&prange->migrate_mutex);
3767 		if (r)
3768 			ret = r;
3769 	}
3770 
3771 	list_for_each_entry(prange, &remap_list, update_list) {
3772 		pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
3773 			 prange, prange->start, prange->last);
3774 		mutex_lock(&prange->migrate_mutex);
3775 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3776 					       MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
3777 		if (r)
3778 			pr_debug("failed %d on remap svm range\n", r);
3779 		mutex_unlock(&prange->migrate_mutex);
3780 		if (r)
3781 			ret = r;
3782 	}
3783 
3784 	dynamic_svm_range_dump(svms);
3785 
3786 	mutex_unlock(&svms->lock);
3787 	mmap_read_unlock(mm);
3788 out:
3789 	mutex_unlock(&process_info->lock);
3790 
3791 	pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] done, r=%d\n",
3792 		 p->lead_thread->pid, &p->svms, start, start + size - 1, r);
3793 
3794 	return ret ? ret : r;
3795 }
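
/*
 * Locking summary for svm_range_set_attr above, for reference: the range
 * list and interval tree are updated under process_info->lock, the mmap
 * write lock and svms->lock; the mmap lock is then downgraded to read mode
 * before migration and validate_and_map, which only need read access and
 * may take a long time.
 */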
3796 
3797 static int
3798 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3799 		   uint64_t start, uint64_t size, uint32_t nattr,
3800 		   struct kfd_ioctl_svm_attribute *attrs)
3801 {
3802 	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3803 	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3804 	bool get_preferred_loc = false;
3805 	bool get_prefetch_loc = false;
3806 	bool get_granularity = false;
3807 	bool get_accessible = false;
3808 	bool get_flags = false;
3809 	uint64_t last = start + size - 1UL;
3810 	uint8_t granularity = 0xff;
3811 	struct interval_tree_node *node;
3812 	struct svm_range_list *svms;
3813 	struct svm_range *prange;
3814 	uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3815 	uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3816 	uint32_t flags_and = 0xffffffff;
3817 	uint32_t flags_or = 0;
3818 	int gpuidx;
3819 	uint32_t i;
3820 	int r = 0;
3821 
3822 	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3823 		 start + size - 1, nattr);
3824 
3825 	/* Flush pending deferred work to avoid racing with deferred actions from
3826 	 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3827 	 * can still race with get_attr because we don't hold the mmap lock. But that
3828 	 * would be a race condition in the application anyway, and undefined
3829 	 * behaviour is acceptable in that case.
3830 	 */
3831 	flush_work(&p->svms.deferred_list_work);
3832 
3833 	mmap_read_lock(mm);
3834 	r = svm_range_is_valid(p, start, size);
3835 	mmap_read_unlock(mm);
3836 	if (r) {
3837 		pr_debug("invalid range r=%d\n", r);
3838 		return r;
3839 	}
3840 
3841 	for (i = 0; i < nattr; i++) {
3842 		switch (attrs[i].type) {
3843 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3844 			get_preferred_loc = true;
3845 			break;
3846 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3847 			get_prefetch_loc = true;
3848 			break;
3849 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3850 			get_accessible = true;
3851 			break;
3852 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3853 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3854 			get_flags = true;
3855 			break;
3856 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3857 			get_granularity = true;
3858 			break;
3859 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3860 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3861 			fallthrough;
3862 		default:
3863 			pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3864 			return -EINVAL;
3865 		}
3866 	}
3867 
3868 	svms = &p->svms;
3869 
3870 	mutex_lock(&svms->lock);
3871 
3872 	node = interval_tree_iter_first(&svms->objects, start, last);
3873 	if (!node) {
3874 		pr_debug("range attrs not found, return default values\n");
3875 		svm_range_set_default_attributes(svms, &location, &prefetch_loc,
3876 						 &granularity, &flags_and);
3877 		flags_or = flags_and;
3878 		if (p->xnack_enabled)
3879 			bitmap_copy(bitmap_access, svms->bitmap_supported,
3880 				    MAX_GPU_INSTANCE);
3881 		else
3882 			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3883 		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3884 		goto fill_values;
3885 	}
3886 	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3887 	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3888 
3889 	while (node) {
3890 		struct interval_tree_node *next;
3891 
3892 		prange = container_of(node, struct svm_range, it_node);
3893 		next = interval_tree_iter_next(node, start, last);
3894 
3895 		if (get_preferred_loc) {
3896 			if (prange->preferred_loc ==
3897 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3898 			    (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3899 			     location != prange->preferred_loc)) {
3900 				location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3901 				get_preferred_loc = false;
3902 			} else {
3903 				location = prange->preferred_loc;
3904 			}
3905 		}
3906 		if (get_prefetch_loc) {
3907 			if (prange->prefetch_loc ==
3908 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3909 			    (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3910 			     prefetch_loc != prange->prefetch_loc)) {
3911 				prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3912 				get_prefetch_loc = false;
3913 			} else {
3914 				prefetch_loc = prange->prefetch_loc;
3915 			}
3916 		}
3917 		if (get_accessible) {
3918 			bitmap_and(bitmap_access, bitmap_access,
3919 				   prange->bitmap_access, MAX_GPU_INSTANCE);
3920 			bitmap_and(bitmap_aip, bitmap_aip,
3921 				   prange->bitmap_aip, MAX_GPU_INSTANCE);
3922 		}
3923 		if (get_flags) {
3924 			flags_and &= prange->flags;
3925 			flags_or |= prange->flags;
3926 		}
3927 
3928 		if (get_granularity && prange->granularity < granularity)
3929 			granularity = prange->granularity;
3930 
3931 		node = next;
3932 	}
3933 fill_values:
3934 	mutex_unlock(&svms->lock);
3935 
3936 	for (i = 0; i < nattr; i++) {
3937 		switch (attrs[i].type) {
3938 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3939 			attrs[i].value = location;
3940 			break;
3941 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3942 			attrs[i].value = prefetch_loc;
3943 			break;
3944 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3945 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
3946 							       attrs[i].value);
3947 			if (gpuidx < 0) {
3948 				pr_debug("invalid gpuid %x\n", attrs[i].value);
3949 				return -EINVAL;
3950 			}
3951 			if (test_bit(gpuidx, bitmap_access))
3952 				attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3953 			else if (test_bit(gpuidx, bitmap_aip))
3954 				attrs[i].type =
3955 					KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3956 			else
3957 				attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3958 			break;
3959 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3960 			attrs[i].value = flags_and;
3961 			break;
3962 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3963 			attrs[i].value = ~flags_or;
3964 			break;
3965 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3966 			attrs[i].value = (uint32_t)granularity;
3967 			break;
3968 		}
3969 	}
3970 
3971 	return 0;
3972 }
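
/*
 * Worked example of the attribute aggregation above (illustrative): if the
 * queried interval covers two pranges with flags 0x5 and 0x1, then
 *
 *   flags_and = 0x5 & 0x1 = 0x1      reported for ATTR_SET_FLAGS
 *   ~flags_or = ~(0x5 | 0x1) = ~0x5  reported for ATTR_CLR_FLAGS
 *
 * i.e. SET_FLAGS returns the flags set across the entire interval and
 * CLR_FLAGS returns the flags clear across the entire interval. The
 * accessibility bitmaps are intersected the same way.
 */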
3973 
3974 int kfd_criu_resume_svm(struct kfd_process *p)
3975 {
3976 	struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
3977 	int nattr_common = 4, nattr_accessibility = 1;
3978 	struct criu_svm_metadata *criu_svm_md = NULL;
3979 	struct svm_range_list *svms = &p->svms;
3980 	struct criu_svm_metadata *next = NULL;
3981 	uint32_t set_flags = 0xffffffff;
3982 	int i, j, num_attrs, ret = 0;
3983 	uint64_t set_attr_size;
3984 	struct mm_struct *mm;
3985 
3986 	if (list_empty(&svms->criu_svm_metadata_list)) {
3987 		pr_debug("No SVM data from CRIU restore stage 2\n");
3988 		return ret;
3989 	}
3990 
3991 	mm = get_task_mm(p->lead_thread);
3992 	if (!mm) {
3993 		pr_err("failed to get mm for the target process\n");
3994 		return -ESRCH;
3995 	}
3996 
3997 	num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
3998 
3999 	i = j = 0;
4000 	list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
4001 		pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
4002 			 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
4003 
4004 		for (j = 0; j < num_attrs; j++) {
4005 			pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
4006 				 i, j, criu_svm_md->data.attrs[j].type,
4007 				 i, j, criu_svm_md->data.attrs[j].value);
4008 			switch (criu_svm_md->data.attrs[j].type) {
4009 			/* During the checkpoint operation, querying the
4010 			 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
4011 			 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
4012 			 * not used by the range that was checkpointed. Care
4013 			 * must be taken not to restore with an invalid value;
4014 			 * otherwise the gpuidx value will be invalid and
4015 			 * set_attr would eventually fail, so replace such
4016 			 * entries with a harmless dummy attribute such as
4017 			 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
4018 			 */
4019 			case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
4020 				if (criu_svm_md->data.attrs[j].value ==
4021 				    KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
4022 					criu_svm_md->data.attrs[j].type =
4023 						KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4024 					criu_svm_md->data.attrs[j].value = 0;
4025 				}
4026 				break;
4027 			case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
4028 				set_flags = criu_svm_md->data.attrs[j].value;
4029 				break;
4030 			default:
4031 				break;
4032 			}
4033 		}
4034 
4035 		/* CLR_FLAGS is not available via get_attr during checkpoint, but
4036 		 * it needs to be inserted before restoring the ranges, so
4037 		 * allocate extra space for it before calling set_attr.
4038 		 */
4039 		set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4040 						(num_attrs + 1);
4041 		set_attr_new = krealloc(set_attr, set_attr_size, GFP_KERNEL);
4043 		if (!set_attr_new) {
4044 			ret = -ENOMEM;
4045 			goto exit;
4046 		}
4047 		set_attr = set_attr_new;
4048 
4049 		memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
4050 					sizeof(struct kfd_ioctl_svm_attribute));
4051 		set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
4052 		set_attr[num_attrs].value = ~set_flags;
4053 
4054 		ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
4055 					 criu_svm_md->data.size, num_attrs + 1,
4056 					 set_attr);
4057 		if (ret) {
4058 			pr_err("CRIU: failed to set range attributes\n");
4059 			goto exit;
4060 		}
4061 
4062 		i++;
4063 	}
4064 exit:
4065 	kfree(set_attr);
4066 	list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
4067 		pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
4068 						criu_svm_md->data.start_addr);
4069 		kfree(criu_svm_md);
4070 	}
4071 
4072 	mmput(mm);
4073 	return ret;
4075 }
4076 
4077 int kfd_criu_restore_svm(struct kfd_process *p,
4078 			 uint8_t __user *user_priv_ptr,
4079 			 uint64_t *priv_data_offset,
4080 			 uint64_t max_priv_data_size)
4081 {
4082 	uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
4083 	int nattr_common = 4, nattr_accessibility = 1;
4084 	struct criu_svm_metadata *criu_svm_md = NULL;
4085 	struct svm_range_list *svms = &p->svms;
4086 	uint32_t num_devices;
4087 	int ret = 0;
4088 
4089 	num_devices = p->n_pdds;
4090 	/* Handle one SVM range object at a time, also the number of gpus are
4091 	/* Handle one SVM range object at a time. The number of gpus is assumed
4092 	 * to be the same on the restore node; this must have been checked while
4093 	 * evaluating the topology earlier.
4094 
4095 	svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
4096 		(nattr_common + nattr_accessibility * num_devices);
4097 	svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
4098 
4099 	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4100 								svm_attrs_size;
4101 
4102 	criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
4103 	if (!criu_svm_md) {
4104 		pr_err("failed to allocate memory to store svm metadata\n");
4105 		return -ENOMEM;
4106 	}
4107 	if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
4108 		ret = -EINVAL;
4109 		goto exit;
4110 	}
4111 
4112 	ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
4113 			     svm_priv_data_size);
4114 	if (ret) {
4115 		ret = -EFAULT;
4116 		goto exit;
4117 	}
4118 	*priv_data_offset += svm_priv_data_size;
4119 
4120 	list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
4121 
4122 	return 0;
4123 
4125 exit:
4126 	kfree(criu_svm_md);
4127 	return ret;
4128 }
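
/*
 * For reference: kfd_criu_restore_svm above only queues the checkpointed
 * range metadata on svms->criu_svm_metadata_list; the ranges are actually
 * recreated later by kfd_criu_resume_svm, which replays each entry through
 * svm_range_set_attr and then frees the list.
 */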
4129 
4130 void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
4131 			uint64_t *svm_priv_data_size)
4132 {
4133 	uint64_t total_size, accessibility_size, common_attr_size;
4134 	int nattr_common = 4, nattr_accessibility = 1;
4135 	int num_devices = p->n_pdds;
4136 	struct svm_range_list *svms;
4137 	struct svm_range *prange;
4138 	uint32_t count = 0;
4139 
4140 	*svm_priv_data_size = 0;
4141 
4142 	svms = &p->svms;
4143 
4144 	mutex_lock(&svms->lock);
4145 	list_for_each_entry(prange, &svms->list, list) {
4146 		pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
4147 			 prange, prange->start, prange->npages,
4148 			 prange->start + prange->npages - 1);
4149 		count++;
4150 	}
4151 	mutex_unlock(&svms->lock);
4152 
4153 	*num_svm_ranges = count;
4154 	/* Only the accessibility attributes need to be queried for each gpu
4155 	 * individually; the remaining ones span the entire process regardless
4156 	 * of the various gpu nodes. Of the remaining attributes,
4157 	 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
4158 	 *
4159 	 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
4160 	 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
4161 	 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
4162 	 * KFD_IOCTL_SVM_ATTR_GRANULARITY
4163 	 *
4164 	 * ** ACCESSIBILITY ATTRIBUTES **
4165 	 * (Considered as one, type is altered during query, value is gpuid)
4166 	 * KFD_IOCTL_SVM_ATTR_ACCESS
4167 	 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
4168 	 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
4169 	 */
4170 	if (*num_svm_ranges > 0) {
4171 		common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4172 			nattr_common;
4173 		accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
4174 			nattr_accessibility * num_devices;
4175 
4176 		total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4177 			common_attr_size + accessibility_size;
4178 
4179 		*svm_priv_data_size = *num_svm_ranges * total_size;
4180 	}
4181 
4182 	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
4183 		 *svm_priv_data_size);
4184 }
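
/*
 * Worked example for svm_range_get_info above (illustrative): with
 * nattr_common = 4, nattr_accessibility = 1 and num_devices = 2, each
 * range needs
 *
 *   sizeof(struct kfd_criu_svm_range_priv_data) +
 *   (4 + 1 * 2) * sizeof(struct kfd_ioctl_svm_attribute)
 *
 * bytes of private data, and *svm_priv_data_size is that per-range size
 * multiplied by *num_svm_ranges.
 */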
4185 
4186 int kfd_criu_checkpoint_svm(struct kfd_process *p,
4187 			    uint8_t __user *user_priv_data,
4188 			    uint64_t *priv_data_offset)
4189 {
4190 	struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
4191 	struct kfd_ioctl_svm_attribute *query_attr = NULL;
4192 	uint64_t svm_priv_data_size, query_attr_size = 0;
4193 	int index, nattr_common = 4, ret = 0;
4194 	struct svm_range_list *svms;
4195 	int num_devices = p->n_pdds;
4196 	struct svm_range *prange;
4197 	struct mm_struct *mm;
4198 
4199 	svms = &p->svms;
4200 
4201 	mm = get_task_mm(p->lead_thread);
4202 	if (!mm) {
4203 		pr_err("failed to get mm for the target process\n");
4204 		return -ESRCH;
4205 	}
4206 
4207 	query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4208 				(nattr_common + num_devices);
4209 
4210 	query_attr = kzalloc(query_attr_size, GFP_KERNEL);
4211 	if (!query_attr) {
4212 		ret = -ENOMEM;
4213 		goto exit;
4214 	}
4215 
4216 	query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
4217 	query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
4218 	query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4219 	query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
4220 
4221 	for (index = 0; index < num_devices; index++) {
4222 		struct kfd_process_device *pdd = p->pdds[index];
4223 
4224 		query_attr[index + nattr_common].type =
4225 			KFD_IOCTL_SVM_ATTR_ACCESS;
4226 		query_attr[index + nattr_common].value = pdd->user_gpu_id;
4227 	}
4228 
4229 	svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
4230 
4231 	svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
4232 	if (!svm_priv) {
4233 		ret = -ENOMEM;
4234 		goto exit_query;
4235 	}
4236 
4237 	index = 0;
4238 	list_for_each_entry(prange, &svms->list, list) {
4240 		svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
4241 		svm_priv->start_addr = prange->start;
4242 		svm_priv->size = prange->npages;
4243 		memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4244 		pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4245 			 prange, prange->start, prange->npages,
4246 			 prange->start + prange->npages - 1,
4247 			 prange->npages * PAGE_SIZE);
4248 
4249 		ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4250 					 svm_priv->size,
4251 					 (nattr_common + num_devices),
4252 					 svm_priv->attrs);
4253 		if (ret) {
4254 			pr_err("CRIU: failed to obtain range attributes\n");
4255 			goto exit_priv;
4256 		}
4257 
4258 		if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4259 				 svm_priv_data_size)) {
4260 			pr_err("Failed to copy svm priv to user\n");
4261 			ret = -EFAULT;
4262 			goto exit_priv;
4263 		}
4264 
4265 		*priv_data_offset += svm_priv_data_size;
4267 	}
4268 
4270 exit_priv:
4271 	kfree(svm_priv);
4272 exit_query:
4273 	kfree(query_attr);
4274 exit:
4275 	mmput(mm);
4276 	return ret;
4277 }
4278 
4279 int
4280 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4281 	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4282 {
4283 	struct mm_struct *mm = current->mm;
4284 	int r;
4285 
4286 	start >>= PAGE_SHIFT;
4287 	size >>= PAGE_SHIFT;
4288 
4289 	switch (op) {
4290 	case KFD_IOCTL_SVM_OP_SET_ATTR:
4291 		r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4292 		break;
4293 	case KFD_IOCTL_SVM_OP_GET_ATTR:
4294 		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
4295 		break;
4296 	default:
4297 		r = -EINVAL;
4298 		break;
4299 	}
4300 
4301 	return r;
4302 }
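
/*
 * Userspace view (illustrative sketch, not ABI documentation): the SVM
 * ioctl handled by svm_ioctl() above takes byte addresses and sizes and
 * converts them to pages. A hedged example of setting a prefetch location
 * on a range, assuming the uapi definitions from linux/kfd_ioctl.h
 * (kfd_ioctl_svm_args, kfd_ioctl_svm_attribute, AMDKFD_IOC_SVM), an
 * already-open /dev/kfd file descriptor and a valid user GPU id:
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kfd_ioctl.h>
 *
 *	int example_set_prefetch(int kfd_fd, void *addr, size_t size,
 *				 uint32_t gpu_id)
 *	{
 *		size_t sz = sizeof(struct kfd_ioctl_svm_args) +
 *			    sizeof(struct kfd_ioctl_svm_attribute);
 *		struct kfd_ioctl_svm_args *args = calloc(1, sz);
 *		int r;
 *
 *		if (!args)
 *			return -1;
 *		args->start_addr = (uint64_t)(uintptr_t)addr;
 *		args->size = size;
 *		args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *		args->nattr = 1;
 *		args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
 *		args->attrs[0].value = gpu_id;
 *
 *		r = ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 *		free(args);
 *		return r;
 *	}
 */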
4303