xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_svm.c (revision b3ee1e4609512dfff642a96b34d7e5dfcdc92d05)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2020-2021 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include <linux/dynamic_debug.h>
27 #include <drm/ttm/ttm_tt.h>
28 #include <drm/drm_exec.h>
29 
30 #include "amdgpu_sync.h"
31 #include "amdgpu_object.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_hmm.h"
34 #include "amdgpu.h"
35 #include "amdgpu_xgmi.h"
36 #include "kfd_priv.h"
37 #include "kfd_svm.h"
38 #include "kfd_migrate.h"
39 #include "kfd_smi_events.h"
40 
41 #ifdef dev_fmt
42 #undef dev_fmt
43 #endif
44 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
45 
46 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
47 
48 /* Long enough to ensure no retry fault comes in after the svm range is
49  * restored and the page table is updated.
50  */
51 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	(2UL * NSEC_PER_MSEC)
52 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
53 #define dynamic_svm_range_dump(svms) \
54 	_dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
55 #else
56 #define dynamic_svm_range_dump(svms) \
57 	do { if (0) svm_range_debug_dump(svms); } while (0)
58 #endif
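/* Note (descriptive, not from the original source): with CONFIG_DYNAMIC_DEBUG
 * the "svm_range_dump" callsite above is compiled in but disabled by default,
 * and can be toggled at run time through the dynamic debug control file;
 * without CONFIG_DYNAMIC_DEBUG the call compiles away entirely.
 */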
59 
60 /* A giant svm range is split into smaller ranges based on this value. It is
61  * the minimum over all dGPUs/APUs of 1/32 of the VRAM size, clamped between
62  * 2MB and 1GB and aligned to a power of two of 2MB.
63  */
64 static uint64_t max_svm_range_pages;
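/* Illustrative sizing example (an assumption following the comment above, not
 * taken from the code): a dGPU with 16 GB of VRAM contributes 16 GB / 32 =
 * 512 MB; the minimum of these contributions across all GPUs, clamped to
 * [2 MB, 1 GB] and aligned per the 2 MB power-of-two rule, becomes
 * max_svm_range_pages, expressed in pages.
 */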
65 
66 struct criu_svm_metadata {
67 	struct list_head list;
68 	struct kfd_criu_svm_range_priv_data data;
69 };
70 
71 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
72 static bool
73 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
74 				    const struct mmu_notifier_range *range,
75 				    unsigned long cur_seq);
76 static int
77 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
78 		   uint64_t *bo_s, uint64_t *bo_l);
79 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
80 	.invalidate = svm_range_cpu_invalidate_pagetables,
81 };
82 
83 /**
84  * svm_range_unlink - unlink svm_range from lists and interval tree
85  * @prange: svm range structure to be removed
86  *
87  * Remove the svm_range from the svms and svm_bo lists and the svms
88  * interval tree.
89  *
90  * Context: The caller must hold svms->lock
91  */
92 static void svm_range_unlink(struct svm_range *prange)
93 {
94 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
95 		 prange, prange->start, prange->last);
96 
97 	if (prange->svm_bo) {
98 		spin_lock(&prange->svm_bo->list_lock);
99 		list_del(&prange->svm_bo_list);
100 		spin_unlock(&prange->svm_bo->list_lock);
101 	}
102 
103 	list_del(&prange->list);
104 	if (prange->it_node.start != 0 && prange->it_node.last != 0)
105 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
106 }
107 
108 static void
109 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
110 {
111 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
112 		 prange, prange->start, prange->last);
113 
114 	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
115 				     prange->start << PAGE_SHIFT,
116 				     prange->npages << PAGE_SHIFT,
117 				     &svm_range_mn_ops);
118 }
119 
120 /**
121  * svm_range_add_to_svms - add svm range to svms
122  * @prange: svm range structure to be added
123  *
124  * Add the svm range to the svms interval tree and linked list
125  *
126  * Context: The caller must hold svms->lock
127  */
128 static void svm_range_add_to_svms(struct svm_range *prange)
129 {
130 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
131 		 prange, prange->start, prange->last);
132 
133 	list_move_tail(&prange->list, &prange->svms->list);
134 	prange->it_node.start = prange->start;
135 	prange->it_node.last = prange->last;
136 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
137 }
138 
139 static void svm_range_remove_notifier(struct svm_range *prange)
140 {
141 	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
142 		 prange->svms, prange,
143 		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
144 		 prange->notifier.interval_tree.last >> PAGE_SHIFT);
145 
146 	if (prange->notifier.interval_tree.start != 0 &&
147 	    prange->notifier.interval_tree.last != 0)
148 		mmu_interval_notifier_remove(&prange->notifier);
149 }
150 
151 static bool
152 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
153 {
154 	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
155 	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
156 }
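/* Note (descriptive addition): a dma_addr of 0 means the page was never
 * mapped, and addresses tagged with SVM_RANGE_VRAM_DOMAIN are VRAM (device)
 * addresses rather than DMA mappings, so neither counts as a valid DMA
 * mapping here.
 */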
157 
158 static int
159 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
160 		      unsigned long offset, unsigned long npages,
161 		      unsigned long *hmm_pfns, uint32_t gpuidx)
162 {
163 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
164 	dma_addr_t *addr = prange->dma_addr[gpuidx];
165 	struct device *dev = adev->dev;
166 	struct page *page;
167 	int i, r;
168 
169 	if (!addr) {
170 		addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
171 		if (!addr)
172 			return -ENOMEM;
173 		prange->dma_addr[gpuidx] = addr;
174 	}
175 
176 	addr += offset;
177 	for (i = 0; i < npages; i++) {
178 		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
179 			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
180 
181 		page = hmm_pfn_to_page(hmm_pfns[i]);
182 		if (is_zone_device_page(page)) {
183 			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
184 
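			/* Device-private (VRAM) pages are not DMA-mapped: the value stored
			 * below is the GPU physical address (page offset within the pgmap
			 * range plus vram_base_offset), tagged with SVM_RANGE_VRAM_DOMAIN so
			 * the map/unmap paths can tell it apart from a real DMA address.
			 */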
185 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
186 				   bo_adev->vm_manager.vram_base_offset -
187 				   bo_adev->kfd.pgmap.range.start;
188 			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
189 			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
190 			continue;
191 		}
192 		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
193 		r = dma_mapping_error(dev, addr[i]);
194 		if (r) {
195 			dev_err(dev, "failed %d dma_map_page\n", r);
196 			return r;
197 		}
198 		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
199 				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
200 	}
201 
202 	return 0;
203 }
204 
205 static int
206 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
207 		  unsigned long offset, unsigned long npages,
208 		  unsigned long *hmm_pfns)
209 {
210 	struct kfd_process *p;
211 	uint32_t gpuidx;
212 	int r;
213 
214 	p = container_of(prange->svms, struct kfd_process, svms);
215 
216 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
217 		struct kfd_process_device *pdd;
218 
219 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
220 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
221 		if (!pdd) {
222 			pr_debug("failed to find device idx %d\n", gpuidx);
223 			return -EINVAL;
224 		}
225 
226 		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
227 					  hmm_pfns, gpuidx);
228 		if (r)
229 			break;
230 	}
231 
232 	return r;
233 }
234 
235 void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
236 			 unsigned long offset, unsigned long npages)
237 {
238 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
239 	int i;
240 
241 	if (!dma_addr)
242 		return;
243 
244 	for (i = offset; i < offset + npages; i++) {
245 		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
246 			continue;
247 		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
248 		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
249 		dma_addr[i] = 0;
250 	}
251 }
252 
253 void svm_range_dma_unmap(struct svm_range *prange)
254 {
255 	struct kfd_process_device *pdd;
256 	dma_addr_t *dma_addr;
257 	struct device *dev;
258 	struct kfd_process *p;
259 	uint32_t gpuidx;
260 
261 	p = container_of(prange->svms, struct kfd_process, svms);
262 
263 	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
264 		dma_addr = prange->dma_addr[gpuidx];
265 		if (!dma_addr)
266 			continue;
267 
268 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
269 		if (!pdd) {
270 			pr_debug("failed to find device idx %d\n", gpuidx);
271 			continue;
272 		}
273 		dev = &pdd->dev->adev->pdev->dev;
274 
275 		svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
276 	}
277 }
278 
279 static void svm_range_free(struct svm_range *prange, bool do_unmap)
280 {
281 	uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
282 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
283 	uint32_t gpuidx;
284 
285 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
286 		 prange->start, prange->last);
287 
288 	svm_range_vram_node_free(prange);
289 	if (do_unmap)
290 		svm_range_dma_unmap(prange);
291 
292 	if (do_unmap && !p->xnack_enabled) {
293 		pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
294 		amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
295 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
296 	}
297 
298 	/* free dma_addr array for each gpu */
299 	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
300 		if (prange->dma_addr[gpuidx]) {
301 			kvfree(prange->dma_addr[gpuidx]);
302 			prange->dma_addr[gpuidx] = NULL;
303 		}
304 	}
305 
306 	mutex_destroy(&prange->lock);
307 	mutex_destroy(&prange->migrate_mutex);
308 	kfree(prange);
309 }
310 
311 static void
312 svm_range_set_default_attributes(struct svm_range_list *svms, int32_t *location,
313 				 int32_t *prefetch_loc, uint8_t *granularity,
314 				 uint32_t *flags)
315 {
316 	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
317 	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
318 	*granularity = svms->default_granularity;
319 	*flags =
320 		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
321 }
322 
323 static struct
324 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
325 			 uint64_t last, bool update_mem_usage)
326 {
327 	uint64_t size = last - start + 1;
328 	struct svm_range *prange;
329 	struct kfd_process *p;
330 
331 	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
332 	if (!prange)
333 		return NULL;
334 
335 	p = container_of(svms, struct kfd_process, svms);
336 	if (!p->xnack_enabled && update_mem_usage &&
337 	    amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
338 				    KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
339 		pr_info("SVM mapping failed, exceeds resident system memory limit\n");
340 		kfree(prange);
341 		return NULL;
342 	}
343 	prange->npages = size;
344 	prange->svms = svms;
345 	prange->start = start;
346 	prange->last = last;
347 	INIT_LIST_HEAD(&prange->list);
348 	INIT_LIST_HEAD(&prange->update_list);
349 	INIT_LIST_HEAD(&prange->svm_bo_list);
350 	INIT_LIST_HEAD(&prange->deferred_list);
351 	INIT_LIST_HEAD(&prange->child_list);
352 	atomic_set(&prange->invalid, 0);
353 	prange->validate_timestamp = 0;
354 	prange->vram_pages = 0;
355 	mutex_init(&prange->migrate_mutex);
356 	mutex_init(&prange->lock);
357 
358 	if (p->xnack_enabled)
359 		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
360 			    MAX_GPU_INSTANCE);
361 
362 	svm_range_set_default_attributes(svms, &prange->preferred_loc,
363 					 &prange->prefetch_loc,
364 					 &prange->granularity, &prange->flags);
365 
366 	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
367 
368 	return prange;
369 }
370 
371 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
372 {
373 	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
374 		return false;
375 
376 	return true;
377 }
378 
379 static void svm_range_bo_release(struct kref *kref)
380 {
381 	struct svm_range_bo *svm_bo;
382 
383 	svm_bo = container_of(kref, struct svm_range_bo, kref);
384 	pr_debug("svm_bo 0x%p\n", svm_bo);
385 
386 	spin_lock(&svm_bo->list_lock);
387 	while (!list_empty(&svm_bo->range_list)) {
388 		struct svm_range *prange =
389 				list_first_entry(&svm_bo->range_list,
390 						struct svm_range, svm_bo_list);
391 		/* list_del_init tells a concurrent svm_range_vram_node_new when
392 		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
393 		 */
394 		list_del_init(&prange->svm_bo_list);
395 		spin_unlock(&svm_bo->list_lock);
396 
397 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
398 			 prange->start, prange->last);
399 		mutex_lock(&prange->lock);
400 		prange->svm_bo = NULL;
401 		/* prange should not hold vram page now */
402 		WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
403 		mutex_unlock(&prange->lock);
404 
405 		spin_lock(&svm_bo->list_lock);
406 	}
407 	spin_unlock(&svm_bo->list_lock);
408 
409 	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
410 		struct kfd_process_device *pdd;
411 		struct kfd_process *p;
412 		struct mm_struct *mm;
413 
414 		mm = svm_bo->eviction_fence->mm;
415 		/*
416 		 * A forked child process takes a reference on the svm_bo device pages,
417 		 * so the svm_bo could be released after the parent process is gone.
418 		 */
419 		p = kfd_lookup_process_by_mm(mm);
420 		if (p) {
421 			pdd = kfd_get_process_device_data(svm_bo->node, p);
422 			if (pdd)
423 				atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
424 			kfd_unref_process(p);
425 		}
426 		mmput(mm);
427 	}
428 
429 	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
430 		/* We're not in the eviction worker. Signal the fence. */
431 		dma_fence_signal(&svm_bo->eviction_fence->base);
432 	dma_fence_put(&svm_bo->eviction_fence->base);
433 	amdgpu_bo_unref(&svm_bo->bo);
434 	kfree(svm_bo);
435 }
436 
437 static void svm_range_bo_wq_release(struct work_struct *work)
438 {
439 	struct svm_range_bo *svm_bo;
440 
441 	svm_bo = container_of(work, struct svm_range_bo, release_work);
442 	svm_range_bo_release(&svm_bo->kref);
443 }
444 
445 static void svm_range_bo_release_async(struct kref *kref)
446 {
447 	struct svm_range_bo *svm_bo;
448 
449 	svm_bo = container_of(kref, struct svm_range_bo, kref);
450 	pr_debug("svm_bo 0x%p\n", svm_bo);
451 	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
452 	schedule_work(&svm_bo->release_work);
453 }
454 
455 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
456 {
457 	kref_put(&svm_bo->kref, svm_range_bo_release_async);
458 }
459 
460 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
461 {
462 	if (svm_bo)
463 		kref_put(&svm_bo->kref, svm_range_bo_release);
464 }
465 
466 static bool
467 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
468 {
469 	mutex_lock(&prange->lock);
470 	if (!prange->svm_bo) {
471 		mutex_unlock(&prange->lock);
472 		return false;
473 	}
474 	if (prange->ttm_res) {
475 		/* We still have a reference, all is well */
476 		mutex_unlock(&prange->lock);
477 		return true;
478 	}
479 	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
480 		/*
481 		 * Migrating from GPU to GPU: remove the range from the source
482 		 * svm_bo->node's range list and return false so that a new svm_bo
483 		 * is allocated on the destination node.
484 		 */
485 		if (prange->svm_bo->node != node) {
486 			mutex_unlock(&prange->lock);
487 
488 			spin_lock(&prange->svm_bo->list_lock);
489 			list_del_init(&prange->svm_bo_list);
490 			spin_unlock(&prange->svm_bo->list_lock);
491 
492 			svm_range_bo_unref(prange->svm_bo);
493 			return false;
494 		}
495 		if (READ_ONCE(prange->svm_bo->evicting)) {
496 			struct dma_fence *f;
497 			struct svm_range_bo *svm_bo;
498 			/* The BO is getting evicted,
499 			 * we need to get a new one
500 			 */
501 			mutex_unlock(&prange->lock);
502 			svm_bo = prange->svm_bo;
503 			f = dma_fence_get(&svm_bo->eviction_fence->base);
504 			svm_range_bo_unref(prange->svm_bo);
505 			/* wait for the fence to avoid long spin-loop
506 			 * at list_empty_careful
507 			 */
508 			dma_fence_wait(f, false);
509 			dma_fence_put(f);
510 		} else {
511 			/* The BO was still around and we got
512 			 * a new reference to it
513 			 */
514 			mutex_unlock(&prange->lock);
515 			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
516 				 prange->svms, prange->start, prange->last);
517 
518 			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
519 			return true;
520 		}
521 
522 	} else {
523 		mutex_unlock(&prange->lock);
524 	}
525 
526 	/* We need a new svm_bo. Spin-loop to wait for concurrent
527 	 * svm_range_bo_release to finish removing this range from
528 	 * its range list and set prange->svm_bo to null. After this,
529 	 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
530 	 */
531 	while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
532 		cond_resched();
533 
534 	return false;
535 }
536 
537 static struct svm_range_bo *svm_range_bo_new(void)
538 {
539 	struct svm_range_bo *svm_bo;
540 
541 	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
542 	if (!svm_bo)
543 		return NULL;
544 
545 	kref_init(&svm_bo->kref);
546 	INIT_LIST_HEAD(&svm_bo->range_list);
547 	spin_lock_init(&svm_bo->list_lock);
548 
549 	return svm_bo;
550 }
551 
552 int
553 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
554 			bool clear)
555 {
556 	struct kfd_process_device *pdd;
557 	struct amdgpu_bo_param bp;
558 	struct svm_range_bo *svm_bo;
559 	struct amdgpu_bo_user *ubo;
560 	struct amdgpu_bo *bo;
561 	struct kfd_process *p;
562 	struct mm_struct *mm;
563 	int r;
564 
565 	p = container_of(prange->svms, struct kfd_process, svms);
566 	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
567 		 prange->start, prange->last);
568 
569 	if (svm_range_validate_svm_bo(node, prange))
570 		return 0;
571 
572 	svm_bo = svm_range_bo_new();
573 	if (!svm_bo) {
574 		pr_debug("failed to alloc svm bo\n");
575 		return -ENOMEM;
576 	}
577 	mm = get_task_mm(p->lead_thread);
578 	if (!mm) {
579 		pr_debug("failed to get mm\n");
580 		kfree(svm_bo);
581 		return -ESRCH;
582 	}
583 	svm_bo->node = node;
584 	svm_bo->eviction_fence =
585 		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
586 					   mm,
587 					   svm_bo);
588 	mmput(mm);
589 	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
590 	svm_bo->evicting = 0;
591 	memset(&bp, 0, sizeof(bp));
592 	bp.size = prange->npages * PAGE_SIZE;
593 	bp.byte_align = PAGE_SIZE;
594 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
595 	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
596 	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
597 	bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
598 	bp.type = ttm_bo_type_device;
599 	bp.resv = NULL;
600 	if (node->xcp)
601 		bp.xcp_id_plus1 = node->xcp->id + 1;
602 
603 	r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
604 	if (r) {
605 		pr_debug("failed %d to create bo\n", r);
606 		goto create_bo_failed;
607 	}
608 	bo = &ubo->bo;
609 
610 	pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
611 		 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
612 		 bp.xcp_id_plus1 - 1);
613 
614 	r = amdgpu_bo_reserve(bo, true);
615 	if (r) {
616 		pr_debug("failed %d to reserve bo\n", r);
617 		goto reserve_bo_failed;
618 	}
619 
620 	if (clear) {
621 		r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
622 		if (r) {
623 			pr_debug("failed %d to sync bo\n", r);
624 			amdgpu_bo_unreserve(bo);
625 			goto reserve_bo_failed;
626 		}
627 	}
628 
629 	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
630 	if (r) {
631 		pr_debug("failed %d to reserve bo\n", r);
632 		amdgpu_bo_unreserve(bo);
633 		goto reserve_bo_failed;
634 	}
635 	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
636 
637 	amdgpu_bo_unreserve(bo);
638 
639 	svm_bo->bo = bo;
640 	prange->svm_bo = svm_bo;
641 	prange->ttm_res = bo->tbo.resource;
642 	prange->offset = 0;
643 
644 	spin_lock(&svm_bo->list_lock);
645 	list_add(&prange->svm_bo_list, &svm_bo->range_list);
646 	spin_unlock(&svm_bo->list_lock);
647 
648 	pdd = svm_range_get_pdd_by_node(prange, node);
649 	if (pdd)
650 		atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
651 
652 	return 0;
653 
654 reserve_bo_failed:
655 	amdgpu_bo_unref(&bo);
656 create_bo_failed:
657 	dma_fence_put(&svm_bo->eviction_fence->base);
658 	kfree(svm_bo);
659 	prange->ttm_res = NULL;
660 
661 	return r;
662 }
663 
664 void svm_range_vram_node_free(struct svm_range *prange)
665 {
666 	/* serialize prange->svm_bo unref */
667 	mutex_lock(&prange->lock);
668 	/* prange->svm_bo has not been unreferenced yet */
669 	if (prange->ttm_res) {
670 		prange->ttm_res = NULL;
671 		mutex_unlock(&prange->lock);
672 		svm_range_bo_unref(prange->svm_bo);
673 	} else
674 		mutex_unlock(&prange->lock);
675 }
676 
677 struct kfd_node *
678 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
679 {
680 	struct kfd_process *p;
681 	struct kfd_process_device *pdd;
682 
683 	p = container_of(prange->svms, struct kfd_process, svms);
684 	pdd = kfd_process_device_data_by_id(p, gpu_id);
685 	if (!pdd) {
686 		pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
687 		return NULL;
688 	}
689 
690 	return pdd->dev;
691 }
692 
693 struct kfd_process_device *
694 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
695 {
696 	struct kfd_process *p;
697 
698 	p = container_of(prange->svms, struct kfd_process, svms);
699 
700 	return kfd_get_process_device_data(node, p);
701 }
702 
703 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
704 {
705 	struct ttm_operation_ctx ctx = { false, false };
706 
707 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
708 
709 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
710 }
711 
712 static int
713 svm_range_check_attr(struct kfd_process *p,
714 		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
715 {
716 	uint32_t i;
717 
718 	for (i = 0; i < nattr; i++) {
719 		uint32_t val = attrs[i].value;
720 		int gpuidx = MAX_GPU_INSTANCE;
721 
722 		switch (attrs[i].type) {
723 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
724 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
725 			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
726 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
727 			break;
728 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
729 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
730 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
731 			break;
732 		case KFD_IOCTL_SVM_ATTR_ACCESS:
733 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
734 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
735 			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
736 			break;
737 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
738 			break;
739 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
740 			break;
741 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
742 			break;
743 		default:
744 			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
745 			return -EINVAL;
746 		}
747 
748 		if (gpuidx < 0) {
749 			pr_debug("no GPU 0x%x found\n", val);
750 			return -EINVAL;
751 		} else if (gpuidx < MAX_GPU_INSTANCE &&
752 			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
753 			pr_debug("GPU 0x%x not supported\n", val);
754 			return -EINVAL;
755 		}
756 	}
757 
758 	return 0;
759 }
760 
761 static void
762 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
763 		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
764 		      bool *update_mapping)
765 {
766 	uint32_t i;
767 	int gpuidx;
768 
769 	for (i = 0; i < nattr; i++) {
770 		switch (attrs[i].type) {
771 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
772 			prange->preferred_loc = attrs[i].value;
773 			break;
774 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
775 			prange->prefetch_loc = attrs[i].value;
776 			break;
777 		case KFD_IOCTL_SVM_ATTR_ACCESS:
778 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
779 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
780 			if (!p->xnack_enabled)
781 				*update_mapping = true;
782 
783 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
784 							       attrs[i].value);
785 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
786 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
787 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
788 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
789 				bitmap_set(prange->bitmap_access, gpuidx, 1);
790 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
791 			} else {
792 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
793 				bitmap_set(prange->bitmap_aip, gpuidx, 1);
794 			}
795 			break;
796 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
797 			*update_mapping = true;
798 			prange->flags |= attrs[i].value;
799 			break;
800 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
801 			*update_mapping = true;
802 			prange->flags &= ~attrs[i].value;
803 			break;
804 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
805 			prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
806 			break;
807 		default:
808 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
809 		}
810 	}
811 }
812 
813 static bool
814 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
815 			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
816 {
817 	uint32_t i;
818 	int gpuidx;
819 
820 	for (i = 0; i < nattr; i++) {
821 		switch (attrs[i].type) {
822 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
823 			if (prange->preferred_loc != attrs[i].value)
824 				return false;
825 			break;
826 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
827 			/* Prefetch should always trigger a migration even
828 			 * if the value of the attribute didn't change.
829 			 */
830 			return false;
831 		case KFD_IOCTL_SVM_ATTR_ACCESS:
832 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
833 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
834 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
835 							       attrs[i].value);
836 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
837 				if (test_bit(gpuidx, prange->bitmap_access) ||
838 				    test_bit(gpuidx, prange->bitmap_aip))
839 					return false;
840 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
841 				if (!test_bit(gpuidx, prange->bitmap_access))
842 					return false;
843 			} else {
844 				if (!test_bit(gpuidx, prange->bitmap_aip))
845 					return false;
846 			}
847 			break;
848 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
849 			if ((prange->flags & attrs[i].value) != attrs[i].value)
850 				return false;
851 			break;
852 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
853 			if ((prange->flags & attrs[i].value) != 0)
854 				return false;
855 			break;
856 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
857 			if (prange->granularity != attrs[i].value)
858 				return false;
859 			break;
860 		default:
861 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
862 		}
863 	}
864 
865 	return true;
866 }
867 
868 /**
869  * svm_range_debug_dump - print all range information from svms
870  * @svms: svm range list header
871  *
872  * Output each svm range's start, end and prefetch location from the svms
873  * interval tree and linked list as debug messages.
874  *
875  * Context: The caller must hold svms->lock
876  */
877 static void svm_range_debug_dump(struct svm_range_list *svms)
878 {
879 	struct interval_tree_node *node;
880 	struct svm_range *prange;
881 
882 	pr_debug("dump svms 0x%p list\n", svms);
883 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
884 
885 	list_for_each_entry(prange, &svms->list, list) {
886 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
887 			 prange, prange->start, prange->npages,
888 			 prange->start + prange->npages - 1,
889 			 prange->actual_loc);
890 	}
891 
892 	pr_debug("dump svms 0x%p interval tree\n", svms);
893 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
894 	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
895 	while (node) {
896 		prange = container_of(node, struct svm_range, it_node);
897 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
898 			 prange, prange->start, prange->npages,
899 			 prange->start + prange->npages - 1,
900 			 prange->actual_loc);
901 		node = interval_tree_iter_next(node, 0, ~0ULL);
902 	}
903 }
904 
905 static void *
906 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
907 		     uint64_t offset, uint64_t *vram_pages)
908 {
909 	unsigned char *src = (unsigned char *)psrc + offset;
910 	unsigned char *dst;
911 	uint64_t i;
912 
913 	dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
914 	if (!dst)
915 		return NULL;
916 
917 	if (!vram_pages) {
918 		memcpy(dst, src, num_elements * size);
919 		return (void *)dst;
920 	}
921 
922 	*vram_pages = 0;
923 	for (i = 0; i < num_elements; i++) {
924 		dma_addr_t *temp;
925 		temp = (dma_addr_t *)dst + i;
926 		*temp = *((dma_addr_t *)src + i);
927 		if (*temp & SVM_RANGE_VRAM_DOMAIN)
928 			(*vram_pages)++;
929 	}
930 
931 	return (void *)dst;
932 }
933 
934 static int
935 svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
936 {
937 	int i;
938 
939 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
940 		if (!src->dma_addr[i])
941 			continue;
942 		dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
943 					sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
944 		if (!dst->dma_addr[i])
945 			return -ENOMEM;
946 	}
947 
948 	return 0;
949 }
950 
951 static int
952 svm_range_split_array(void *ppnew, void *ppold, size_t size,
953 		      uint64_t old_start, uint64_t old_n,
954 		      uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
955 {
956 	unsigned char *new, *old, *pold;
957 	uint64_t d;
958 
959 	if (!ppold)
960 		return 0;
961 	pold = *(unsigned char **)ppold;
962 	if (!pold)
963 		return 0;
964 
965 	d = (new_start - old_start) * size;
966 	/* get dma addr array for new range and calculate its vram page count */
967 	new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
968 	if (!new)
969 		return -ENOMEM;
970 	d = (new_start == old_start) ? new_n * size : 0;
971 	old = svm_range_copy_array(pold, size, old_n, d, NULL);
972 	if (!old) {
973 		kvfree(new);
974 		return -ENOMEM;
975 	}
976 	kvfree(pold);
977 	*(void **)ppold = old;
978 	*(void **)ppnew = new;
979 
980 	return 0;
981 }
982 
983 static int
984 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
985 		      uint64_t start, uint64_t last)
986 {
987 	uint64_t npages = last - start + 1;
988 	int i, r;
989 
990 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
991 		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
992 					  sizeof(*old->dma_addr[i]), old->start,
993 					  npages, new->start, new->npages,
994 					  old->actual_loc ? &new->vram_pages : NULL);
995 		if (r)
996 			return r;
997 	}
998 	if (old->actual_loc)
999 		old->vram_pages -= new->vram_pages;
1000 
1001 	return 0;
1002 }
1003 
1004 static int
1005 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
1006 		      uint64_t start, uint64_t last)
1007 {
1008 	uint64_t npages = last - start + 1;
1009 
1010 	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
1011 		 new->svms, new, new->start, start, last);
1012 
1013 	if (new->start == old->start) {
1014 		new->offset = old->offset;
1015 		old->offset += new->npages;
1016 	} else {
1017 		new->offset = old->offset + npages;
1018 	}
1019 
1020 	new->svm_bo = svm_range_bo_ref(old->svm_bo);
1021 	new->ttm_res = old->ttm_res;
1022 
1023 	spin_lock(&new->svm_bo->list_lock);
1024 	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1025 	spin_unlock(&new->svm_bo->list_lock);
1026 
1027 	return 0;
1028 }
1029 
1030 /**
1031  * svm_range_split_adjust - split range and adjust
1032  *
1033  * @new: new range
1034  * @old: the old range
1035  * @start: the start address, in pages, that the old range is adjusted to
1036  * @last: the last address, in pages, that the old range is adjusted to
1037  *
1038  * Copy the system memory dma_addr or VRAM ttm_res of the old range into the
1039  * new range, covering the new range's start for new->npages pages; the
1040  * remaining old range then spans [start, last].
1041  *
1042  * Return:
1043  * 0 - OK, -ENOMEM - out of memory
1044  */
1045 static int
1046 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
1047 		      uint64_t start, uint64_t last)
1048 {
1049 	int r;
1050 
1051 	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1052 		 new->svms, new->start, old->start, old->last, start, last);
1053 
1054 	if (new->start < old->start ||
1055 	    new->last > old->last) {
1056 		WARN_ONCE(1, "invalid new range start or last\n");
1057 		return -EINVAL;
1058 	}
1059 
1060 	r = svm_range_split_pages(new, old, start, last);
1061 	if (r)
1062 		return r;
1063 
1064 	if (old->actual_loc && old->ttm_res) {
1065 		r = svm_range_split_nodes(new, old, start, last);
1066 		if (r)
1067 			return r;
1068 	}
1069 
1070 	old->npages = last - start + 1;
1071 	old->start = start;
1072 	old->last = last;
1073 	new->flags = old->flags;
1074 	new->preferred_loc = old->preferred_loc;
1075 	new->prefetch_loc = old->prefetch_loc;
1076 	new->actual_loc = old->actual_loc;
1077 	new->granularity = old->granularity;
1078 	new->mapped_to_gpu = old->mapped_to_gpu;
1079 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1080 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1081 	atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
1082 
1083 	return 0;
1084 }
1085 
1086 /**
1087  * svm_range_split - split a range in 2 ranges
1088  *
1089  * @prange: the svm range to split
1090  * @start: the remaining range start address in pages
1091  * @last: the remaining range last address in pages
1092  * @new: the result new range generated
1093  *
1094  * Two cases only:
1095  * case 1: if start == prange->start
1096  *         prange ==> prange[start, last]
1097  *         new range [last + 1, prange->last]
1098  *
1099  * case 2: if last == prange->last
1100  *         prange ==> prange[start, last]
1101  *         new range [prange->start, start - 1]
1102  *
1103  * Return:
1104  * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1105  */
1106 static int
1107 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1108 		struct svm_range **new)
1109 {
1110 	uint64_t old_start = prange->start;
1111 	uint64_t old_last = prange->last;
1112 	struct svm_range_list *svms;
1113 	int r = 0;
1114 
1115 	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1116 		 old_start, old_last, start, last);
1117 
1118 	if (old_start != start && old_last != last)
1119 		return -EINVAL;
1120 	if (start < old_start || last > old_last)
1121 		return -EINVAL;
1122 
1123 	svms = prange->svms;
1124 	if (old_start == start)
1125 		*new = svm_range_new(svms, last + 1, old_last, false);
1126 	else
1127 		*new = svm_range_new(svms, old_start, start - 1, false);
1128 	if (!*new)
1129 		return -ENOMEM;
1130 
1131 	r = svm_range_split_adjust(*new, prange, start, last);
1132 	if (r) {
1133 		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1134 			 r, old_start, old_last, start, last);
1135 		svm_range_free(*new, false);
1136 		*new = NULL;
1137 	}
1138 
1139 	return r;
1140 }
1141 
1142 static int
1143 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
1144 		     struct list_head *insert_list, struct list_head *remap_list)
1145 {
1146 	struct svm_range *tail = NULL;
1147 	int r = svm_range_split(prange, prange->start, new_last, &tail);
1148 
1149 	if (!r) {
1150 		list_add(&tail->list, insert_list);
1151 		if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
1152 			list_add(&tail->update_list, remap_list);
1153 	}
1154 	return r;
1155 }
1156 
1157 static int
1158 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
1159 		     struct list_head *insert_list, struct list_head *remap_list)
1160 {
1161 	struct svm_range *head = NULL;
1162 	int r = svm_range_split(prange, new_start, prange->last, &head);
1163 
1164 	if (!r) {
1165 		list_add(&head->list, insert_list);
1166 		if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
1167 			list_add(&head->update_list, remap_list);
1168 	}
1169 	return r;
1170 }
1171 
1172 static void
1173 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1174 		    struct svm_range *pchild, enum svm_work_list_ops op)
1175 {
1176 	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1177 		 pchild, pchild->start, pchild->last, prange, op);
1178 
1179 	pchild->work_item.mm = mm;
1180 	pchild->work_item.op = op;
1181 	list_add_tail(&pchild->child_list, &prange->child_list);
1182 }
1183 
1184 static bool
1185 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1186 {
1187 	return (node_a->adev == node_b->adev ||
1188 		amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
1189 }
1190 
1191 static uint64_t
1192 svm_range_get_pte_flags(struct kfd_node *node,
1193 			struct svm_range *prange, int domain)
1194 {
1195 	struct kfd_node *bo_node;
1196 	uint32_t flags = prange->flags;
1197 	uint32_t mapping_flags = 0;
1198 	uint32_t gc_ip_version = KFD_GC_VERSION(node);
1199 	uint64_t pte_flags;
1200 	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1201 	bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
1202 	bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
1203 	unsigned int mtype_local;
1204 
1205 	if (domain == SVM_RANGE_VRAM_DOMAIN)
1206 		bo_node = prange->svm_bo->node;
1207 
1208 	switch (gc_ip_version) {
1209 	case IP_VERSION(9, 4, 1):
1210 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1211 			if (bo_node == node) {
1212 				mapping_flags |= coherent ?
1213 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1214 			} else {
1215 				mapping_flags |= coherent ?
1216 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1217 				if (svm_nodes_in_same_hive(node, bo_node))
1218 					snoop = true;
1219 			}
1220 		} else {
1221 			mapping_flags |= coherent ?
1222 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1223 		}
1224 		break;
1225 	case IP_VERSION(9, 4, 2):
1226 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1227 			if (bo_node == node) {
1228 				mapping_flags |= coherent ?
1229 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1230 				if (node->adev->gmc.xgmi.connected_to_cpu)
1231 					snoop = true;
1232 			} else {
1233 				mapping_flags |= coherent ?
1234 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1235 				if (svm_nodes_in_same_hive(node, bo_node))
1236 					snoop = true;
1237 			}
1238 		} else {
1239 			mapping_flags |= coherent ?
1240 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1241 		}
1242 		break;
1243 	case IP_VERSION(9, 4, 3):
1244 	case IP_VERSION(9, 4, 4):
1245 	case IP_VERSION(9, 5, 0):
1246 		if (ext_coherent)
1247 			mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ?
1248 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC;
1249 		else
1250 			mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1251 				amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1252 		snoop = true;
1253 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1254 			/* local HBM region close to partition */
1255 			if (bo_node->adev == node->adev &&
1256 			    (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
1257 				mapping_flags |= mtype_local;
1258 			/* local HBM region far from partition or remote XGMI GPU
1259 			 * with regular system scope coherence
1260 			 */
1261 			else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
1262 				mapping_flags |= AMDGPU_VM_MTYPE_NC;
1263 			/* PCIe P2P on GPUs pre-9.5.0 */
1264 			else if (gc_ip_version < IP_VERSION(9, 5, 0) &&
1265 				 !svm_nodes_in_same_hive(bo_node, node))
1266 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
1267 			/* Other remote memory */
1268 			else
1269 				mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1270 		/* system memory accessed by the APU */
1271 		} else if (node->adev->flags & AMD_IS_APU) {
1272 			/* On NUMA systems, locality is determined per-page
1273 			 * in amdgpu_gmc_override_vm_pte_flags
1274 			 */
1275 			if (num_possible_nodes() <= 1)
1276 				mapping_flags |= mtype_local;
1277 			else
1278 				mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1279 		/* system memory accessed by the dGPU */
1280 		} else {
1281 			if (gc_ip_version < IP_VERSION(9, 5, 0))
1282 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
1283 			else
1284 				mapping_flags |= AMDGPU_VM_MTYPE_NC;
1285 		}
1286 		break;
1287 	case IP_VERSION(12, 0, 0):
1288 	case IP_VERSION(12, 0, 1):
1289 		mapping_flags |= AMDGPU_VM_MTYPE_NC;
1290 		break;
1291 	default:
1292 		mapping_flags |= coherent ?
1293 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1294 	}
1295 
1296 	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1297 
1298 	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1299 		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1300 	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1301 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1302 
1303 	pte_flags = AMDGPU_PTE_VALID;
1304 	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1305 	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1306 	if (gc_ip_version >= IP_VERSION(12, 0, 0))
1307 		pte_flags |= AMDGPU_PTE_IS_PTE;
1308 
1309 	pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
1310 	return pte_flags;
1311 }
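/* Illustrative example (an assumption summarizing the logic above): for a
 * system-memory page mapped by a dGPU with no special ioctl flags, domain is
 * not SVM_RANGE_VRAM_DOMAIN, so snoop is true and the PTE ends up with
 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED plus whatever
 * amdgpu_gem_va_map_flags() derives from the readable/writeable and MTYPE
 * mapping flags.
 */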
1312 
1313 static int
1314 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1315 			 uint64_t start, uint64_t last,
1316 			 struct dma_fence **fence)
1317 {
1318 	uint64_t init_pte_value = 0;
1319 
1320 	pr_debug("[0x%llx 0x%llx]\n", start, last);
1321 
1322 	return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
1323 				      last, init_pte_value, 0, 0, NULL, NULL,
1324 				      fence);
1325 }
1326 
1327 static int
1328 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1329 			  unsigned long last, uint32_t trigger)
1330 {
1331 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1332 	struct kfd_process_device *pdd;
1333 	struct dma_fence *fence = NULL;
1334 	struct kfd_process *p;
1335 	uint32_t gpuidx;
1336 	int r = 0;
1337 
1338 	if (!prange->mapped_to_gpu) {
1339 		pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1340 			 prange, prange->start, prange->last);
1341 		return 0;
1342 	}
1343 
1344 	if (prange->start == start && prange->last == last) {
1345 		pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1346 		prange->mapped_to_gpu = false;
1347 	}
1348 
1349 	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1350 		  MAX_GPU_INSTANCE);
1351 	p = container_of(prange->svms, struct kfd_process, svms);
1352 
1353 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1354 		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1355 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1356 		if (!pdd) {
1357 			pr_debug("failed to find device idx %d\n", gpuidx);
1358 			return -EINVAL;
1359 		}
1360 
1361 		kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1362 					     start, last, trigger);
1363 
1364 		r = svm_range_unmap_from_gpu(pdd->dev->adev,
1365 					     drm_priv_to_vm(pdd->drm_priv),
1366 					     start, last, &fence);
1367 		if (r)
1368 			break;
1369 
1370 		if (fence) {
1371 			r = dma_fence_wait(fence, false);
1372 			dma_fence_put(fence);
1373 			fence = NULL;
1374 			if (r)
1375 				break;
1376 		}
1377 		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
1378 	}
1379 
1380 	return r;
1381 }
1382 
1383 static int
1384 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1385 		     unsigned long offset, unsigned long npages, bool readonly,
1386 		     dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1387 		     struct dma_fence **fence, bool flush_tlb)
1388 {
1389 	struct amdgpu_device *adev = pdd->dev->adev;
1390 	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1391 	uint64_t pte_flags;
1392 	unsigned long last_start;
1393 	int last_domain;
1394 	int r = 0;
1395 	int64_t i, j;
1396 
1397 	last_start = prange->start + offset;
1398 
1399 	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1400 		 last_start, last_start + npages - 1, readonly);
1401 
1402 	for (i = offset; i < offset + npages; i++) {
1403 		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1404 		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1405 
1406 		/* Collect all pages in the same address range and memory domain
1407 		 * that can be mapped with a single call to update mapping.
1408 		 */
1409 		if (i < offset + npages - 1 &&
1410 		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1411 			continue;
1412 
1413 		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1414 			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1415 
1416 		pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
1417 		if (readonly)
1418 			pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1419 
1420 		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1421 			 prange->svms, last_start, prange->start + i,
1422 			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1423 			 pte_flags);
1424 
1425 		/* For dGPU mode, the same vm_manager is used to allocate VRAM for
1426 		 * different memory partitions based on fpfn/lpfn, so the same
1427 		 * vm_manager.vram_base_offset is used regardless of the memory partition.
1428 		 */
1429 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
1430 					   NULL, last_start, prange->start + i,
1431 					   pte_flags,
1432 					   (last_start - prange->start) << PAGE_SHIFT,
1433 					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1434 					   NULL, dma_addr, &vm->last_update);
1435 
1436 		for (j = last_start - prange->start; j <= i; j++)
1437 			dma_addr[j] |= last_domain;
1438 
1439 		if (r) {
1440 			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1441 			goto out;
1442 		}
1443 		last_start = prange->start + i + 1;
1444 	}
1445 
1446 	r = amdgpu_vm_update_pdes(adev, vm, false);
1447 	if (r) {
1448 		pr_debug("failed %d to update directories 0x%lx\n", r,
1449 			 prange->start);
1450 		goto out;
1451 	}
1452 
1453 	if (fence)
1454 		*fence = dma_fence_get(vm->last_update);
1455 
1456 out:
1457 	return r;
1458 }
1459 
1460 static int
1461 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1462 		      unsigned long npages, bool readonly,
1463 		      unsigned long *bitmap, bool wait, bool flush_tlb)
1464 {
1465 	struct kfd_process_device *pdd;
1466 	struct amdgpu_device *bo_adev = NULL;
1467 	struct kfd_process *p;
1468 	struct dma_fence *fence = NULL;
1469 	uint32_t gpuidx;
1470 	int r = 0;
1471 
1472 	if (prange->svm_bo && prange->ttm_res)
1473 		bo_adev = prange->svm_bo->node->adev;
1474 
1475 	p = container_of(prange->svms, struct kfd_process, svms);
1476 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1477 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1478 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1479 		if (!pdd) {
1480 			pr_debug("failed to find device idx %d\n", gpuidx);
1481 			return -EINVAL;
1482 		}
1483 
1484 		pdd = kfd_bind_process_to_device(pdd->dev, p);
1485 		if (IS_ERR(pdd))
1486 			return -EINVAL;
1487 
1488 		if (bo_adev && pdd->dev->adev != bo_adev &&
1489 		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1490 			pr_debug("cannot map to device idx %d\n", gpuidx);
1491 			continue;
1492 		}
1493 
1494 		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1495 					 prange->dma_addr[gpuidx],
1496 					 bo_adev, wait ? &fence : NULL,
1497 					 flush_tlb);
1498 		if (r)
1499 			break;
1500 
1501 		if (fence) {
1502 			r = dma_fence_wait(fence, false);
1503 			dma_fence_put(fence);
1504 			fence = NULL;
1505 			if (r) {
1506 				pr_debug("failed %d to dma fence wait\n", r);
1507 				break;
1508 			}
1509 		}
1510 
1511 		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1512 	}
1513 
1514 	return r;
1515 }
1516 
1517 struct svm_validate_context {
1518 	struct kfd_process *process;
1519 	struct svm_range *prange;
1520 	bool intr;
1521 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1522 	struct drm_exec exec;
1523 };
1524 
1525 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
1526 {
1527 	struct kfd_process_device *pdd;
1528 	struct amdgpu_vm *vm;
1529 	uint32_t gpuidx;
1530 	int r;
1531 
1532 	drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0, 0);
1533 	drm_exec_until_all_locked(&ctx->exec) {
1534 		for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1535 			pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1536 			if (!pdd) {
1537 				pr_debug("failed to find device idx %d\n", gpuidx);
1538 				r = -EINVAL;
1539 				goto unreserve_out;
1540 			}
1541 			vm = drm_priv_to_vm(pdd->drm_priv);
1542 
1543 			r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1544 			drm_exec_retry_on_contention(&ctx->exec);
1545 			if (unlikely(r)) {
1546 				pr_debug("failed %d to reserve bo\n", r);
1547 				goto unreserve_out;
1548 			}
1549 		}
1550 	}
1551 
1552 	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1553 		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1554 		if (!pdd) {
1555 			pr_debug("failed to find device idx %d\n", gpuidx);
1556 			r = -EINVAL;
1557 			goto unreserve_out;
1558 		}
1559 
1560 		r = amdgpu_vm_validate(pdd->dev->adev,
1561 				       drm_priv_to_vm(pdd->drm_priv), NULL,
1562 				       svm_range_bo_validate, NULL);
1563 		if (r) {
1564 			pr_debug("failed %d validate pt bos\n", r);
1565 			goto unreserve_out;
1566 		}
1567 	}
1568 
1569 	return 0;
1570 
1571 unreserve_out:
1572 	drm_exec_fini(&ctx->exec);
1573 	return r;
1574 }
1575 
1576 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1577 {
1578 	drm_exec_fini(&ctx->exec);
1579 }
1580 
1581 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1582 {
1583 	struct kfd_process_device *pdd;
1584 
1585 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1586 	if (!pdd)
1587 		return NULL;
1588 
1589 	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1590 }
1591 
1592 /*
1593  * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1594  *
1595  * To prevent concurrent destruction or change of range attributes, the
1596  * svm_read_lock must be held. The caller must not hold the svm_write_lock
1597  * because that would block concurrent evictions and lead to deadlocks. To
1598  * serialize concurrent migrations or validations of the same range, the
1599  * prange->migrate_mutex must be held.
1600  *
1601  * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1602  * eviction fence).
1603  *
1604  * The following sequence ensures race-free validation and GPU mapping:
1605  *
1606  * 1. Reserve page table (and SVM BO if range is in VRAM)
1607  * 2. hmm_range_fault to get page addresses (if system memory)
1608  * 3. DMA-map pages (if system memory)
1609  * 4-a. Take notifier lock
1610  * 4-b. Check that pages still valid (mmu_interval_read_retry)
1611  * 4-c. Check that the range was not split or otherwise invalidated
1612  * 4-d. Update GPU page table
1613  * 4-e. Release notifier lock
1614  * 5. Release page table (and SVM BO) reservation
1615  */
1616 static int svm_range_validate_and_map(struct mm_struct *mm,
1617 				      unsigned long map_start, unsigned long map_last,
1618 				      struct svm_range *prange, int32_t gpuidx,
1619 				      bool intr, bool wait, bool flush_tlb)
1620 {
1621 	struct svm_validate_context *ctx;
1622 	unsigned long start, end, addr;
1623 	struct kfd_process *p;
1624 	void *owner;
1625 	int32_t idx;
1626 	int r = 0;
1627 
1628 	ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
1629 	if (!ctx)
1630 		return -ENOMEM;
1631 	ctx->process = container_of(prange->svms, struct kfd_process, svms);
1632 	ctx->prange = prange;
1633 	ctx->intr = intr;
1634 
1635 	if (gpuidx < MAX_GPU_INSTANCE) {
1636 		bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1637 		bitmap_set(ctx->bitmap, gpuidx, 1);
1638 	} else if (ctx->process->xnack_enabled) {
1639 		bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1640 
1641 		/* If the range was prefetched to a GPU, or migrated to a GPU by a
1642 		 * retry fault, and that GPU has the ACCESS attribute for the range,
1643 		 * create the mapping on that GPU.
1644 		 */
1645 		if (prange->actual_loc) {
1646 			gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1647 							prange->actual_loc);
1648 			if (gpuidx < 0) {
1649 				WARN_ONCE(1, "failed get device by id 0x%x\n",
1650 					 prange->actual_loc);
1651 				r = -EINVAL;
1652 				goto free_ctx;
1653 			}
1654 			if (test_bit(gpuidx, prange->bitmap_access))
1655 				bitmap_set(ctx->bitmap, gpuidx, 1);
1656 		}
1657 
1658 		/*
1659 		 * If prange is already mapped or with always mapped flag,
1660 		 * update mapping on GPUs with ACCESS attribute
1661 		 */
1662 		if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1663 			if (prange->mapped_to_gpu ||
1664 			    prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1665 				bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1666 		}
1667 	} else {
1668 		bitmap_or(ctx->bitmap, prange->bitmap_access,
1669 			  prange->bitmap_aip, MAX_GPU_INSTANCE);
1670 	}
1671 
1672 	if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1673 		r = 0;
1674 		goto free_ctx;
1675 	}
1676 
1677 	if (prange->actual_loc && !prange->ttm_res) {
1678 		/* This should never happen. actual_loc gets set by
1679 		 * svm_migrate_ram_to_vram after allocating a BO.
1680 		 */
1681 		WARN_ONCE(1, "VRAM BO missing during validation\n");
1682 		r = -EINVAL;
1683 		goto free_ctx;
1684 	}
1685 
1686 	r = svm_range_reserve_bos(ctx, intr);
1687 	if (r)
1688 		goto free_ctx;
1689 
1690 	p = container_of(prange->svms, struct kfd_process, svms);
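	/* hmm_range_fault takes a single dev_private_owner: use the common pgmap
	 * owner only if every selected GPU shares it, otherwise pass NULL below
	 * so device-private (VRAM) pages are first migrated back to system
	 * memory that all selected GPUs can access.
	 */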
1691 	owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1692 						MAX_GPU_INSTANCE));
1693 	for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1694 		if (kfd_svm_page_owner(p, idx) != owner) {
1695 			owner = NULL;
1696 			break;
1697 		}
1698 	}
1699 
1700 	start = map_start << PAGE_SHIFT;
1701 	end = (map_last + 1) << PAGE_SHIFT;
1702 	for (addr = start; !r && addr < end; ) {
1703 		struct hmm_range *hmm_range = NULL;
1704 		unsigned long map_start_vma;
1705 		unsigned long map_last_vma;
1706 		struct vm_area_struct *vma;
1707 		unsigned long next = 0;
1708 		unsigned long offset;
1709 		unsigned long npages;
1710 		bool readonly;
1711 
1712 		vma = vma_lookup(mm, addr);
1713 		if (vma) {
1714 			readonly = !(vma->vm_flags & VM_WRITE);
1715 
1716 			next = min(vma->vm_end, end);
1717 			npages = (next - addr) >> PAGE_SHIFT;
1718 			WRITE_ONCE(p->svms.faulting_task, current);
1719 			r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1720 						       readonly, owner, NULL,
1721 						       &hmm_range);
1722 			WRITE_ONCE(p->svms.faulting_task, NULL);
1723 			if (r)
1724 				pr_debug("failed %d to get svm range pages\n", r);
1725 		} else {
1726 			r = -EFAULT;
1727 		}
1728 
1729 		if (!r) {
1730 			offset = (addr >> PAGE_SHIFT) - prange->start;
1731 			r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1732 					      hmm_range->hmm_pfns);
1733 			if (r)
1734 				pr_debug("failed %d to dma map range\n", r);
1735 		}
1736 
1737 		svm_range_lock(prange);
1738 
1739 		/* Free backing memory of hmm_range if it was initialized.
1740 		 * Override the return value to TRY AGAIN only if prior returns
1741 		 * were successful.
1742 		 */
1743 		if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
1744 			pr_debug("hmm update the range, need validate again\n");
1745 			r = -EAGAIN;
1746 		}
1747 
1748 		if (!r && !list_empty(&prange->child_list)) {
1749 			pr_debug("range split by unmap in parallel, validate again\n");
1750 			r = -EAGAIN;
1751 		}
1752 
1753 		if (!r) {
1754 			map_start_vma = max(map_start, prange->start + offset);
1755 			map_last_vma = min(map_last, prange->start + offset + npages - 1);
1756 			if (map_start_vma <= map_last_vma) {
1757 				offset = map_start_vma - prange->start;
1758 				npages = map_last_vma - map_start_vma + 1;
1759 				r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1760 							  ctx->bitmap, wait, flush_tlb);
1761 			}
1762 		}
1763 
1764 		if (!r && next == end)
1765 			prange->mapped_to_gpu = true;
1766 
1767 		svm_range_unlock(prange);
1768 
1769 		addr = next;
1770 	}
1771 
1772 	svm_range_unreserve_bos(ctx);
1773 	if (!r)
1774 		prange->validate_timestamp = ktime_get_boottime();
1775 
1776 free_ctx:
1777 	kfree(ctx);
1778 
1779 	return r;
1780 }
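
/*
 * Minimal sketch of steps 2 and 4-a/4-b of the sequence documented above
 * svm_range_validate_and_map(), for a system-memory-only range. DMA mapping,
 * the child-list check and the GPU page table update are omitted, and the
 * function name is illustrative only, not part of the driver.
 */
static int __maybe_unused example_get_and_recheck_pages(struct svm_range *prange,
							 unsigned long addr,
							 unsigned long npages,
							 void *owner)
{
	struct hmm_range *hmm_range = NULL;
	int r;

	/* Step 2: fault in the CPU pages and collect their PFNs */
	r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
				       false, owner, NULL, &hmm_range);
	if (r)
		return r;

	/* Steps 4-a/4-b: take the range lock, then check whether a concurrent
	 * CPU invalidation raced with us; if so the caller must retry.
	 */
	svm_range_lock(prange);
	if (amdgpu_hmm_range_get_pages_done(hmm_range))
		r = -EAGAIN;

	/* Step 4-d would update the GPU page table here, still under the lock */
	svm_range_unlock(prange);
	return r;
}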
1781 
1782 /**
1783  * svm_range_list_lock_and_flush_work - flush pending deferred work
1784  *
1785  * @svms: the svm range list
1786  * @mm: the mm structure
1787  *
1788  * Context: Returns with mmap write lock held, pending deferred work flushed
1789  *
1790  */
1791 void
1792 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1793 				   struct mm_struct *mm)
1794 {
1795 retry_flush_work:
1796 	flush_work(&svms->deferred_list_work);
1797 	mmap_write_lock(mm);
1798 
1799 	if (list_empty(&svms->deferred_range_list))
1800 		return;
1801 	mmap_write_unlock(mm);
1802 	pr_debug("retry flush\n");
1803 	goto retry_flush_work;
1804 }
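
/*
 * Sketch of the typical caller pattern for svm_range_list_lock_and_flush_work()
 * (compare svm_range_restore_work() below): the helper returns with the mmap
 * write lock held, so the caller drops it itself. The function name is
 * illustrative only, not part of the driver.
 */
static void __maybe_unused example_update_range_list(struct svm_range_list *svms,
						     struct mm_struct *mm)
{
	svm_range_list_lock_and_flush_work(svms, mm);
	mutex_lock(&svms->lock);

	/* ... modify the svms interval tree and range lists here ... */

	mutex_unlock(&svms->lock);
	mmap_write_unlock(mm);
}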
1805 
1806 static void svm_range_restore_work(struct work_struct *work)
1807 {
1808 	struct delayed_work *dwork = to_delayed_work(work);
1809 	struct amdkfd_process_info *process_info;
1810 	struct svm_range_list *svms;
1811 	struct svm_range *prange;
1812 	struct kfd_process *p;
1813 	struct mm_struct *mm;
1814 	int evicted_ranges;
1815 	int invalid;
1816 	int r;
1817 
1818 	svms = container_of(dwork, struct svm_range_list, restore_work);
1819 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1820 	if (!evicted_ranges)
1821 		return;
1822 
1823 	pr_debug("restore svm ranges\n");
1824 
1825 	p = container_of(svms, struct kfd_process, svms);
1826 	process_info = p->kgd_process_info;
1827 
1828 	/* Keep the mm reference while svm_range_validate_and_map maps the ranges */
1829 	mm = get_task_mm(p->lead_thread);
1830 	if (!mm) {
1831 		pr_debug("svms 0x%p process mm gone\n", svms);
1832 		return;
1833 	}
1834 
1835 	mutex_lock(&process_info->lock);
1836 	svm_range_list_lock_and_flush_work(svms, mm);
1837 	mutex_lock(&svms->lock);
1838 
1839 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1840 
1841 	list_for_each_entry(prange, &svms->list, list) {
1842 		invalid = atomic_read(&prange->invalid);
1843 		if (!invalid)
1844 			continue;
1845 
1846 		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1847 			 prange->svms, prange, prange->start, prange->last,
1848 			 invalid);
1849 
1850 		/*
1851 		 * If the range is migrating, wait for the migration to finish.
1852 		 */
1853 		mutex_lock(&prange->migrate_mutex);
1854 
1855 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
1856 					       MAX_GPU_INSTANCE, false, true, false);
1857 		if (r)
1858 			pr_debug("failed %d to map 0x%lx to gpus\n", r,
1859 				 prange->start);
1860 
1861 		mutex_unlock(&prange->migrate_mutex);
1862 		if (r)
1863 			goto out_reschedule;
1864 
1865 		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1866 			goto out_reschedule;
1867 	}
1868 
1869 	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1870 	    evicted_ranges)
1871 		goto out_reschedule;
1872 
1873 	evicted_ranges = 0;
1874 
1875 	r = kgd2kfd_resume_mm(mm);
1876 	if (r) {
1877 		/* No recovery from this failure. Probably the CP is
1878 		 * hanging. No point trying again.
1879 		 */
1880 		pr_debug("failed %d to resume KFD\n", r);
1881 	}
1882 
1883 	pr_debug("restore svm ranges successfully\n");
1884 
1885 out_reschedule:
1886 	mutex_unlock(&svms->lock);
1887 	mmap_write_unlock(mm);
1888 	mutex_unlock(&process_info->lock);
1889 
1890 	/* If validation failed, reschedule another attempt */
1891 	if (evicted_ranges) {
1892 		pr_debug("reschedule to restore svm range\n");
1893 		queue_delayed_work(system_freezable_wq, &svms->restore_work,
1894 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1895 
1896 		kfd_smi_event_queue_restore_rescheduled(mm);
1897 	}
1898 	mmput(mm);
1899 }
1900 
1901 /**
1902  * svm_range_evict - evict svm range
1903  * @prange: svm range structure
1904  * @mm: current process mm_struct
1905  * @start: starting process queue number
1906  * @start: first page (page number) of the invalidated address range
1907  * @last: last page (page number) of the invalidated address range
1908  *
1909  * Stop all queues of the process to ensure the GPU doesn't access the memory,
1910  * then return to let the CPU evict the buffer and proceed with the CPU pagetable update.
1911  *
1912  * No lock is needed to sync CPU pagetable invalidation with GPU execution.
1913  * If invalidation happens while the restore work is running, the restore work
1914  * will restart to pick up the latest CPU page mapping for the GPU, then start
1915  * the queues.
1916  */
1917 static int
1918 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1919 		unsigned long start, unsigned long last,
1920 		enum mmu_notifier_event event)
1921 {
1922 	struct svm_range_list *svms = prange->svms;
1923 	struct svm_range *pchild;
1924 	struct kfd_process *p;
1925 	int r = 0;
1926 
1927 	p = container_of(svms, struct kfd_process, svms);
1928 
1929 	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1930 		 svms, prange->start, prange->last, start, last);
1931 
1932 	if (!p->xnack_enabled ||
1933 	    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1934 		int evicted_ranges;
1935 		bool mapped = prange->mapped_to_gpu;
1936 
1937 		list_for_each_entry(pchild, &prange->child_list, child_list) {
1938 			if (!pchild->mapped_to_gpu)
1939 				continue;
1940 			mapped = true;
1941 			mutex_lock_nested(&pchild->lock, 1);
1942 			if (pchild->start <= last && pchild->last >= start) {
1943 				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1944 					 pchild->start, pchild->last);
1945 				atomic_inc(&pchild->invalid);
1946 			}
1947 			mutex_unlock(&pchild->lock);
1948 		}
1949 
1950 		if (!mapped)
1951 			return r;
1952 
1953 		if (prange->start <= last && prange->last >= start)
1954 			atomic_inc(&prange->invalid);
1955 
1956 		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1957 		if (evicted_ranges != 1)
1958 			return r;
1959 
1960 		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1961 			 prange->svms, prange->start, prange->last);
1962 
1963 		/* First eviction, stop the queues */
1964 		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
1965 		if (r)
1966 			pr_debug("failed to quiesce KFD\n");
1967 
1968 		pr_debug("schedule to restore svm %p ranges\n", svms);
1969 		queue_delayed_work(system_freezable_wq, &svms->restore_work,
1970 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1971 	} else {
1972 		unsigned long s, l;
1973 		uint32_t trigger;
1974 
1975 		if (event == MMU_NOTIFY_MIGRATE)
1976 			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
1977 		else
1978 			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
1979 
1980 		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1981 			 prange->svms, start, last);
1982 		list_for_each_entry(pchild, &prange->child_list, child_list) {
1983 			mutex_lock_nested(&pchild->lock, 1);
1984 			s = max(start, pchild->start);
1985 			l = min(last, pchild->last);
1986 			if (l >= s)
1987 				svm_range_unmap_from_gpus(pchild, s, l, trigger);
1988 			mutex_unlock(&pchild->lock);
1989 		}
1990 		s = max(start, prange->start);
1991 		l = min(last, prange->last);
1992 		if (l >= s)
1993 			svm_range_unmap_from_gpus(prange, s, l, trigger);
1994 	}
1995 
1996 	return r;
1997 }
1998 
1999 static struct svm_range *svm_range_clone(struct svm_range *old)
2000 {
2001 	struct svm_range *new;
2002 
2003 	new = svm_range_new(old->svms, old->start, old->last, false);
2004 	if (!new)
2005 		return NULL;
2006 	if (svm_range_copy_dma_addrs(new, old)) {
2007 		svm_range_free(new, false);
2008 		return NULL;
2009 	}
2010 	if (old->svm_bo) {
2011 		new->ttm_res = old->ttm_res;
2012 		new->offset = old->offset;
2013 		new->svm_bo = svm_range_bo_ref(old->svm_bo);
2014 		spin_lock(&new->svm_bo->list_lock);
2015 		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
2016 		spin_unlock(&new->svm_bo->list_lock);
2017 	}
2018 	new->flags = old->flags;
2019 	new->preferred_loc = old->preferred_loc;
2020 	new->prefetch_loc = old->prefetch_loc;
2021 	new->actual_loc = old->actual_loc;
2022 	new->granularity = old->granularity;
2023 	new->mapped_to_gpu = old->mapped_to_gpu;
2024 	new->vram_pages = old->vram_pages;
2025 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
2026 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
2027 	atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
2028 
2029 	return new;
2030 }
2031 
2032 void svm_range_set_max_pages(struct amdgpu_device *adev)
2033 {
2034 	uint64_t max_pages;
2035 	uint64_t pages, _pages;
2036 	uint64_t min_pages = 0;
2037 	int i, id;
2038 
2039 	for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
2040 		if (adev->kfd.dev->nodes[i]->xcp)
2041 			id = adev->kfd.dev->nodes[i]->xcp->id;
2042 		else
2043 			id = -1;
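		/* 1/32 of the partition's memory in 4 KiB pages: >> 17 combines
		 * >> 12 (bytes to pages) with >> 5 (divide by 32); the result is
		 * clamped to 512..256K pages (2 MiB..1 GiB) and rounded down to
		 * a power of two below.
		 */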
2044 		pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
2045 		pages = clamp(pages, 1ULL << 9, 1ULL << 18);
2046 		pages = rounddown_pow_of_two(pages);
2047 		min_pages = min_not_zero(min_pages, pages);
2048 	}
2049 
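	/* Lock-free update of the global limit: take the smaller of the current
	 * value and this device's minimum (treating 0 as unset), and retry if
	 * another device updated max_svm_range_pages concurrently.
	 */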
2050 	do {
2051 		max_pages = READ_ONCE(max_svm_range_pages);
2052 		_pages = min_not_zero(max_pages, min_pages);
2053 	} while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2054 }
2055 
2056 static int
2057 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2058 		    uint64_t max_pages, struct list_head *insert_list,
2059 		    struct list_head *update_list)
2060 {
2061 	struct svm_range *prange;
2062 	uint64_t l;
2063 
2064 	pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
2065 		 max_pages, start, last);
2066 
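	/* Split at multiples of max_pages so each new range stays inside one
	 * aligned max_pages-sized block; e.g. with max_pages 0x200 the span
	 * [0x1f0 0x5ff] becomes [0x1f0 0x1ff], [0x200 0x3ff] and [0x400 0x5ff].
	 */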
2067 	while (last >= start) {
2068 		l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2069 
2070 		prange = svm_range_new(svms, start, l, true);
2071 		if (!prange)
2072 			return -ENOMEM;
2073 		list_add(&prange->list, insert_list);
2074 		list_add(&prange->update_list, update_list);
2075 
2076 		start = l + 1;
2077 	}
2078 	return 0;
2079 }
2080 
2081 /**
2082  * svm_range_add - add svm range and handle overlap
2083  * @p: the range add to this process svms
2084  * @start: range start address, in pages
2085  * @size: range size, in pages
2086  * @nattr: number of attributes
2087  * @attrs: array of attributes
2088  * @update_list: output, the ranges need validate and update GPU mapping
2089  * @insert_list: output, the ranges need insert to svms
2090  * @remove_list: output, the ranges are replaced and need remove from svms
2091  * @remap_list: output, remap unaligned svm ranges
2092  *
2093  * Check if the virtual address range has overlap with any existing ranges,
2094  * split partly overlapping ranges and add new ranges in the gaps. All changes
2095  * should be applied to the range_list and interval tree transactionally. If
2096  * any range split or allocation fails, the entire update fails. Therefore any
2097  * existing overlapping svm_ranges are cloned and the original svm_ranges left
2098  * unchanged.
2099  *
2100  * If the transaction succeeds, the caller can update and insert clones and
2101  * new ranges, then free the originals.
2102  *
2103  * Otherwise the caller can free the clones and new ranges, while the old
2104  * svm_ranges remain unchanged.
2105  *
2106  * Context: Process context, caller must hold svms->lock
2107  *
2108  * Return:
2109  * 0 - OK, otherwise error code
2110  */
2111 static int
2112 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2113 	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2114 	      struct list_head *update_list, struct list_head *insert_list,
2115 	      struct list_head *remove_list, struct list_head *remap_list)
2116 {
2117 	unsigned long last = start + size - 1UL;
2118 	struct svm_range_list *svms = &p->svms;
2119 	struct interval_tree_node *node;
2120 	struct svm_range *prange;
2121 	struct svm_range *tmp;
2122 	struct list_head new_list;
2123 	int r = 0;
2124 
2125 	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2126 
2127 	INIT_LIST_HEAD(update_list);
2128 	INIT_LIST_HEAD(insert_list);
2129 	INIT_LIST_HEAD(remove_list);
2130 	INIT_LIST_HEAD(&new_list);
2131 	INIT_LIST_HEAD(remap_list);
2132 
2133 	node = interval_tree_iter_first(&svms->objects, start, last);
2134 	while (node) {
2135 		struct interval_tree_node *next;
2136 		unsigned long next_start;
2137 
2138 		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2139 			 node->last);
2140 
2141 		prange = container_of(node, struct svm_range, it_node);
2142 		next = interval_tree_iter_next(node, start, last);
2143 		next_start = min(node->last, last) + 1;
2144 
2145 		if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2146 		    prange->mapped_to_gpu) {
2147 			/* nothing to do */
2148 		} else if (node->start < start || node->last > last) {
2149 			/* node intersects the update range and its attributes
2150 			 * will change. Clone and split it, apply updates only
2151 			 * to the overlapping part
2152 			 */
2153 			struct svm_range *old = prange;
2154 
2155 			prange = svm_range_clone(old);
2156 			if (!prange) {
2157 				r = -ENOMEM;
2158 				goto out;
2159 			}
2160 
2161 			list_add(&old->update_list, remove_list);
2162 			list_add(&prange->list, insert_list);
2163 			list_add(&prange->update_list, update_list);
2164 
2165 			if (node->start < start) {
2166 				pr_debug("change old range start\n");
2167 				r = svm_range_split_head(prange, start,
2168 							 insert_list, remap_list);
2169 				if (r)
2170 					goto out;
2171 			}
2172 			if (node->last > last) {
2173 				pr_debug("change old range last\n");
2174 				r = svm_range_split_tail(prange, last,
2175 							 insert_list, remap_list);
2176 				if (r)
2177 					goto out;
2178 			}
2179 		} else {
2180 			/* The node is contained within start..last,
2181 			 * just update it
2182 			 */
2183 			list_add(&prange->update_list, update_list);
2184 		}
2185 
2186 		/* insert a new node if needed */
2187 		if (node->start > start) {
2188 			r = svm_range_split_new(svms, start, node->start - 1,
2189 						READ_ONCE(max_svm_range_pages),
2190 						&new_list, update_list);
2191 			if (r)
2192 				goto out;
2193 		}
2194 
2195 		node = next;
2196 		start = next_start;
2197 	}
2198 
2199 	/* add a final range at the end if needed */
2200 	if (start <= last)
2201 		r = svm_range_split_new(svms, start, last,
2202 					READ_ONCE(max_svm_range_pages),
2203 					&new_list, update_list);
2204 
2205 out:
2206 	if (r) {
2207 		list_for_each_entry_safe(prange, tmp, insert_list, list)
2208 			svm_range_free(prange, false);
2209 		list_for_each_entry_safe(prange, tmp, &new_list, list)
2210 			svm_range_free(prange, true);
2211 	} else {
2212 		list_splice(&new_list, insert_list);
2213 	}
2214 
2215 	return r;
2216 }
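
/*
 * Sketch of how a caller consumes the lists built by svm_range_add() when the
 * transaction succeeds: insert the clones and new ranges, then unlink and free
 * the replaced originals. The caller is assumed to hold svms->lock and the mmap
 * lock; the real ioctl handler additionally validates and maps the ranges on
 * update_list. The function name is illustrative only, not part of the driver.
 */
static void __maybe_unused example_commit_range_update(struct mm_struct *mm,
							struct list_head *insert_list,
							struct list_head *remove_list)
{
	struct svm_range *prange, *next;

	/* insert_list is linked through prange->list */
	list_for_each_entry_safe(prange, next, insert_list, list) {
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
	}

	/* remove_list is linked through prange->update_list */
	list_for_each_entry_safe(prange, next, remove_list, update_list) {
		svm_range_unlink(prange);
		svm_range_remove_notifier(prange);
		svm_range_free(prange, false);
	}
}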
2217 
2218 static void
2219 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2220 					    struct svm_range *prange)
2221 {
2222 	unsigned long start;
2223 	unsigned long last;
2224 
2225 	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2226 	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2227 
2228 	if (prange->start == start && prange->last == last)
2229 		return;
2230 
2231 	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2232 		  prange->svms, prange, start, last, prange->start,
2233 		  prange->last);
2234 
2235 	if (start != 0 && last != 0) {
2236 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
2237 		svm_range_remove_notifier(prange);
2238 	}
2239 	prange->it_node.start = prange->start;
2240 	prange->it_node.last = prange->last;
2241 
2242 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
2243 	svm_range_add_notifier_locked(mm, prange);
2244 }
2245 
2246 static void
2247 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2248 			 struct mm_struct *mm)
2249 {
2250 	switch (prange->work_item.op) {
2251 	case SVM_OP_NULL:
2252 		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2253 			 svms, prange, prange->start, prange->last);
2254 		break;
2255 	case SVM_OP_UNMAP_RANGE:
2256 		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2257 			 svms, prange, prange->start, prange->last);
2258 		svm_range_unlink(prange);
2259 		svm_range_remove_notifier(prange);
2260 		svm_range_free(prange, true);
2261 		break;
2262 	case SVM_OP_UPDATE_RANGE_NOTIFIER:
2263 		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2264 			 svms, prange, prange->start, prange->last);
2265 		svm_range_update_notifier_and_interval_tree(mm, prange);
2266 		break;
2267 	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2268 		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2269 			 svms, prange, prange->start, prange->last);
2270 		svm_range_update_notifier_and_interval_tree(mm, prange);
2271 		/* TODO: implement deferred validation and mapping */
2272 		break;
2273 	case SVM_OP_ADD_RANGE:
2274 		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2275 			 prange->start, prange->last);
2276 		svm_range_add_to_svms(prange);
2277 		svm_range_add_notifier_locked(mm, prange);
2278 		break;
2279 	case SVM_OP_ADD_RANGE_AND_MAP:
2280 		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2281 			 prange, prange->start, prange->last);
2282 		svm_range_add_to_svms(prange);
2283 		svm_range_add_notifier_locked(mm, prange);
2284 		/* TODO: implement deferred validation and mapping */
2285 		break;
2286 	default:
2287 		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2288 			 prange->work_item.op);
2289 	}
2290 }
2291 
2292 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2293 {
2294 	struct kfd_process_device *pdd;
2295 	struct kfd_process *p;
2296 	uint32_t i;
2297 
2298 	p = container_of(svms, struct kfd_process, svms);
2299 
2300 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2301 		pdd = p->pdds[i];
2302 		if (!pdd)
2303 			continue;
2304 
2305 		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2306 
2307 		amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2308 				pdd->dev->adev->irq.retry_cam_enabled ?
2309 				&pdd->dev->adev->irq.ih :
2310 				&pdd->dev->adev->irq.ih1);
2311 
2312 		if (pdd->dev->adev->irq.retry_cam_enabled)
2313 			amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2314 				&pdd->dev->adev->irq.ih_soft);
2315 
2316 
2317 		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2318 	}
2319 }
2320 
2321 static void svm_range_deferred_list_work(struct work_struct *work)
2322 {
2323 	struct svm_range_list *svms;
2324 	struct svm_range *prange;
2325 	struct mm_struct *mm;
2326 
2327 	svms = container_of(work, struct svm_range_list, deferred_list_work);
2328 	pr_debug("enter svms 0x%p\n", svms);
2329 
2330 	spin_lock(&svms->deferred_list_lock);
2331 	while (!list_empty(&svms->deferred_range_list)) {
2332 		prange = list_first_entry(&svms->deferred_range_list,
2333 					  struct svm_range, deferred_list);
2334 		spin_unlock(&svms->deferred_list_lock);
2335 
2336 		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2337 			 prange->start, prange->last, prange->work_item.op);
2338 
2339 		mm = prange->work_item.mm;
2340 
2341 		mmap_write_lock(mm);
2342 
2343 		/* Removal from deferred_list must be done inside the mmap write
2344 		 * lock, to avoid two race cases:
2345 		 * 1. unmap_from_cpu may change work_item.op and add the range
2346 		 *    to deferred_list again, causing a use-after-free bug.
2347 		 * 2. svm_range_list_lock_and_flush_work may hold the mmap write
2348 		 *    lock and continue because deferred_list is empty, but the
2349 		 *    deferred_list work is actually waiting for the mmap lock.
2350 		 */
2351 		spin_lock(&svms->deferred_list_lock);
2352 		list_del_init(&prange->deferred_list);
2353 		spin_unlock(&svms->deferred_list_lock);
2354 
2355 		mutex_lock(&svms->lock);
2356 		mutex_lock(&prange->migrate_mutex);
2357 		while (!list_empty(&prange->child_list)) {
2358 			struct svm_range *pchild;
2359 
2360 			pchild = list_first_entry(&prange->child_list,
2361 						struct svm_range, child_list);
2362 			pr_debug("child prange 0x%p op %d\n", pchild,
2363 				 pchild->work_item.op);
2364 			list_del_init(&pchild->child_list);
2365 			svm_range_handle_list_op(svms, pchild, mm);
2366 		}
2367 		mutex_unlock(&prange->migrate_mutex);
2368 
2369 		svm_range_handle_list_op(svms, prange, mm);
2370 		mutex_unlock(&svms->lock);
2371 		mmap_write_unlock(mm);
2372 
2373 		/* Pairs with mmget in svm_range_add_list_work. If dropping the
2374 		 * last mm refcount, schedule release work to avoid circular locking
2375 		 */
2376 		mmput_async(mm);
2377 
2378 		spin_lock(&svms->deferred_list_lock);
2379 	}
2380 	spin_unlock(&svms->deferred_list_lock);
2381 	pr_debug("exit svms 0x%p\n", svms);
2382 }
2383 
2384 void
2385 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2386 			struct mm_struct *mm, enum svm_work_list_ops op)
2387 {
2388 	spin_lock(&svms->deferred_list_lock);
2389 	/* if prange is on the deferred list */
2390 	if (!list_empty(&prange->deferred_list)) {
2391 		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2392 		WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
2393 		if (op != SVM_OP_NULL &&
2394 		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
2395 			prange->work_item.op = op;
2396 	} else {
2397 		prange->work_item.op = op;
2398 
2399 		/* Pairs with mmput in deferred_list_work */
2400 		mmget(mm);
2401 		prange->work_item.mm = mm;
2402 		list_add_tail(&prange->deferred_list,
2403 			      &prange->svms->deferred_range_list);
2404 		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2405 			 prange, prange->start, prange->last, op);
2406 	}
2407 	spin_unlock(&svms->deferred_list_lock);
2408 }
2409 
2410 void schedule_deferred_list_work(struct svm_range_list *svms)
2411 {
2412 	spin_lock(&svms->deferred_list_lock);
2413 	if (!list_empty(&svms->deferred_range_list))
2414 		schedule_work(&svms->deferred_list_work);
2415 	spin_unlock(&svms->deferred_list_lock);
2416 }
2417 
2418 static void
2419 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2420 		      struct svm_range *prange, unsigned long start,
2421 		      unsigned long last)
2422 {
2423 	struct svm_range *head;
2424 	struct svm_range *tail;
2425 
2426 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2427 		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2428 			 prange->start, prange->last);
2429 		return;
2430 	}
2431 	if (start > prange->last || last < prange->start)
2432 		return;
2433 
2434 	head = tail = prange;
2435 	if (start > prange->start)
2436 		svm_range_split(prange, prange->start, start - 1, &tail);
2437 	if (last < tail->last)
2438 		svm_range_split(tail, last + 1, tail->last, &head);
2439 
2440 	if (head != prange && tail != prange) {
2441 		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2442 		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2443 	} else if (tail != prange) {
2444 		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2445 	} else if (head != prange) {
2446 		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2447 	} else if (parent != prange) {
2448 		prange->work_item.op = SVM_OP_UNMAP_RANGE;
2449 	}
2450 }
2451 
2452 static void
2453 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2454 			 unsigned long start, unsigned long last)
2455 {
2456 	uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2457 	struct svm_range_list *svms;
2458 	struct svm_range *pchild;
2459 	struct kfd_process *p;
2460 	unsigned long s, l;
2461 	bool unmap_parent;
2462 	uint32_t i;
2463 
2464 	if (atomic_read(&prange->queue_refcount)) {
2465 		int r;
2466 
2467 		pr_warn("Freeing queue vital buffer 0x%lx, queue evicted\n",
2468 			prange->start << PAGE_SHIFT);
2469 		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2470 		if (r)
2471 			pr_debug("failed %d to quiesce KFD queues\n", r);
2472 	}
2473 
2474 	p = kfd_lookup_process_by_mm(mm);
2475 	if (!p)
2476 		return;
2477 	svms = &p->svms;
2478 
2479 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2480 		 prange, prange->start, prange->last, start, last);
2481 
2482 	/* Calculate timestamps used to decide which page faults need to be
2483 	 * dropped or handled before unmapping pages from the GPU VM.
2484 	 */
2485 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2486 		struct kfd_process_device *pdd;
2487 		struct amdgpu_device *adev;
2488 		struct amdgpu_ih_ring *ih;
2489 		uint32_t checkpoint_wptr;
2490 
2491 		pdd = p->pdds[i];
2492 		if (!pdd)
2493 			continue;
2494 
2495 		adev = pdd->dev->adev;
2496 
2497 		/* Check and drain ih1 ring if cam not available */
2498 		if (adev->irq.ih1.ring_size) {
2499 			ih = &adev->irq.ih1;
2500 			checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2501 			if (ih->rptr != checkpoint_wptr) {
2502 				svms->checkpoint_ts[i] =
2503 					amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2504 				continue;
2505 			}
2506 		}
2507 
2508 		/* check if dev->irq.ih_soft is not empty */
2509 		ih = &adev->irq.ih_soft;
2510 		checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2511 		if (ih->rptr != checkpoint_wptr)
2512 			svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2513 	}
2514 
2515 	unmap_parent = start <= prange->start && last >= prange->last;
2516 
2517 	list_for_each_entry(pchild, &prange->child_list, child_list) {
2518 		mutex_lock_nested(&pchild->lock, 1);
2519 		s = max(start, pchild->start);
2520 		l = min(last, pchild->last);
2521 		if (l >= s)
2522 			svm_range_unmap_from_gpus(pchild, s, l, trigger);
2523 		svm_range_unmap_split(mm, prange, pchild, start, last);
2524 		mutex_unlock(&pchild->lock);
2525 	}
2526 	s = max(start, prange->start);
2527 	l = min(last, prange->last);
2528 	if (l >= s)
2529 		svm_range_unmap_from_gpus(prange, s, l, trigger);
2530 	svm_range_unmap_split(mm, prange, prange, start, last);
2531 
2532 	if (unmap_parent)
2533 		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2534 	else
2535 		svm_range_add_list_work(svms, prange, mm,
2536 					SVM_OP_UPDATE_RANGE_NOTIFIER);
2537 	schedule_deferred_list_work(svms);
2538 
2539 	kfd_unref_process(p);
2540 }
2541 
2542 /**
2543  * svm_range_cpu_invalidate_pagetables - interval notifier callback
2544  * @mni: mmu_interval_notifier struct
2545  * @range: mmu_notifier_range struct
2546  * @cur_seq: value to pass to mmu_interval_set_seq()
2547  *
2548  * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it
2549  * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
2550  * otherwise it comes from migration or a CPU page invalidation callback.
2551  * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2552  * work thread, and split prange if only part of prange is unmapped.
2553  *
2554  * For invalidation event, if GPU retry fault is not enabled, evict the queues,
2555  * then schedule svm_range_restore_work to update GPU mapping and resume queues.
2556  * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will
2557  * update GPU mapping to recover.
2558  *
2559  * Context: mmap lock, notifier_invalidate_start lock are held
2560  *          for invalidate event, prange lock is held if this is from migration
2561  */
2562 static bool
2563 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2564 				    const struct mmu_notifier_range *range,
2565 				    unsigned long cur_seq)
2566 {
2567 	struct svm_range *prange;
2568 	unsigned long start;
2569 	unsigned long last;
2570 
2571 	if (range->event == MMU_NOTIFY_RELEASE)
2572 		return true;
2573 	if (!mmget_not_zero(mni->mm))
2574 		return true;
2575 
2576 	start = mni->interval_tree.start;
2577 	last = mni->interval_tree.last;
2578 	start = max(start, range->start) >> PAGE_SHIFT;
2579 	last = min(last, range->end - 1) >> PAGE_SHIFT;
2580 	pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2581 		 start, last, range->start >> PAGE_SHIFT,
2582 		 (range->end - 1) >> PAGE_SHIFT,
2583 		 mni->interval_tree.start >> PAGE_SHIFT,
2584 		 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2585 
2586 	prange = container_of(mni, struct svm_range, notifier);
2587 
2588 	svm_range_lock(prange);
2589 	mmu_interval_set_seq(mni, cur_seq);
2590 
2591 	switch (range->event) {
2592 	case MMU_NOTIFY_UNMAP:
2593 		svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2594 		break;
2595 	default:
2596 		svm_range_evict(prange, mni->mm, start, last, range->event);
2597 		break;
2598 	}
2599 
2600 	svm_range_unlock(prange);
2601 	mmput(mni->mm);
2602 
2603 	return true;
2604 }
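
/*
 * Sketch of how a range's interval notifier is registered so that the callback
 * above fires on CPU page table changes (compare svm_range_add_notifier_locked(),
 * which registers while the mmap lock is already held). Error handling is left
 * to the caller and the function name is illustrative only.
 */
static int __maybe_unused example_register_range_notifier(struct mm_struct *mm,
							   struct svm_range *prange)
{
	unsigned long start = prange->start << PAGE_SHIFT;
	unsigned long length = (prange->last - prange->start + 1) << PAGE_SHIFT;

	/* svm_range_mn_ops points at svm_range_cpu_invalidate_pagetables() */
	return mmu_interval_notifier_insert(&prange->notifier, mm, start, length,
					    &svm_range_mn_ops);
}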
2605 
2606 /**
2607  * svm_range_from_addr - find svm range from fault address
2608  * @svms: svm range list header
2609  * @addr: address to search range interval tree, in pages
2610  * @parent: parent range if range is on child list
2611  *
2612  * Context: The caller must hold svms->lock
2613  *
2614  * Return: the svm_range found or NULL
2615  */
2616 struct svm_range *
2617 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2618 		    struct svm_range **parent)
2619 {
2620 	struct interval_tree_node *node;
2621 	struct svm_range *prange;
2622 	struct svm_range *pchild;
2623 
2624 	node = interval_tree_iter_first(&svms->objects, addr, addr);
2625 	if (!node)
2626 		return NULL;
2627 
2628 	prange = container_of(node, struct svm_range, it_node);
2629 	pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2630 		 addr, prange->start, prange->last, node->start, node->last);
2631 
2632 	if (addr >= prange->start && addr <= prange->last) {
2633 		if (parent)
2634 			*parent = prange;
2635 		return prange;
2636 	}
2637 	list_for_each_entry(pchild, &prange->child_list, child_list)
2638 		if (addr >= pchild->start && addr <= pchild->last) {
2639 			pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2640 				 addr, pchild->start, pchild->last);
2641 			if (parent)
2642 				*parent = prange;
2643 			return pchild;
2644 		}
2645 
2646 	return NULL;
2647 }
2648 
2649 /* svm_range_best_restore_location - decide the best fault restore location
2650  * @prange: svm range structure
2651  * @adev: the GPU on which vm fault happened
2652  *
2653  * This is only called when xnack is on, to decide the best location to restore
2654  * the range mapping after GPU vm fault. Caller uses the best location to do
2655  * migration if actual loc is not best location, then update GPU page table
2656  * mapping to the best location.
2657  *
2658  * If the preferred loc is accessible by faulting GPU, use preferred loc.
2659  * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu
2660  * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then
2661  *    if range actual loc is cpu, best_loc is cpu
2662  *    if vm fault gpu is on xgmi same hive of range actual loc gpu, best_loc is
2663  *    range actual loc.
2664  * Otherwise, GPU no access, best_loc is -1.
2665  *
2666  * Return:
2667  * -1 means vm fault GPU no access
2668  * 0 for CPU or GPU id
2669  */
2670 static int32_t
2671 svm_range_best_restore_location(struct svm_range *prange,
2672 				struct kfd_node *node,
2673 				int32_t *gpuidx)
2674 {
2675 	struct kfd_node *bo_node, *preferred_node;
2676 	struct kfd_process *p;
2677 	uint32_t gpuid;
2678 	int r;
2679 
2680 	p = container_of(prange->svms, struct kfd_process, svms);
2681 
2682 	r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2683 	if (r < 0) {
2684 		pr_debug("failed to get gpuid from kgd\n");
2685 		return -1;
2686 	}
2687 
2688 	if (node->adev->flags & AMD_IS_APU)
2689 		return 0;
2690 
2691 	if (prange->preferred_loc == gpuid ||
2692 	    prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2693 		return prange->preferred_loc;
2694 	} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2695 		preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2696 		if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2697 			return prange->preferred_loc;
2698 		/* fall through */
2699 	}
2700 
2701 	if (test_bit(*gpuidx, prange->bitmap_access))
2702 		return gpuid;
2703 
2704 	if (test_bit(*gpuidx, prange->bitmap_aip)) {
2705 		if (!prange->actual_loc)
2706 			return 0;
2707 
2708 		bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2709 		if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2710 			return prange->actual_loc;
2711 		else
2712 			return 0;
2713 	}
2714 
2715 	return -1;
2716 }
2717 
2718 static int
2719 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2720 			       unsigned long *start, unsigned long *last,
2721 			       bool *is_heap_stack)
2722 {
2723 	struct vm_area_struct *vma;
2724 	struct interval_tree_node *node;
2725 	struct rb_node *rb_node;
2726 	unsigned long start_limit, end_limit;
2727 
2728 	vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2729 	if (!vma) {
2730 		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2731 		return -EFAULT;
2732 	}
2733 
2734 	*is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2735 
2736 	start_limit = max(vma->vm_start >> PAGE_SHIFT,
2737 		      (unsigned long)ALIGN_DOWN(addr, 1UL << p->svms.default_granularity));
2738 	end_limit = min(vma->vm_end >> PAGE_SHIFT,
2739 		    (unsigned long)ALIGN(addr + 1, 1UL << p->svms.default_granularity));
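	/* e.g. with default_granularity 9 a fault at page 0x1234 starts from the
	 * aligned window [0x1200 0x13ff], further clipped by the VMA above and by
	 * neighbouring registered ranges below.
	 */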
2740 
2741 	/* First range that starts after the fault address */
2742 	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2743 	if (node) {
2744 		end_limit = min(end_limit, node->start);
2745 		/* Last range that ends before the fault address */
2746 		rb_node = rb_prev(&node->rb);
2747 	} else {
2748 		/* Last range must end before addr because
2749 		 * there was no range after addr
2750 		 */
2751 		rb_node = rb_last(&p->svms.objects.rb_root);
2752 	}
2753 	if (rb_node) {
2754 		node = container_of(rb_node, struct interval_tree_node, rb);
2755 		if (node->last >= addr) {
2756 			WARN(1, "Overlap with prev node and page fault addr\n");
2757 			return -EFAULT;
2758 		}
2759 		start_limit = max(start_limit, node->last + 1);
2760 	}
2761 
2762 	*start = start_limit;
2763 	*last = end_limit - 1;
2764 
2765 	pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2766 		 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2767 		 *start, *last, *is_heap_stack);
2768 
2769 	return 0;
2770 }
2771 
2772 static int
2773 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2774 			   uint64_t *bo_s, uint64_t *bo_l)
2775 {
2776 	struct amdgpu_bo_va_mapping *mapping;
2777 	struct interval_tree_node *node;
2778 	struct amdgpu_bo *bo = NULL;
2779 	unsigned long userptr;
2780 	uint32_t i;
2781 	int r;
2782 
2783 	for (i = 0; i < p->n_pdds; i++) {
2784 		struct amdgpu_vm *vm;
2785 
2786 		if (!p->pdds[i]->drm_priv)
2787 			continue;
2788 
2789 		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2790 		r = amdgpu_bo_reserve(vm->root.bo, false);
2791 		if (r)
2792 			return r;
2793 
2794 		/* Check userptr by searching entire vm->va interval tree */
2795 		node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2796 		while (node) {
2797 			mapping = container_of((struct rb_node *)node,
2798 					       struct amdgpu_bo_va_mapping, rb);
2799 			bo = mapping->bo_va->base.bo;
2800 
2801 			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2802 							 start << PAGE_SHIFT,
2803 							 last << PAGE_SHIFT,
2804 							 &userptr)) {
2805 				node = interval_tree_iter_next(node, 0, ~0ULL);
2806 				continue;
2807 			}
2808 
2809 			pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2810 				 start, last);
2811 			if (bo_s && bo_l) {
2812 				*bo_s = userptr >> PAGE_SHIFT;
2813 				*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2814 			}
2815 			amdgpu_bo_unreserve(vm->root.bo);
2816 			return -EADDRINUSE;
2817 		}
2818 		amdgpu_bo_unreserve(vm->root.bo);
2819 	}
2820 	return 0;
2821 }
2822 
2823 static struct
2824 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2825 						struct kfd_process *p,
2826 						struct mm_struct *mm,
2827 						int64_t addr)
2828 {
2829 	struct svm_range *prange = NULL;
2830 	unsigned long start, last;
2831 	uint32_t gpuid, gpuidx;
2832 	bool is_heap_stack;
2833 	uint64_t bo_s = 0;
2834 	uint64_t bo_l = 0;
2835 	int r;
2836 
2837 	if (svm_range_get_range_boundaries(p, addr, &start, &last,
2838 					   &is_heap_stack))
2839 		return NULL;
2840 
2841 	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2842 	if (r != -EADDRINUSE)
2843 		r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2844 
2845 	if (r == -EADDRINUSE) {
2846 		if (addr >= bo_s && addr <= bo_l)
2847 			return NULL;
2848 
2849 		/* Create a one-page svm range if the 2MB range overlaps */
2850 		start = addr;
2851 		last = addr;
2852 	}
2853 
2854 	prange = svm_range_new(&p->svms, start, last, true);
2855 	if (!prange) {
2856 		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2857 		return NULL;
2858 	}
2859 	if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2860 		pr_debug("failed to get gpuid from kgd\n");
2861 		svm_range_free(prange, true);
2862 		return NULL;
2863 	}
2864 
2865 	if (is_heap_stack)
2866 		prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2867 
2868 	svm_range_add_to_svms(prange);
2869 	svm_range_add_notifier_locked(mm, prange);
2870 
2871 	return prange;
2872 }
2873 
2874 /* svm_range_skip_recover - decide if prange can be recovered
2875  * @prange: svm range structure
2876  *
2877  * The GPU vm retry fault handler skips recovering the range in these cases:
2878  * 1. prange is on deferred list to be removed after unmap, it is stale fault,
2879  *    deferred list work will drain the stale fault before free the prange.
2880  * 2. prange is on deferred list to add interval notifier after split, or
2881  * 3. prange is child range, it is split from parent prange, recover later
2882  *    after interval notifier is added.
2883  *
2884  * Return: true to skip recover, false to recover
2885  */
2886 static bool svm_range_skip_recover(struct svm_range *prange)
2887 {
2888 	struct svm_range_list *svms = prange->svms;
2889 
2890 	spin_lock(&svms->deferred_list_lock);
2891 	if (list_empty(&prange->deferred_list) &&
2892 	    list_empty(&prange->child_list)) {
2893 		spin_unlock(&svms->deferred_list_lock);
2894 		return false;
2895 	}
2896 	spin_unlock(&svms->deferred_list_lock);
2897 
2898 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2899 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2900 			 svms, prange, prange->start, prange->last);
2901 		return true;
2902 	}
2903 	if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2904 	    prange->work_item.op == SVM_OP_ADD_RANGE) {
2905 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2906 			 svms, prange, prange->start, prange->last);
2907 		return true;
2908 	}
2909 	return false;
2910 }
2911 
2912 static void
2913 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2914 		      int32_t gpuidx)
2915 {
2916 	struct kfd_process_device *pdd;
2917 
2918 	/* fault is on a different page of the same range,
2919 	 * or fault is skipped to be recovered later,
2920 	 * or fault is on an invalid virtual address
2921 	 */
2922 	if (gpuidx == MAX_GPU_INSTANCE) {
2923 		uint32_t gpuid;
2924 		int r;
2925 
2926 		r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
2927 		if (r < 0)
2928 			return;
2929 	}
2930 
2931 	/* fault is recovered
2932 	 * or fault cannot be recovered because the GPU has no access to the range
2933 	 */
2934 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2935 	if (pdd)
2936 		WRITE_ONCE(pdd->faults, pdd->faults + 1);
2937 }
2938 
2939 static bool
2940 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2941 {
2942 	unsigned long requested = VM_READ;
2943 
2944 	if (write_fault)
2945 		requested |= VM_WRITE;
2946 
2947 	pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2948 		vma->vm_flags);
2949 	return (vma->vm_flags & requested) == requested;
2950 }
2951 
2952 int
2953 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2954 			uint32_t vmid, uint32_t node_id,
2955 			uint64_t addr, uint64_t ts, bool write_fault)
2956 {
2957 	unsigned long start, last, size;
2958 	struct mm_struct *mm = NULL;
2959 	struct svm_range_list *svms;
2960 	struct svm_range *prange;
2961 	struct kfd_process *p;
2962 	ktime_t timestamp = ktime_get_boottime();
2963 	struct kfd_node *node;
2964 	int32_t best_loc;
2965 	int32_t gpuid, gpuidx = MAX_GPU_INSTANCE;
2966 	bool write_locked = false;
2967 	struct vm_area_struct *vma;
2968 	bool migration = false;
2969 	int r = 0;
2970 
2971 	if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
2972 		pr_debug("device does not support SVM\n");
2973 		return -EFAULT;
2974 	}
2975 
2976 	p = kfd_lookup_process_by_pasid(pasid);
2977 	if (!p) {
2978 		pr_debug("kfd process not found, pasid 0x%x\n", pasid);
2979 		return 0;
2980 	}
2981 	svms = &p->svms;
2982 
2983 	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2984 
2985 	if (atomic_read(&svms->drain_pagefaults)) {
2986 		pr_debug("page fault handling disabled, drop fault 0x%llx\n", addr);
2987 		r = 0;
2988 		goto out;
2989 	}
2990 
2991 	node = kfd_node_by_irq_ids(adev, node_id, vmid);
2992 	if (!node) {
2993 		pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
2994 			 vmid);
2995 		r = -EFAULT;
2996 		goto out;
2997 	}
2998 
2999 	if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
3000 		pr_debug("failed to get gpuid/gpuidx for node_id: %d\n", node_id);
3001 		r = -EFAULT;
3002 		goto out;
3003 	}
3004 
3005 	/* check if this page fault time stamp is before svms->checkpoint_ts */
3006 	if (svms->checkpoint_ts[gpuidx] != 0) {
3007 		if (amdgpu_ih_ts_after(ts,  svms->checkpoint_ts[gpuidx])) {
3008 			pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
3009 			r = 0;
3010 			goto out;
3011 		} else
3012 			/* ts is after svms->checkpoint_ts now; reset svms->checkpoint_ts
3013 			 * to zero so a later ts wrap-around cannot give a wrong comparison
3014 			 */
3015 			svms->checkpoint_ts[gpuidx] = 0;
3016 	}
3017 
3018 	if (!p->xnack_enabled) {
3019 		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
3020 		r = -EFAULT;
3021 		goto out;
3022 	}
3023 
3024 	/* p->lead_thread is available because kfd_process_wq_release flushes the
3025 	 * work before releasing the task ref.
3026 	 */
3027 	mm = get_task_mm(p->lead_thread);
3028 	if (!mm) {
3029 		pr_debug("svms 0x%p failed to get mm\n", svms);
3030 		r = 0;
3031 		goto out;
3032 	}
3033 
3034 	mmap_read_lock(mm);
3035 retry_write_locked:
3036 	mutex_lock(&svms->lock);
3037 	prange = svm_range_from_addr(svms, addr, NULL);
3038 	if (!prange) {
3039 		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
3040 			 svms, addr);
3041 		if (!write_locked) {
3042 			/* Need the write lock to create new range with MMU notifier.
3043 			 * Also flush pending deferred work to make sure the interval
3044 			 * tree is up to date before we add a new range
3045 			 */
3046 			mutex_unlock(&svms->lock);
3047 			mmap_read_unlock(mm);
3048 			mmap_write_lock(mm);
3049 			write_locked = true;
3050 			goto retry_write_locked;
3051 		}
3052 		prange = svm_range_create_unregistered_range(node, p, mm, addr);
3053 		if (!prange) {
3054 			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
3055 				 svms, addr);
3056 			mmap_write_downgrade(mm);
3057 			r = -EFAULT;
3058 			goto out_unlock_svms;
3059 		}
3060 	}
3061 	if (write_locked)
3062 		mmap_write_downgrade(mm);
3063 
3064 	mutex_lock(&prange->migrate_mutex);
3065 
3066 	if (svm_range_skip_recover(prange)) {
3067 		amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3068 		r = 0;
3069 		goto out_unlock_range;
3070 	}
3071 
3072 	/* skip duplicate vm fault on different pages of same range */
3073 	if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
3074 				AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
3075 		pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
3076 			 svms, prange->start, prange->last);
3077 		r = 0;
3078 		goto out_unlock_range;
3079 	}
3080 
3081 	/* __do_munmap removed VMA, return success as we are handling stale
3082 	 * retry fault.
3083 	 */
3084 	vma = vma_lookup(mm, addr << PAGE_SHIFT);
3085 	if (!vma) {
3086 		pr_debug("address 0x%llx VMA is removed\n", addr);
3087 		r = 0;
3088 		goto out_unlock_range;
3089 	}
3090 
3091 	if (!svm_fault_allowed(vma, write_fault)) {
3092 		pr_debug("fault addr 0x%llx no %s permission\n", addr,
3093 			write_fault ? "write" : "read");
3094 		r = -EPERM;
3095 		goto out_unlock_range;
3096 	}
3097 
3098 	best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3099 	if (best_loc == -1) {
3100 		pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3101 			 svms, prange->start, prange->last);
3102 		r = -EACCES;
3103 		goto out_unlock_range;
3104 	}
3105 
3106 	pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3107 		 svms, prange->start, prange->last, best_loc,
3108 		 prange->actual_loc);
3109 
3110 	kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
3111 				       write_fault, timestamp);
3112 
3113 	/* Align migration range start and size to granularity size */
3114 	size = 1UL << prange->granularity;
3115 	start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
3116 	last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
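	/* e.g. with granularity 9 (512 pages, 2 MiB) a fault at page 0x1234
	 * restores [0x1200 0x13ff], clipped to the prange boundaries above.
	 */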
3117 	if (prange->actual_loc != 0 || best_loc != 0) {
3118 		if (best_loc) {
3119 			r = svm_migrate_to_vram(prange, best_loc, start, last,
3120 					mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3121 			if (r) {
3122 				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
3123 					 r, addr);
3124 				/* Fallback to system memory if migration to
3125 				 * VRAM failed
3126 				 */
3127 				if (prange->actual_loc && prange->actual_loc != best_loc)
3128 					r = svm_migrate_vram_to_ram(prange, mm, start, last,
3129 						KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3130 				else
3131 					r = 0;
3132 			}
3133 		} else {
3134 			r = svm_migrate_vram_to_ram(prange, mm, start, last,
3135 					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3136 		}
3137 		if (r) {
3138 			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3139 				 r, svms, start, last);
3140 			goto out_migrate_fail;
3141 		} else {
3142 			migration = true;
3143 		}
3144 	}
3145 
3146 	r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
3147 				       false, false);
3148 	if (r)
3149 		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3150 			 r, svms, start, last);
3151 
3152 out_migrate_fail:
3153 	kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3154 				     migration);
3155 
3156 out_unlock_range:
3157 	mutex_unlock(&prange->migrate_mutex);
3158 out_unlock_svms:
3159 	mutex_unlock(&svms->lock);
3160 	mmap_read_unlock(mm);
3161 
3162 	svm_range_count_fault(node, p, gpuidx);
3163 
3164 	mmput(mm);
3165 out:
3166 	kfd_unref_process(p);
3167 
3168 	if (r == -EAGAIN) {
3169 		pr_debug("recover vm fault later\n");
3170 		amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3171 		r = 0;
3172 	}
3173 	return r;
3174 }
3175 
3176 int
3177 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3178 {
3179 	struct svm_range *prange, *pchild;
3180 	uint64_t reserved_size = 0;
3181 	uint64_t size;
3182 	int r = 0;
3183 
3184 	pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3185 
3186 	mutex_lock(&p->svms.lock);
3187 
3188 	list_for_each_entry(prange, &p->svms.list, list) {
3189 		svm_range_lock(prange);
3190 		list_for_each_entry(pchild, &prange->child_list, child_list) {
3191 			size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3192 			if (xnack_enabled) {
3193 				amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3194 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3195 			} else {
3196 				r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3197 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3198 				if (r)
3199 					goto out_unlock;
3200 				reserved_size += size;
3201 			}
3202 		}
3203 
3204 		size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3205 		if (xnack_enabled) {
3206 			amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3207 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3208 		} else {
3209 			r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3210 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3211 			if (r)
3212 				goto out_unlock;
3213 			reserved_size += size;
3214 		}
3215 out_unlock:
3216 		svm_range_unlock(prange);
3217 		if (r)
3218 			break;
3219 	}
3220 
3221 	if (r)
3222 		amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3223 					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3224 	else
3225 		/* Changing the xnack mode must be done inside the svms lock, to avoid
3226 		 * racing with svm_range_deferred_list_work unreserving memory in parallel.
3227 		 */
3228 		p->xnack_enabled = xnack_enabled;
3229 
3230 	mutex_unlock(&p->svms.lock);
3231 	return r;
3232 }
3233 
3234 void svm_range_list_fini(struct kfd_process *p)
3235 {
3236 	struct svm_range *prange;
3237 	struct svm_range *next;
3238 
3239 	pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3240 
3241 	cancel_delayed_work_sync(&p->svms.restore_work);
3242 
3243 	/* Ensure list work is finished before process is destroyed */
3244 	flush_work(&p->svms.deferred_list_work);
3245 
3246 	/*
3247 	 * Ensure no retry fault comes in afterwards, as the page fault handler will
3248 	 * not find the kfd process and take the mm lock to recover the fault.
3249 	 * Stop kfd page fault handling, then wait until pending page faults are drained.
3250 	 */
3251 	atomic_set(&p->svms.drain_pagefaults, 1);
3252 	svm_range_drain_retry_fault(&p->svms);
3253 
3254 	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3255 		svm_range_unlink(prange);
3256 		svm_range_remove_notifier(prange);
3257 		svm_range_free(prange, true);
3258 	}
3259 
3260 	mutex_destroy(&p->svms.lock);
3261 
3262 	pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3263 }
3264 
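/* Initialize per-process SVM state: interval tree, work items, per-GPU support
 * bitmap and the default migration granularity (clamped to 0x1B).
 */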
3265 int svm_range_list_init(struct kfd_process *p)
3266 {
3267 	struct svm_range_list *svms = &p->svms;
3268 	int i;
3269 
3270 	svms->objects = RB_ROOT_CACHED;
3271 	mutex_init(&svms->lock);
3272 	INIT_LIST_HEAD(&svms->list);
3273 	atomic_set(&svms->evicted_ranges, 0);
3274 	atomic_set(&svms->drain_pagefaults, 0);
3275 	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3276 	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3277 	INIT_LIST_HEAD(&svms->deferred_range_list);
3278 	INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3279 	spin_lock_init(&svms->deferred_list_lock);
3280 
3281 	for (i = 0; i < p->n_pdds; i++)
3282 		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3283 			bitmap_set(svms->bitmap_supported, i, 1);
3284 
3285 	/* Value of default granularity cannot exceed 0x1B, the
3286 	 * number of pages supported by a 4-level paging table
3287 	 */
3288 	svms->default_granularity = min_t(u8, amdgpu_svm_default_granularity, 0x1B);
3289 	pr_debug("Default SVM Granularity to use: %d\n", svms->default_granularity);
3290 
3291 	return 0;
3292 }
3293 
3294 /**
3295  * svm_range_check_vm - check if the virtual address range is already mapped
3296  * @p: current kfd_process
3297  * @start: range start address, in pages
3298  * @last: range last address, in pages
3299  * @bo_s: mapping start address in pages if address range already mapped
3300  * @bo_l: mapping last address in pages if address range already mapped
3301  *
3302  * The purpose is to avoid colliding with virtual address ranges already
3303  * allocated by the kfd_ioctl_alloc_memory_of_gpu ioctl.
3304  * It checks each pdd in the kfd_process.
3305  *
3306  * Context: Process context
3307  *
3308  * Return 0 - OK, if the range is not mapped.
3309  * Otherwise error code:
3310  * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3311  * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3312  * a signal. Release all buffer reservations and return to user-space.
3313  */
3314 static int
3315 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3316 		   uint64_t *bo_s, uint64_t *bo_l)
3317 {
3318 	struct amdgpu_bo_va_mapping *mapping;
3319 	struct interval_tree_node *node;
3320 	uint32_t i;
3321 	int r;
3322 
3323 	for (i = 0; i < p->n_pdds; i++) {
3324 		struct amdgpu_vm *vm;
3325 
3326 		if (!p->pdds[i]->drm_priv)
3327 			continue;
3328 
3329 		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3330 		r = amdgpu_bo_reserve(vm->root.bo, false);
3331 		if (r)
3332 			return r;
3333 
3334 		node = interval_tree_iter_first(&vm->va, start, last);
3335 		if (node) {
3336 			pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3337 				 start, last);
3338 			mapping = container_of((struct rb_node *)node,
3339 					       struct amdgpu_bo_va_mapping, rb);
3340 			if (bo_s && bo_l) {
3341 				*bo_s = mapping->start;
3342 				*bo_l = mapping->last;
3343 			}
3344 			amdgpu_bo_unreserve(vm->root.bo);
3345 			return -EADDRINUSE;
3346 		}
3347 		amdgpu_bo_unreserve(vm->root.bo);
3348 	}
3349 
3350 	return 0;
3351 }
3352 
3353 /**
3354  * svm_range_is_valid - check if virtual address range is valid
3355  * @p: current kfd_process
3356  * @start: range start address, in pages
3357  * @size: range size, in pages
3358  *
3359  * Valid virtual address range means it belongs to one or more VMAs
3360  *
3361  * Context: Process context
3362  *
3363  * Return:
3364  *  0 - OK, otherwise error code
3365  */
3366 static int
3367 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3368 {
3369 	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3370 	struct vm_area_struct *vma;
3371 	unsigned long end;
3372 	unsigned long start_unchg = start;
3373 
3374 	start <<= PAGE_SHIFT;
3375 	end = start + (size << PAGE_SHIFT);
3376 	do {
3377 		vma = vma_lookup(p->mm, start);
3378 		if (!vma || (vma->vm_flags & device_vma))
3379 			return -EFAULT;
3380 		start = min(end, vma->vm_end);
3381 	} while (start < end);
3382 
3383 	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3384 				  NULL);
3385 }
3386 
3387 /**
3388  * svm_range_best_prefetch_location - decide the best prefetch location
3389  * @prange: svm range structure
3390  *
3391  * For xnack off:
3392  * If the range maps to a single GPU, the best prefetch location is
3393  * prefetch_loc, which can be CPU or GPU.
3394  *
3395  * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch
3396  * location is the prefetch_loc GPU only if the mGPUs are in the same XGMI
3397  * hive; otherwise the best prefetch location is always CPU, because a GPU
3398  * cannot coherently map another GPU's VRAM even with a large-BAR PCIe connection.
3399  *
3400  * For xnack on:
3401  * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
3402  * prefetch_loc; access from other GPUs will generate a vm fault and trigger migration.
3403  *
3404  * If the range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is the
3405  * prefetch_loc GPU only if the mGPUs are in the same XGMI hive; otherwise the
3406  * best prefetch location is always CPU.
3407  *
3408  * Context: Process context
3409  *
3410  * Return:
3411  * 0 for CPU or GPU id
3412  * 0 for CPU, or a GPU id
3413 static uint32_t
3414 svm_range_best_prefetch_location(struct svm_range *prange)
3415 {
3416 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3417 	uint32_t best_loc = prange->prefetch_loc;
3418 	struct kfd_process_device *pdd;
3419 	struct kfd_node *bo_node;
3420 	struct kfd_process *p;
3421 	uint32_t gpuidx;
3422 
3423 	p = container_of(prange->svms, struct kfd_process, svms);
3424 
3425 	if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3426 		goto out;
3427 
3428 	bo_node = svm_range_get_node_by_id(prange, best_loc);
3429 	if (!bo_node) {
3430 		WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3431 		best_loc = 0;
3432 		goto out;
3433 	}
3434 
3435 	if (bo_node->adev->flags & AMD_IS_APU) {
3436 		best_loc = 0;
3437 		goto out;
3438 	}
3439 
3440 	if (p->xnack_enabled)
3441 		bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3442 	else
3443 		bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3444 			  MAX_GPU_INSTANCE);
3445 
3446 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3447 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3448 		if (!pdd) {
3449 			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3450 			continue;
3451 		}
3452 
3453 		if (pdd->dev->adev == bo_node->adev)
3454 			continue;
3455 
3456 		if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3457 			best_loc = 0;
3458 			break;
3459 		}
3460 	}
3461 
3462 out:
3463 	pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3464 		 p->xnack_enabled, &p->svms, prange->start, prange->last,
3465 		 best_loc);
3466 
3467 	return best_loc;
3468 }
3469 
3470 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3471  * @mm: current process mm_struct
3472  * @prange: svm range structure
3473  * @migrated: output, true if migration is triggered
3474  *
3475  * If the range prefetch_loc is a GPU and the actual loc is cpu (0), migrate
3476  * the range from ram to vram.
3477  * If the range prefetch_loc is cpu (0) and the actual loc is a GPU, migrate
3478  * the range from vram to ram.
3479  *
3480  * If GPU vm fault retry is not enabled, migration interacts with the MMU
3481  * notifier and restore work:
3482  * 1. migrate_vma_setup invalidates pages; the MMU notifier callback
3483  *    svm_range_evict stops all queues and schedules restore work
3484  * 2. svm_range_restore_work waits for the migration to finish via
3485  *    a. svm_range_validate_vram taking prange->migrate_mutex
3486  *    b. svm_range_validate_ram HMM get pages waiting for the CPU fault handler
3487  * 3. restore work updates GPU mappings and resumes all queues.
3488  *
3489  * Context: Process context
3490  *
3491  * Return:
3492  * 0 - OK, otherwise - error code of migration
3493  */
3494 static int
3495 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3496 			    bool *migrated)
3497 {
3498 	uint32_t best_loc;
3499 	int r = 0;
3500 
3501 	*migrated = false;
3502 	best_loc = svm_range_best_prefetch_location(prange);
3503 
3504 	/* when best_loc is a gpu node and the same as prange->actual_loc,
3505 	 * we still need to do the migration, as prange->actual_loc != 0 does
3506 	 * not mean all pages in prange are in vram. hmm migrate will pick
3507 	 * up the right pages during migration.
3508 	 */
3509 	if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
3510 	    (best_loc == 0 && prange->actual_loc == 0))
3511 		return 0;
3512 
3513 	if (!best_loc) {
3514 		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
3515 					KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3516 		*migrated = !r;
3517 		return r;
3518 	}
3519 
3520 	r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
3521 				mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3522 	*migrated = !r;
3523 
3524 	return 0;
3525 }
3526 
3527 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3528 {
3529 	/* Dereferencing fence->svm_bo is safe here because the fence hasn't
3530 	 * signaled yet and we're under the protection of the fence->lock.
3531 	 * After the fence is signaled in svm_range_bo_release, we cannot get
3532 	 * here any more.
3533 	 *
3534 	 * Reference is dropped in svm_range_evict_svm_bo_worker.
3535 	 */
3536 	if (svm_bo_ref_unless_zero(fence->svm_bo)) {
3537 		WRITE_ONCE(fence->svm_bo->evicting, 1);
3538 		schedule_work(&fence->svm_bo->eviction_work);
3539 	}
3540 
3541 	return 0;
3542 }
3543 
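/* Eviction worker: migrate all ranges backed by the evicted svm_bo from vram
 * back to system memory, then signal the eviction fence so TTM can finish the
 * eviction. Runs with the svm_bo reference taken in
 * svm_range_schedule_evict_svm_bo.
 */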
3544 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3545 {
3546 	struct svm_range_bo *svm_bo;
3547 	struct mm_struct *mm;
3548 	int r = 0;
3549 
3550 	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3551 
3552 	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3553 		mm = svm_bo->eviction_fence->mm;
3554 	} else {
3555 		svm_range_bo_unref(svm_bo);
3556 		return;
3557 	}
3558 
3559 	mmap_read_lock(mm);
3560 	spin_lock(&svm_bo->list_lock);
3561 	while (!list_empty(&svm_bo->range_list) && !r) {
3562 		struct svm_range *prange =
3563 				list_first_entry(&svm_bo->range_list,
3564 						struct svm_range, svm_bo_list);
3565 		int retries = 3;
3566 
3567 		list_del_init(&prange->svm_bo_list);
3568 		spin_unlock(&svm_bo->list_lock);
3569 
3570 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3571 			 prange->start, prange->last);
3572 
3573 		mutex_lock(&prange->migrate_mutex);
3574 		do {
3575 			/* migrate all vram pages in this prange to sys ram;
3576 			 * after that, prange->actual_loc should be zero
3577 			 */
3578 			r = svm_migrate_vram_to_ram(prange, mm,
3579 					prange->start, prange->last,
3580 					KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3581 		} while (!r && prange->actual_loc && --retries);
3582 
3583 		if (!r && prange->actual_loc)
3584 			pr_info_once("Migration failed during eviction\n");
3585 
3586 		if (!prange->actual_loc) {
3587 			mutex_lock(&prange->lock);
3588 			prange->svm_bo = NULL;
3589 			mutex_unlock(&prange->lock);
3590 		}
3591 		mutex_unlock(&prange->migrate_mutex);
3592 
3593 		spin_lock(&svm_bo->list_lock);
3594 	}
3595 	spin_unlock(&svm_bo->list_lock);
3596 	mmap_read_unlock(mm);
3597 	mmput(mm);
3598 
3599 	dma_fence_signal(&svm_bo->eviction_fence->base);
3600 
3601 	/* This is the last reference to svm_bo, after svm_range_vram_node_free
3602 	 * has been called in svm_migrate_vram_to_ram
3603 	 */
3604 	WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3605 	svm_range_bo_unref(svm_bo);
3606 }
3607 
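/* Apply SVM attributes to [start, start + size): split or merge existing
 * ranges as needed, apply the attributes as a transaction under svms->lock,
 * then trigger prefetch migrations and revalidate/map the affected ranges on
 * the GPUs.
 */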
3608 static int
3609 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3610 		   uint64_t start, uint64_t size, uint32_t nattr,
3611 		   struct kfd_ioctl_svm_attribute *attrs)
3612 {
3613 	struct amdkfd_process_info *process_info = p->kgd_process_info;
3614 	struct list_head update_list;
3615 	struct list_head insert_list;
3616 	struct list_head remove_list;
3617 	struct list_head remap_list;
3618 	struct svm_range_list *svms;
3619 	struct svm_range *prange;
3620 	struct svm_range *next;
3621 	bool update_mapping = false;
3622 	bool flush_tlb;
3623 	int r, ret = 0;
3624 
3625 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3626 		 p->pasid, &p->svms, start, start + size - 1, size);
3627 
3628 	r = svm_range_check_attr(p, nattr, attrs);
3629 	if (r)
3630 		return r;
3631 
3632 	svms = &p->svms;
3633 
3634 	mutex_lock(&process_info->lock);
3635 
3636 	svm_range_list_lock_and_flush_work(svms, mm);
3637 
3638 	r = svm_range_is_valid(p, start, size);
3639 	if (r) {
3640 		pr_debug("invalid range r=%d\n", r);
3641 		mmap_write_unlock(mm);
3642 		goto out;
3643 	}
3644 
3645 	mutex_lock(&svms->lock);
3646 
3647 	/* Add new range and split existing ranges as needed */
3648 	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3649 			  &insert_list, &remove_list, &remap_list);
3650 	if (r) {
3651 		mutex_unlock(&svms->lock);
3652 		mmap_write_unlock(mm);
3653 		goto out;
3654 	}
3655 	/* Apply changes as a transaction */
3656 	list_for_each_entry_safe(prange, next, &insert_list, list) {
3657 		svm_range_add_to_svms(prange);
3658 		svm_range_add_notifier_locked(mm, prange);
3659 	}
3660 	list_for_each_entry(prange, &update_list, update_list) {
3661 		svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3662 		/* TODO: unmap ranges from GPU that lost access */
3663 	}
3664 	list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3665 		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3666 			 prange->svms, prange, prange->start,
3667 			 prange->last);
3668 		svm_range_unlink(prange);
3669 		svm_range_remove_notifier(prange);
3670 		svm_range_free(prange, false);
3671 	}
3672 
3673 	mmap_write_downgrade(mm);
3674 	/* Trigger migrations and revalidate and map to GPUs as needed. If
3675 	 * this fails we may be left with partially completed actions. There
3676 	 * is no clean way of rolling back to the previous state in such a
3677 	 * case because the rollback wouldn't be guaranteed to work either.
3678 	 */
3679 	list_for_each_entry(prange, &update_list, update_list) {
3680 		bool migrated;
3681 
3682 		mutex_lock(&prange->migrate_mutex);
3683 
3684 		r = svm_range_trigger_migration(mm, prange, &migrated);
3685 		if (r)
3686 			goto out_unlock_range;
3687 
3688 		if (migrated && (!p->xnack_enabled ||
3689 		    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3690 		    prange->mapped_to_gpu) {
3691 			pr_debug("restore_work will update mappings of GPUs\n");
3692 			mutex_unlock(&prange->migrate_mutex);
3693 			continue;
3694 		}
3695 
3696 		if (!migrated && !update_mapping) {
3697 			mutex_unlock(&prange->migrate_mutex);
3698 			continue;
3699 		}
3700 
3701 		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3702 
3703 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3704 					       MAX_GPU_INSTANCE, true, true, flush_tlb);
3705 		if (r)
3706 			pr_debug("failed %d to map svm range\n", r);
3707 
3708 out_unlock_range:
3709 		mutex_unlock(&prange->migrate_mutex);
3710 		if (r)
3711 			ret = r;
3712 	}
3713 
3714 	list_for_each_entry(prange, &remap_list, update_list) {
3715 		pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
3716 			 prange, prange->start, prange->last);
3717 		mutex_lock(&prange->migrate_mutex);
3718 		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3719 					       MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
3720 		if (r)
3721 			pr_debug("failed %d on remap svm range\n", r);
3722 		mutex_unlock(&prange->migrate_mutex);
3723 		if (r)
3724 			ret = r;
3725 	}
3726 
3727 	dynamic_svm_range_dump(svms);
3728 
3729 	mutex_unlock(&svms->lock);
3730 	mmap_read_unlock(mm);
3731 out:
3732 	mutex_unlock(&process_info->lock);
3733 
3734 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3735 		 &p->svms, start, start + size - 1, r);
3736 
3737 	return ret ? ret : r;
3738 }
3739 
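/* Query effective SVM attributes over [start, start + size): values common to
 * all ranges in the interval are returned; attributes that differ between
 * ranges are reported as UNDEFINED (locations), as the minimum (granularity)
 * or as the intersection/union (flags, accessibility).
 */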
3740 static int
3741 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3742 		   uint64_t start, uint64_t size, uint32_t nattr,
3743 		   struct kfd_ioctl_svm_attribute *attrs)
3744 {
3745 	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3746 	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3747 	bool get_preferred_loc = false;
3748 	bool get_prefetch_loc = false;
3749 	bool get_granularity = false;
3750 	bool get_accessible = false;
3751 	bool get_flags = false;
3752 	uint64_t last = start + size - 1UL;
3753 	uint8_t granularity = 0xff;
3754 	struct interval_tree_node *node;
3755 	struct svm_range_list *svms;
3756 	struct svm_range *prange;
3757 	uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3758 	uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3759 	uint32_t flags_and = 0xffffffff;
3760 	uint32_t flags_or = 0;
3761 	int gpuidx;
3762 	uint32_t i;
3763 	int r = 0;
3764 
3765 	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3766 		 start + size - 1, nattr);
3767 
3768 	/* Flush pending deferred work to avoid racing with deferred actions from
3769 	 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3770 	 * can still race with get_attr because we don't hold the mmap lock. But that
3771 	 * would be a race condition in the application anyway, and undefined
3772 	 * behaviour is acceptable in that case.
3773 	 */
3774 	flush_work(&p->svms.deferred_list_work);
3775 
3776 	mmap_read_lock(mm);
3777 	r = svm_range_is_valid(p, start, size);
3778 	mmap_read_unlock(mm);
3779 	if (r) {
3780 		pr_debug("invalid range r=%d\n", r);
3781 		return r;
3782 	}
3783 
3784 	for (i = 0; i < nattr; i++) {
3785 		switch (attrs[i].type) {
3786 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3787 			get_preferred_loc = true;
3788 			break;
3789 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3790 			get_prefetch_loc = true;
3791 			break;
3792 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3793 			get_accessible = true;
3794 			break;
3795 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3796 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3797 			get_flags = true;
3798 			break;
3799 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3800 			get_granularity = true;
3801 			break;
3802 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3803 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3804 			fallthrough;
3805 		default:
3806 			pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3807 			return -EINVAL;
3808 		}
3809 	}
3810 
3811 	svms = &p->svms;
3812 
3813 	mutex_lock(&svms->lock);
3814 
3815 	node = interval_tree_iter_first(&svms->objects, start, last);
3816 	if (!node) {
3817 		pr_debug("range attrs not found return default values\n");
3818 		svm_range_set_default_attributes(svms, &location, &prefetch_loc,
3819 						 &granularity, &flags_and);
3820 		flags_or = flags_and;
3821 		if (p->xnack_enabled)
3822 			bitmap_copy(bitmap_access, svms->bitmap_supported,
3823 				    MAX_GPU_INSTANCE);
3824 		else
3825 			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3826 		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3827 		goto fill_values;
3828 	}
3829 	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3830 	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3831 
3832 	while (node) {
3833 		struct interval_tree_node *next;
3834 
3835 		prange = container_of(node, struct svm_range, it_node);
3836 		next = interval_tree_iter_next(node, start, last);
3837 
3838 		if (get_preferred_loc) {
3839 			if (prange->preferred_loc ==
3840 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3841 			    (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3842 			     location != prange->preferred_loc)) {
3843 				location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3844 				get_preferred_loc = false;
3845 			} else {
3846 				location = prange->preferred_loc;
3847 			}
3848 		}
3849 		if (get_prefetch_loc) {
3850 			if (prange->prefetch_loc ==
3851 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3852 			    (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3853 			     prefetch_loc != prange->prefetch_loc)) {
3854 				prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3855 				get_prefetch_loc = false;
3856 			} else {
3857 				prefetch_loc = prange->prefetch_loc;
3858 			}
3859 		}
3860 		if (get_accessible) {
3861 			bitmap_and(bitmap_access, bitmap_access,
3862 				   prange->bitmap_access, MAX_GPU_INSTANCE);
3863 			bitmap_and(bitmap_aip, bitmap_aip,
3864 				   prange->bitmap_aip, MAX_GPU_INSTANCE);
3865 		}
3866 		if (get_flags) {
3867 			flags_and &= prange->flags;
3868 			flags_or |= prange->flags;
3869 		}
3870 
3871 		if (get_granularity && prange->granularity < granularity)
3872 			granularity = prange->granularity;
3873 
3874 		node = next;
3875 	}
3876 fill_values:
3877 	mutex_unlock(&svms->lock);
3878 
3879 	for (i = 0; i < nattr; i++) {
3880 		switch (attrs[i].type) {
3881 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3882 			attrs[i].value = location;
3883 			break;
3884 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3885 			attrs[i].value = prefetch_loc;
3886 			break;
3887 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3888 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
3889 							       attrs[i].value);
3890 			if (gpuidx < 0) {
3891 				pr_debug("invalid gpuid %x\n", attrs[i].value);
3892 				return -EINVAL;
3893 			}
3894 			if (test_bit(gpuidx, bitmap_access))
3895 				attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3896 			else if (test_bit(gpuidx, bitmap_aip))
3897 				attrs[i].type =
3898 					KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3899 			else
3900 				attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3901 			break;
3902 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3903 			attrs[i].value = flags_and;
3904 			break;
3905 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3906 			attrs[i].value = ~flags_or;
3907 			break;
3908 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3909 			attrs[i].value = (uint32_t)granularity;
3910 			break;
3911 		}
3912 	}
3913 
3914 	return 0;
3915 }
3916 
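/* CRIU resume: replay the SVM range attributes queued on
 * svms->criu_svm_metadata_list during restore stage 2 through
 * svm_range_set_attr, adding a CLR_FLAGS attribute since it is not captured at
 * checkpoint time.
 */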
3917 int kfd_criu_resume_svm(struct kfd_process *p)
3918 {
3919 	struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
3920 	int nattr_common = 4, nattr_accessibility = 1;
3921 	struct criu_svm_metadata *criu_svm_md = NULL;
3922 	struct svm_range_list *svms = &p->svms;
3923 	struct criu_svm_metadata *next = NULL;
3924 	uint32_t set_flags = 0xffffffff;
3925 	int i, j, num_attrs, ret = 0;
3926 	uint64_t set_attr_size;
3927 	struct mm_struct *mm;
3928 
3929 	if (list_empty(&svms->criu_svm_metadata_list)) {
3930 		pr_debug("No SVM data from CRIU restore stage 2\n");
3931 		return ret;
3932 	}
3933 
3934 	mm = get_task_mm(p->lead_thread);
3935 	if (!mm) {
3936 		pr_err("failed to get mm for the target process\n");
3937 		return -ESRCH;
3938 	}
3939 
3940 	num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
3941 
3942 	i = j = 0;
3943 	list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3944 		pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
3945 			 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
3946 
3947 		for (j = 0; j < num_attrs; j++) {
3948 			pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
3949 				 i, j, criu_svm_md->data.attrs[j].type,
3950 				 i, j, criu_svm_md->data.attrs[j].value);
3951 			switch (criu_svm_md->data.attrs[j].type) {
3952 			/* During the checkpoint operation, the query for the
3953 			 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
3954 			 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
3955 			 * not used by the range which was checkpointed. Care
3956 			 * must be taken not to restore with an invalid value;
3957 			 * otherwise the gpuidx value will be invalid and
3958 			 * set_attr would eventually fail, so replace those
3959 			 * with another dummy attribute such as
3960 			 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
3961 			 */
3962 			case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3963 				if (criu_svm_md->data.attrs[j].value ==
3964 				    KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
3965 					criu_svm_md->data.attrs[j].type =
3966 						KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3967 					criu_svm_md->data.attrs[j].value = 0;
3968 				}
3969 				break;
3970 			case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3971 				set_flags = criu_svm_md->data.attrs[j].value;
3972 				break;
3973 			default:
3974 				break;
3975 			}
3976 		}
3977 
3978 		/* CLR_FLAGS is not available via get_attr during checkpoint, but
3979 		 * it needs to be inserted before restoring the ranges, so
3980 		 * allocate extra space for it before calling set_attr.
3981 		 */
3982 		set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3983 						(num_attrs + 1);
3984 		set_attr_new = krealloc(set_attr, set_attr_size,
3985 					    GFP_KERNEL);
3986 		if (!set_attr_new) {
3987 			ret = -ENOMEM;
3988 			goto exit;
3989 		}
3990 		set_attr = set_attr_new;
3991 
3992 		memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
3993 					sizeof(struct kfd_ioctl_svm_attribute));
3994 		set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
3995 		set_attr[num_attrs].value = ~set_flags;
3996 
3997 		ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3998 					 criu_svm_md->data.size, num_attrs + 1,
3999 					 set_attr);
4000 		if (ret) {
4001 			pr_err("CRIU: failed to set range attributes\n");
4002 			goto exit;
4003 		}
4004 
4005 		i++;
4006 	}
4007 exit:
4008 	kfree(set_attr);
4009 	list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
4010 		pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
4011 						criu_svm_md->data.start_addr);
4012 		kfree(criu_svm_md);
4013 	}
4014 
4015 	mmput(mm);
4016 	return ret;
4017 
4018 }
4019 
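/* CRIU restore stage 2: copy one saved SVM range record (header plus
 * attributes) from the user private data and queue it on
 * svms->criu_svm_metadata_list; the attributes are applied later by
 * kfd_criu_resume_svm.
 */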
4020 int kfd_criu_restore_svm(struct kfd_process *p,
4021 			 uint8_t __user *user_priv_ptr,
4022 			 uint64_t *priv_data_offset,
4023 			 uint64_t max_priv_data_size)
4024 {
4025 	uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
4026 	int nattr_common = 4, nattr_accessibility = 1;
4027 	struct criu_svm_metadata *criu_svm_md = NULL;
4028 	struct svm_range_list *svms = &p->svms;
4029 	uint32_t num_devices;
4030 	int ret = 0;
4031 
4032 	num_devices = p->n_pdds;
4033 	/* Handle one SVM range object at a time. The number of GPUs is assumed
4034 	 * to be the same on the restore node; this must have been checked while
4035 	 * evaluating the topology earlier.
4036 	 */
4037 
4038 	svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
4039 		(nattr_common + nattr_accessibility * num_devices);
4040 	svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
4041 
4042 	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4043 								svm_attrs_size;
4044 
4045 	criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
4046 	if (!criu_svm_md) {
4047 		pr_err("failed to allocate memory to store svm metadata\n");
4048 		return -ENOMEM;
4049 	}
4050 	if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
4051 		ret = -EINVAL;
4052 		goto exit;
4053 	}
4054 
4055 	ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
4056 			     svm_priv_data_size);
4057 	if (ret) {
4058 		ret = -EFAULT;
4059 		goto exit;
4060 	}
4061 	*priv_data_offset += svm_priv_data_size;
4062 
4063 	list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
4064 
4065 	return 0;
4066 
4067 
4068 exit:
4069 	kfree(criu_svm_md);
4070 	return ret;
4071 }
4072 
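/* Report how many SVM ranges exist and how much private data space a CRIU
 * checkpoint of them needs; see the attribute breakdown in the comment below.
 */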
4073 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
4074 		       uint64_t *svm_priv_data_size)
4075 {
4076 	uint64_t total_size, accessibility_size, common_attr_size;
4077 	int nattr_common = 4, nattr_accessibility = 1;
4078 	int num_devices = p->n_pdds;
4079 	struct svm_range_list *svms;
4080 	struct svm_range *prange;
4081 	uint32_t count = 0;
4082 
4083 	*svm_priv_data_size = 0;
4084 
4085 	svms = &p->svms;
4086 	if (!svms)
4087 		return -EINVAL;
4088 
4089 	mutex_lock(&svms->lock);
4090 	list_for_each_entry(prange, &svms->list, list) {
4091 		pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
4092 			 prange, prange->start, prange->npages,
4093 			 prange->start + prange->npages - 1);
4094 		count++;
4095 	}
4096 	mutex_unlock(&svms->lock);
4097 
4098 	*num_svm_ranges = count;
4099 	/* Only the accessibility attributes need to be queried for all the gpus
4100 	 * individually; the remaining ones span the entire process
4101 	 * regardless of the various gpu nodes. Of the remaining attributes,
4102 	 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
4103 	 *
4104 	 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
4105 	 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
4106 	 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
4107 	 * KFD_IOCTL_SVM_ATTR_GRANULARITY
4108 	 *
4109 	 * ** ACCESSIBILITY ATTRIBUTES **
4110 	 * (Considered as one, type is altered during query, value is gpuid)
4111 	 * KFD_IOCTL_SVM_ATTR_ACCESS
4112 	 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
4113 	 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
4114 	 */
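	/* For example (illustrative numbers only): with 4 GPUs, each range needs
	 * 4 common + 4 accessibility attributes, so the per-range private data
	 * size is sizeof(struct kfd_criu_svm_range_priv_data) +
	 * 8 * sizeof(struct kfd_ioctl_svm_attribute).
	 */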
4115 	if (*num_svm_ranges > 0) {
4116 		common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4117 			nattr_common;
4118 		accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
4119 			nattr_accessibility * num_devices;
4120 
4121 		total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4122 			common_attr_size + accessibility_size;
4123 
4124 		*svm_priv_data_size = *num_svm_ranges * total_size;
4125 	}
4126 
4127 	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
4128 		 *svm_priv_data_size);
4129 	return 0;
4130 }
4131 
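/* CRIU checkpoint: for every SVM range, query the common and per-GPU
 * accessibility attributes with svm_range_get_attr and copy a
 * kfd_criu_svm_range_priv_data record into the user private data buffer.
 */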
4132 int kfd_criu_checkpoint_svm(struct kfd_process *p,
4133 			    uint8_t __user *user_priv_data,
4134 			    uint64_t *priv_data_offset)
4135 {
4136 	struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
4137 	struct kfd_ioctl_svm_attribute *query_attr = NULL;
4138 	uint64_t svm_priv_data_size, query_attr_size = 0;
4139 	int index, nattr_common = 4, ret = 0;
4140 	struct svm_range_list *svms;
4141 	int num_devices = p->n_pdds;
4142 	struct svm_range *prange;
4143 	struct mm_struct *mm;
4144 
4145 	svms = &p->svms;
4146 	if (!svms)
4147 		return -EINVAL;
4148 
4149 	mm = get_task_mm(p->lead_thread);
4150 	if (!mm) {
4151 		pr_err("failed to get mm for the target process\n");
4152 		return -ESRCH;
4153 	}
4154 
4155 	query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4156 				(nattr_common + num_devices);
4157 
4158 	query_attr = kzalloc(query_attr_size, GFP_KERNEL);
4159 	if (!query_attr) {
4160 		ret = -ENOMEM;
4161 		goto exit;
4162 	}
4163 
4164 	query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
4165 	query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
4166 	query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4167 	query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
4168 
4169 	for (index = 0; index < num_devices; index++) {
4170 		struct kfd_process_device *pdd = p->pdds[index];
4171 
4172 		query_attr[index + nattr_common].type =
4173 			KFD_IOCTL_SVM_ATTR_ACCESS;
4174 		query_attr[index + nattr_common].value = pdd->user_gpu_id;
4175 	}
4176 
4177 	svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
4178 
4179 	svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
4180 	if (!svm_priv) {
4181 		ret = -ENOMEM;
4182 		goto exit_query;
4183 	}
4184 
4185 	index = 0;
4186 	list_for_each_entry(prange, &svms->list, list) {
4187 
4188 		svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
4189 		svm_priv->start_addr = prange->start;
4190 		svm_priv->size = prange->npages;
4191 		memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4192 		pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4193 			 prange, prange->start, prange->npages,
4194 			 prange->start + prange->npages - 1,
4195 			 prange->npages * PAGE_SIZE);
4196 
4197 		ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4198 					 svm_priv->size,
4199 					 (nattr_common + num_devices),
4200 					 svm_priv->attrs);
4201 		if (ret) {
4202 			pr_err("CRIU: failed to obtain range attributes\n");
4203 			goto exit_priv;
4204 		}
4205 
4206 		if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4207 				 svm_priv_data_size)) {
4208 			pr_err("Failed to copy svm priv to user\n");
4209 			ret = -EFAULT;
4210 			goto exit_priv;
4211 		}
4212 
4213 		*priv_data_offset += svm_priv_data_size;
4214 
4215 	}
4216 
4217 
4218 exit_priv:
4219 	kfree(svm_priv);
4220 exit_query:
4221 	kfree(query_attr);
4222 exit:
4223 	mmput(mm);
4224 	return ret;
4225 }
4226 
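/* Entry point for the AMDKFD_IOC_SVM ioctl; start and size arrive in bytes and
 * are converted to page units here. As an illustration only (not part of the
 * driver; gpu_id, buf, buf_size and kfd_fd are placeholders), a user-space
 * caller prefetching a buffer to a GPU might fill the args roughly like this:
 *
 *	struct kfd_ioctl_svm_attribute attr = {
 *		.type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
 *		.value = gpu_id,
 *	};
 *	struct kfd_ioctl_svm_args *args;
 *
 *	args = calloc(1, sizeof(*args) + sizeof(attr));
 *	args->start_addr = (uint64_t)buf;
 *	args->size = buf_size;
 *	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *	args->nattr = 1;
 *	memcpy(args->attrs, &attr, sizeof(attr));
 *	ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 */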
4227 int
4228 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4229 	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4230 {
4231 	struct mm_struct *mm = current->mm;
4232 	int r;
4233 
4234 	start >>= PAGE_SHIFT;
4235 	size >>= PAGE_SHIFT;
4236 
4237 	switch (op) {
4238 	case KFD_IOCTL_SVM_OP_SET_ATTR:
4239 		r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4240 		break;
4241 	case KFD_IOCTL_SVM_OP_GET_ATTR:
4242 		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
4243 		break;
4244 	default:
4245 		r = -EINVAL;
4246 		break;
4247 	}
4248 
4249 	return r;
4250 }
4251 }