// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched/task.h>
#include <linux/dynamic_debug.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>

#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__

#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1

/* Long enough to ensure that no retry fault arrives after the svm range is
 * restored and the page table is updated.
 */
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	(2UL * NSEC_PER_MSEC)
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
#define dynamic_svm_range_dump(svms) \
	_dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
#else
#define dynamic_svm_range_dump(svms) \
	do { if (0) svm_range_debug_dump(svms); } while (0)
#endif

/* Giant svm ranges are split into smaller ranges based on this value. It is
 * the minimum of 1/32 of the VRAM size across all dGPUs/APUs, clamped
 * between 2MB and 1GB, and aligned to a power-of-two multiple of 2MB.
 */
static uint64_t max_svm_range_pages;

struct criu_svm_metadata {
	struct list_head list;
	struct kfd_criu_svm_range_priv_data data;
};

static void svm_range_evict_svm_bo_worker(struct work_struct *work);
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq);
static int
svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
		   uint64_t *bo_s, uint64_t *bo_l);
static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
	.invalidate = svm_range_cpu_invalidate_pagetables,
};

/**
 * svm_range_unlink - unlink svm_range from lists and interval tree
 * @prange: svm range structure to be removed
 *
 * Remove the svm_range from the svms and svm_bo lists and the svms
 * interval tree.
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_unlink(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	if (prange->svm_bo) {
		spin_lock(&prange->svm_bo->list_lock);
		list_del(&prange->svm_bo_list);
		spin_unlock(&prange->svm_bo->list_lock);
	}

	list_del(&prange->list);
	if (prange->it_node.start != 0 && prange->it_node.last != 0)
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
}

static void
svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
					    prange->start << PAGE_SHIFT,
					    prange->npages << PAGE_SHIFT,
					    &svm_range_mn_ops);
}

/**
 * svm_range_add_to_svms - add svm range to svms
 * @prange: svm range structure to be added
 *
 * Add the svm range to svms interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_add_to_svms(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	list_move_tail(&prange->list, &prange->svms->list);
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;
	interval_tree_insert(&prange->it_node, &prange->svms->objects);
}

static void svm_range_remove_notifier(struct svm_range *prange)
{
	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
		 prange->svms, prange,
		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
		 prange->notifier.interval_tree.last >> PAGE_SHIFT);

	if (prange->notifier.interval_tree.start != 0 &&
	    prange->notifier.interval_tree.last != 0)
		mmu_interval_notifier_remove(&prange->notifier);
}

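/* A dma_addr entry is a real DMA mapping only if it is non-zero, did not
 * fail to map, and does not carry the SVM_RANGE_VRAM_DOMAIN flag, which
 * marks the entry as a VRAM physical address rather than a DMA mapping.
 */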
static bool
svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
}

static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
		      unsigned long offset, unsigned long npages,
		      unsigned long *hmm_pfns, uint32_t gpuidx)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	dma_addr_t *addr = prange->dma_addr[gpuidx];
	struct device *dev = adev->dev;
	struct page *page;
	int i, r;

	if (!addr) {
		addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		prange->dma_addr[gpuidx] = addr;
	}

	addr += offset;
	for (i = 0; i < npages; i++) {
		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);

		page = hmm_pfn_to_page(hmm_pfns[i]);
		if (is_zone_device_page(page)) {
			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;

			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
				   bo_adev->vm_manager.vram_base_offset -
				   bo_adev->kfd.pgmap.range.start;
			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
			continue;
		}
		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		r = dma_mapping_error(dev, addr[i]);
		if (r) {
			dev_err(dev, "failed %d dma_map_page\n", r);
			return r;
		}
		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
	}

	return 0;
}

static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
		  unsigned long offset, unsigned long npages,
		  unsigned long *hmm_pfns)
{
	struct kfd_process *p;
	uint32_t gpuidx;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		struct kfd_process_device *pdd;

		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
					  hmm_pfns, gpuidx);
		if (r)
			break;
	}

	return r;
}

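/**
 * svm_range_dma_unmap_dev - unmap DMA mappings of a range on one device
 * @dev: device that owns the DMA mappings
 * @dma_addr: dma_addr array of the range for this device
 * @offset: first page to unmap, relative to the start of the range
 * @npages: number of pages to unmap
 *
 * Entries tagged with SVM_RANGE_VRAM_DOMAIN are skipped; only real DMA
 * mappings are unmapped and their entries cleared.
 */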
void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
			     unsigned long offset, unsigned long npages)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	int i;

	if (!dma_addr)
		return;

	for (i = offset; i < offset + npages; i++) {
		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
			continue;
		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
		dma_addr[i] = 0;
	}
}

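/* Unmap the whole range on every GPU that has a dma_addr array for it. */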
void svm_range_dma_unmap(struct svm_range *prange)
{
	struct kfd_process_device *pdd;
	dma_addr_t *dma_addr;
	struct device *dev;
	struct kfd_process *p;
	uint32_t gpuidx;

	p = container_of(prange->svms, struct kfd_process, svms);

	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		dma_addr = prange->dma_addr[gpuidx];
		if (!dma_addr)
			continue;

		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			continue;
		}
		dev = &pdd->dev->adev->pdev->dev;

		svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
	}
}

static void svm_range_free(struct svm_range *prange, bool do_unmap)
{
	uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint32_t gpuidx;

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
		 prange->start, prange->last);

	svm_range_vram_node_free(prange);
	if (do_unmap)
		svm_range_dma_unmap(prange);

	if (do_unmap && !p->xnack_enabled) {
		pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
		amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
	}

	/* free dma_addr array for each gpu */
	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		if (prange->dma_addr[gpuidx]) {
			kvfree(prange->dma_addr[gpuidx]);
			prange->dma_addr[gpuidx] = NULL;
		}
	}

	mutex_destroy(&prange->lock);
	mutex_destroy(&prange->migrate_mutex);
	kfree(prange);
}

static void
svm_range_set_default_attributes(struct svm_range_list *svms, int32_t *location,
				 int32_t *prefetch_loc, uint8_t *granularity,
				 uint32_t *flags)
{
	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*granularity = svms->default_granularity;
	*flags =
		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
}

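/* Allocate and initialize a new svm_range covering [start, last] (in pages).
 * Without XNACK, the range is counted against the resident system memory
 * limit when update_mem_usage is true. Returns NULL on allocation failure
 * or when the memory limit would be exceeded.
 */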
static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
			 uint64_t last, bool update_mem_usage)
{
	uint64_t size = last - start + 1;
	struct svm_range *prange;
	struct kfd_process *p;

	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
	if (!prange)
		return NULL;

	p = container_of(svms, struct kfd_process, svms);
	if (!p->xnack_enabled && update_mem_usage &&
	    amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
				    KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
		pr_info("SVM mapping failed, exceeds resident system memory limit\n");
		kfree(prange);
		return NULL;
	}
	prange->npages = size;
	prange->svms = svms;
	prange->start = start;
	prange->last = last;
	INIT_LIST_HEAD(&prange->list);
	INIT_LIST_HEAD(&prange->update_list);
	INIT_LIST_HEAD(&prange->svm_bo_list);
	INIT_LIST_HEAD(&prange->deferred_list);
	INIT_LIST_HEAD(&prange->child_list);
	atomic_set(&prange->invalid, 0);
	prange->validate_timestamp = 0;
	prange->vram_pages = 0;
	mutex_init(&prange->migrate_mutex);
	mutex_init(&prange->lock);

	if (p->xnack_enabled)
		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
			    MAX_GPU_INSTANCE);

	svm_range_set_default_attributes(svms, &prange->preferred_loc,
					 &prange->prefetch_loc,
					 &prange->granularity, &prange->flags);

	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);

	return prange;
}

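/* Take a reference on svm_bo unless its refcount has already dropped to
 * zero, in which case the BO is being released and must not be reused.
 */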
static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
{
	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
		return false;

	return true;
}

static void svm_range_bo_release(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);

	spin_lock(&svm_bo->list_lock);
	while (!list_empty(&svm_bo->range_list)) {
		struct svm_range *prange =
				list_first_entry(&svm_bo->range_list,
						struct svm_range, svm_bo_list);
		/* list_del_init tells a concurrent svm_range_vram_node_new when
		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
		 */
		list_del_init(&prange->svm_bo_list);
		spin_unlock(&svm_bo->list_lock);

		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
			 prange->start, prange->last);
		mutex_lock(&prange->lock);
		prange->svm_bo = NULL;
		/* prange should not hold vram page now */
		WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
		mutex_unlock(&prange->lock);

		spin_lock(&svm_bo->list_lock);
	}
	spin_unlock(&svm_bo->list_lock);

	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
		struct kfd_process_device *pdd;
		struct kfd_process *p;
		struct mm_struct *mm;

		mm = svm_bo->eviction_fence->mm;
		/*
		 * A forked child process takes a reference on the svm_bo device
		 * pages, so the svm_bo may be released after the parent process
		 * is gone.
		 */
		p = kfd_lookup_process_by_mm(mm);
		if (p) {
			pdd = kfd_get_process_device_data(svm_bo->node, p);
			if (pdd)
				atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
			kfd_unref_process(p);
		}
		mmput(mm);
	}

	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
		/* We're not in the eviction worker. Signal the fence. */
		dma_fence_signal(&svm_bo->eviction_fence->base);
	dma_fence_put(&svm_bo->eviction_fence->base);
	amdgpu_bo_unref(&svm_bo->bo);
	kfree(svm_bo);
}

static void svm_range_bo_wq_release(struct work_struct *work)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(work, struct svm_range_bo, release_work);
	svm_range_bo_release(&svm_bo->kref);
}

static void svm_range_bo_release_async(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);
	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
	schedule_work(&svm_bo->release_work);
}

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
{
	kref_put(&svm_bo->kref, svm_range_bo_release_async);
}

static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_put(&svm_bo->kref, svm_range_bo_release);
}

static bool
svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
{
	mutex_lock(&prange->lock);
	if (!prange->svm_bo) {
		mutex_unlock(&prange->lock);
		return false;
	}
	if (prange->ttm_res) {
		/* We still have a reference, all is well */
		mutex_unlock(&prange->lock);
		return true;
	}
	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
		/*
		 * Migrate from GPU to GPU: remove the range from the source
		 * svm_bo->node range list, and return false to allocate an
		 * svm_bo from the destination node.
		 */
		if (prange->svm_bo->node != node) {
			mutex_unlock(&prange->lock);

			spin_lock(&prange->svm_bo->list_lock);
			list_del_init(&prange->svm_bo_list);
			spin_unlock(&prange->svm_bo->list_lock);

			svm_range_bo_unref(prange->svm_bo);
			return false;
		}
		if (READ_ONCE(prange->svm_bo->evicting)) {
			struct dma_fence *f;
			struct svm_range_bo *svm_bo;
			/* The BO is getting evicted,
			 * we need to get a new one
			 */
			mutex_unlock(&prange->lock);
			svm_bo = prange->svm_bo;
			f = dma_fence_get(&svm_bo->eviction_fence->base);
			svm_range_bo_unref(prange->svm_bo);
			/* wait for the fence to avoid a long spin-loop
			 * at list_empty_careful below
			 */
			dma_fence_wait(f, false);
			dma_fence_put(f);
		} else {
			/* The BO was still around and we got
			 * a new reference to it
			 */
			mutex_unlock(&prange->lock);
			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);

			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
			return true;
		}

	} else {
		mutex_unlock(&prange->lock);
	}

	/* We need a new svm_bo. Spin-loop to wait for concurrent
	 * svm_range_bo_release to finish removing this range from
	 * its range list and setting prange->svm_bo to NULL. After this,
	 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
	 */
	while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
		cond_resched();

	return false;
}

static struct svm_range_bo *svm_range_bo_new(void)
{
	struct svm_range_bo *svm_bo;

	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
	if (!svm_bo)
		return NULL;

	kref_init(&svm_bo->kref);
	INIT_LIST_HEAD(&svm_bo->range_list);
	spin_lock_init(&svm_bo->list_lock);

	return svm_bo;
}

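/**
 * svm_range_vram_node_new - allocate or reuse a VRAM BO backing the range
 * @node: KFD node to allocate the VRAM from
 * @prange: the svm range to back with VRAM
 * @clear: true to clear the allocated VRAM
 *
 * Reuses the existing svm_bo if it is still valid on the same node,
 * otherwise creates a new BO fenced with the eviction fence.
 *
 * Return: 0 on success, negative errno on failure
 */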
int
svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
			bool clear)
{
	struct kfd_process_device *pdd;
	struct amdgpu_bo_param bp;
	struct svm_range_bo *svm_bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo *bo;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);
	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
		 prange->start, prange->last);

	if (svm_range_validate_svm_bo(node, prange))
		return 0;

	svm_bo = svm_range_bo_new();
	if (!svm_bo) {
		pr_debug("failed to alloc svm bo\n");
		return -ENOMEM;
	}
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("failed to get mm\n");
		kfree(svm_bo);
		return -ESRCH;
	}
	svm_bo->node = node;
	svm_bo->eviction_fence =
		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
					   mm,
					   svm_bo);
	mmput(mm);
	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
	svm_bo->evicting = 0;
	memset(&bp, 0, sizeof(bp));
	bp.size = prange->npages * PAGE_SIZE;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
	bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	if (node->xcp)
		bp.xcp_id_plus1 = node->xcp->id + 1;

	r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
	if (r) {
		pr_debug("failed %d to create bo\n", r);
		goto create_bo_failed;
	}
	bo = &ubo->bo;

	pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
		 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
		 bp.xcp_id_plus1 - 1);

	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		goto reserve_bo_failed;
	}

	if (clear) {
		r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
		if (r) {
			pr_debug("failed %d to sync bo\n", r);
			amdgpu_bo_unreserve(bo);
			goto reserve_bo_failed;
		}
	}

	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		amdgpu_bo_unreserve(bo);
		goto reserve_bo_failed;
	}
	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);

	amdgpu_bo_unreserve(bo);

	svm_bo->bo = bo;
	prange->svm_bo = svm_bo;
	prange->ttm_res = bo->tbo.resource;
	prange->offset = 0;

	spin_lock(&svm_bo->list_lock);
	list_add(&prange->svm_bo_list, &svm_bo->range_list);
	spin_unlock(&svm_bo->list_lock);

	pdd = svm_range_get_pdd_by_node(prange, node);
	if (pdd)
		atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);

	return 0;

reserve_bo_failed:
	amdgpu_bo_unref(&bo);
create_bo_failed:
	dma_fence_put(&svm_bo->eviction_fence->base);
	kfree(svm_bo);
	prange->ttm_res = NULL;

	return r;
}

void svm_range_vram_node_free(struct svm_range *prange)
{
	/* serialize prange->svm_bo unref */
	mutex_lock(&prange->lock);
	/* prange->svm_bo has not been unreferenced yet */
	if (prange->ttm_res) {
		prange->ttm_res = NULL;
		mutex_unlock(&prange->lock);
		svm_range_bo_unref(prange->svm_bo);
	} else
		mutex_unlock(&prange->lock);
}

struct kfd_node *
svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	p = container_of(prange->svms, struct kfd_process, svms);
	pdd = kfd_process_device_data_by_id(p, gpu_id);
	if (!pdd) {
		pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
		return NULL;
	}

	return pdd->dev;
}

struct kfd_process_device *
svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
{
	struct kfd_process *p;

	p = container_of(prange->svms, struct kfd_process, svms);

	return kfd_get_process_device_data(node, p);
}

static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);

	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static int
svm_range_check_attr(struct kfd_process *p,
		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;

	for (i = 0; i < nattr; i++) {
		uint32_t val = attrs[i].value;
		int gpuidx = MAX_GPU_INSTANCE;

		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			break;
		default:
			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
			return -EINVAL;
		}

		if (gpuidx < 0) {
			pr_debug("no GPU 0x%x found\n", val);
			return -EINVAL;
		} else if (gpuidx < MAX_GPU_INSTANCE &&
			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
			pr_debug("GPU 0x%x not supported\n", val);
			return -EINVAL;
		}
	}

	return 0;
}

static void
svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
		      bool *update_mapping)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			prange->preferred_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			prange->prefetch_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			if (!p->xnack_enabled)
				*update_mapping = true;

			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				bitmap_set(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_set(prange->bitmap_aip, gpuidx, 1);
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			*update_mapping = true;
			prange->flags |= attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			*update_mapping = true;
			prange->flags &= ~attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}
}

static bool
svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (prange->preferred_loc != attrs[i].value)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			/* Prefetch should always trigger a migration even
			 * if the value of the attribute didn't change.
			 */
			return false;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				if (test_bit(gpuidx, prange->bitmap_access) ||
				    test_bit(gpuidx, prange->bitmap_aip))
					return false;
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				if (!test_bit(gpuidx, prange->bitmap_access))
					return false;
			} else {
				if (!test_bit(gpuidx, prange->bitmap_aip))
					return false;
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			if ((prange->flags & attrs[i].value) != attrs[i].value)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			if ((prange->flags & attrs[i].value) != 0)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			if (prange->granularity != attrs[i].value)
				return false;
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}

	return true;
}

/**
 * svm_range_debug_dump - print all range information from svms
 * @svms: svm range list header
 *
 * Print the start, page count, end and actual location of every svm range
 * on the svms linked list and in the interval tree.
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_debug_dump(struct svm_range_list *svms)
{
	struct interval_tree_node *node;
	struct svm_range *prange;

	pr_debug("dump svms 0x%p list\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");

	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
	}

	pr_debug("dump svms 0x%p interval tree\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
	while (node) {
		prange = container_of(node, struct svm_range, it_node);
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
		node = interval_tree_iter_next(node, 0, ~0ULL);
	}
}

static void *
svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
		     uint64_t offset, uint64_t *vram_pages)
{
	unsigned char *src = (unsigned char *)psrc + offset;
	unsigned char *dst;
	uint64_t i;

	dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
	if (!dst)
		return NULL;

	if (!vram_pages) {
		memcpy(dst, src, num_elements * size);
		return (void *)dst;
	}

	*vram_pages = 0;
	for (i = 0; i < num_elements; i++) {
		dma_addr_t *temp;
		temp = (dma_addr_t *)dst + i;
		*temp = *((dma_addr_t *)src + i);
		if (*temp & SVM_RANGE_VRAM_DOMAIN)
			(*vram_pages)++;
	}

	return (void *)dst;
}

static int
svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
{
	int i;

	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
		if (!src->dma_addr[i])
			continue;
		dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
					sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
		if (!dst->dma_addr[i])
			return -ENOMEM;
	}

	return 0;
}

static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
		      uint64_t old_start, uint64_t old_n,
		      uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
{
	unsigned char *new, *old, *pold;
	uint64_t d;

	if (!ppold)
		return 0;
	pold = *(unsigned char **)ppold;
	if (!pold)
		return 0;

	d = (new_start - old_start) * size;
	/* get the dma addr array for the new range and calculate its vram
	 * page count
	 */
	new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
	if (!new)
		return -ENOMEM;
	d = (new_start == old_start) ? new_n * size : 0;
	old = svm_range_copy_array(pold, size, old_n, d, NULL);
	if (!old) {
		kvfree(new);
		return -ENOMEM;
	}
	kvfree(pold);
	*(void **)ppold = old;
	*(void **)ppnew = new;

	return 0;
}

static int
svm_range_split_pages(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;
	int i, r;

	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
					  sizeof(*old->dma_addr[i]), old->start,
					  npages, new->start, new->npages,
					  old->actual_loc ? &new->vram_pages : NULL);
		if (r)
			return r;
	}
	if (old->actual_loc)
		old->vram_pages -= new->vram_pages;

	return 0;
}

static int
svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;

	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
		 new->svms, new, new->start, start, last);

	if (new->start == old->start) {
		new->offset = old->offset;
		old->offset += new->npages;
	} else {
		new->offset = old->offset + npages;
	}

	new->svm_bo = svm_range_bo_ref(old->svm_bo);
	new->ttm_res = old->ttm_res;

	spin_lock(&new->svm_bo->list_lock);
	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
	spin_unlock(&new->svm_bo->list_lock);

	return 0;
}

/**
 * svm_range_split_adjust - split range and adjust
 *
 * @new: new range
 * @old: the old range
 * @start: the start address in pages that the old range is adjusted to
 * @last: the last address in pages that the old range is adjusted to
 *
 * Copy the system memory dma_addr or vram ttm_res of the old range into the
 * new range, covering new->start up to new->npages pages. The remaining old
 * range spans start to last.
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory
 */
static int
svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
		       uint64_t start, uint64_t last)
{
	int r;

	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
		 new->svms, new->start, old->start, old->last, start, last);

	if (new->start < old->start ||
	    new->last > old->last) {
		WARN_ONCE(1, "invalid new range start or last\n");
		return -EINVAL;
	}

	r = svm_range_split_pages(new, old, start, last);
	if (r)
		return r;

	if (old->actual_loc && old->ttm_res) {
		r = svm_range_split_nodes(new, old, start, last);
		if (r)
			return r;
	}

	old->npages = last - start + 1;
	old->start = start;
	old->last = last;
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	new->mapped_to_gpu = old->mapped_to_gpu;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
	atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));

	return 0;
}

/**
 * svm_range_split - split a range in 2 ranges
 *
 * @prange: the svm range to split
 * @start: the remaining range start address in pages
 * @last: the remaining range last address in pages
 * @new: the result new range generated
 *
 * Two cases only:
 * case 1: if start == prange->start
 *         prange ==> prange[start, last]
 *         new range [last + 1, prange->last]
 *
 * case 2: if last == prange->last
 *         prange ==> prange[start, last]
 *         new range [prange->start, start - 1]
 *
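 * Example (case 1): splitting prange [0x100 0x1ff] with start 0x100 and
 * last 0x17f shrinks prange to [0x100 0x17f] and creates the new range
 * [0x180 0x1ff].
 *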
 * Return:
 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
 */
static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
		struct svm_range **new)
{
	uint64_t old_start = prange->start;
	uint64_t old_last = prange->last;
	struct svm_range_list *svms;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
		 old_start, old_last, start, last);

	if (old_start != start && old_last != last)
		return -EINVAL;
	if (start < old_start || last > old_last)
		return -EINVAL;

	svms = prange->svms;
	if (old_start == start)
		*new = svm_range_new(svms, last + 1, old_last, false);
	else
		*new = svm_range_new(svms, old_start, start - 1, false);
	if (!*new)
		return -ENOMEM;

	r = svm_range_split_adjust(*new, prange, start, last);
	if (r) {
		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
			 r, old_start, old_last, start, last);
		svm_range_free(*new, false);
		*new = NULL;
	}

	return r;
}

static int
svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
		     struct list_head *insert_list, struct list_head *remap_list)
{
	struct svm_range *tail = NULL;
	int r = svm_range_split(prange, prange->start, new_last, &tail);

	if (!r) {
		list_add(&tail->list, insert_list);
		if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
			list_add(&tail->update_list, remap_list);
	}
	return r;
}

static int
svm_range_split_head(struct svm_range *prange, uint64_t new_start,
		     struct list_head *insert_list, struct list_head *remap_list)
{
	struct svm_range *head = NULL;
	int r = svm_range_split(prange, new_start, prange->last, &head);

	if (!r) {
		list_add(&head->list, insert_list);
		if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
			list_add(&head->update_list, remap_list);
	}
	return r;
}

static void
svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
		    struct svm_range *pchild, enum svm_work_list_ops op)
{
	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
		 pchild, pchild->start, pchild->last, prange, op);

	pchild->work_item.mm = mm;
	pchild->work_item.op = op;
	list_add_tail(&pchild->child_list, &prange->child_list);
}

static bool
svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
{
	return (node_a->adev == node_b->adev ||
		amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
}

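/* Compute the GPU PTE flags for mapping prange on a given node. The memory
 * type (MTYPE) and snoop bits depend on the GC IP version, the coherence
 * flags of the range, and whether the backing memory is local VRAM, VRAM on
 * another GPU in the same XGMI hive, or system memory.
 */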
static uint64_t
svm_range_get_pte_flags(struct kfd_node *node,
			struct svm_range *prange, int domain)
{
	struct kfd_node *bo_node;
	uint32_t flags = prange->flags;
	uint32_t mapping_flags = 0;
	uint32_t gc_ip_version = KFD_GC_VERSION(node);
	uint64_t pte_flags;
	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
	bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
	bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
	unsigned int mtype_local;

	if (domain == SVM_RANGE_VRAM_DOMAIN)
		bo_node = prange->svm_bo->node;

	switch (gc_ip_version) {
	case IP_VERSION(9, 4, 1):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_node == node) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (svm_nodes_in_same_hive(node, bo_node))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case IP_VERSION(9, 4, 2):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_node == node) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (node->adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (svm_nodes_in_same_hive(node, bo_node))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		if (ext_coherent)
			mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC;
		else
			mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
				      amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
		snoop = true;
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			/* local HBM region close to partition */
			if (bo_node->adev == node->adev &&
			    (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
				mapping_flags |= mtype_local;
			/* local HBM region far from partition or remote XGMI GPU
			 * with regular system scope coherence
			 */
			else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
				mapping_flags |= AMDGPU_VM_MTYPE_NC;
			/* PCIe P2P on GPUs pre-9.5.0 */
			else if (gc_ip_version < IP_VERSION(9, 5, 0) &&
				 !svm_nodes_in_same_hive(bo_node, node))
				mapping_flags |= AMDGPU_VM_MTYPE_UC;
			/* Other remote memory */
			else
				mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		/* system memory accessed by the APU */
		} else if (node->adev->flags & AMD_IS_APU) {
			/* On NUMA systems, locality is determined per-page
			 * in amdgpu_gmc_override_vm_pte_flags
			 */
			if (num_possible_nodes() <= 1)
				mapping_flags |= mtype_local;
			else
				mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		/* system memory accessed by the dGPU */
		} else {
			if (gc_ip_version < IP_VERSION(9, 5, 0))
				mapping_flags |= AMDGPU_VM_MTYPE_UC;
			else
				mapping_flags |= AMDGPU_VM_MTYPE_NC;
		}
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_node != node)
				mapping_flags |= AMDGPU_VM_MTYPE_NC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;

	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	pte_flags = AMDGPU_PTE_VALID;
	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
	if (gc_ip_version >= IP_VERSION(12, 0, 0))
		pte_flags |= AMDGPU_PTE_IS_PTE;

	pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
	return pte_flags;
}

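/* Clear the PTEs covering [start, last] in this GPU's VM. An init PTE value
 * of 0 marks the pages invalid.
 */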
static int
svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			 uint64_t start, uint64_t last,
			 struct dma_fence **fence)
{
	uint64_t init_pte_value = 0;

	pr_debug("[0x%llx 0x%llx]\n", start, last);

	return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
				      last, init_pte_value, 0, 0, NULL, NULL,
				      fence);
}

static int
svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
			  unsigned long last, uint32_t trigger)
{
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct kfd_process_device *pdd;
	struct dma_fence *fence = NULL;
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	if (!prange->mapped_to_gpu) {
		pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
			 prange, prange->start, prange->last);
		return 0;
	}

	if (prange->start == start && prange->last == last) {
		pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
		prange->mapped_to_gpu = false;
	}

	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
		  MAX_GPU_INSTANCE);
	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
					     start, last, trigger);

		r = svm_range_unmap_from_gpu(pdd->dev->adev,
					     drm_priv_to_vm(pdd->drm_priv),
					     start, last, &fence);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r)
				break;
		}
		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
	}

	return r;
}

static int
svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
		     unsigned long offset, unsigned long npages, bool readonly,
		     dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
		     struct dma_fence **fence, bool flush_tlb)
{
	struct amdgpu_device *adev = pdd->dev->adev;
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t pte_flags;
	unsigned long last_start;
	int last_domain;
	int r = 0;
	int64_t i, j;

	last_start = prange->start + offset;

	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
		 last_start, last_start + npages - 1, readonly);

	for (i = offset; i < offset + npages; i++) {
		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;

		/* Collect all pages in the same address range and memory domain
		 * that can be mapped with a single call to update mapping.
		 */
		if (i < offset + npages - 1 &&
		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
			continue;

		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");

		pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
		if (readonly)
			pte_flags &= ~AMDGPU_PTE_WRITEABLE;

		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
			 prange->svms, last_start, prange->start + i,
			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
			 pte_flags);

		/* For dGPU mode, the same vm_manager allocates VRAM for
		 * different memory partitions based on fpfn/lpfn, so use the
		 * same vm_manager.vram_base_offset regardless of the memory
		 * partition.
		 */
		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
					   NULL, last_start, prange->start + i,
					   pte_flags,
					   (last_start - prange->start) << PAGE_SHIFT,
					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
					   NULL, dma_addr, &vm->last_update);

		for (j = last_start - prange->start; j <= i; j++)
			dma_addr[j] |= last_domain;

		if (r) {
			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
			goto out;
		}
		last_start = prange->start + i + 1;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		pr_debug("failed %d to update directories 0x%lx\n", r,
			 prange->start);
		goto out;
	}

	if (fence)
		*fence = dma_fence_get(vm->last_update);

out:
	return r;
}

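/* Map the given pages of prange on every GPU set in @bitmap. A GPU that
 * cannot access VRAM on the BO's node (different device and not in the same
 * XGMI hive) is skipped rather than treated as an error.
 */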
static int
svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
		      unsigned long npages, bool readonly,
		      unsigned long *bitmap, bool wait, bool flush_tlb)
{
	struct kfd_process_device *pdd;
	struct amdgpu_device *bo_adev = NULL;
	struct kfd_process *p;
	struct dma_fence *fence = NULL;
	uint32_t gpuidx;
	int r = 0;

	if (prange->svm_bo && prange->ttm_res)
		bo_adev = prange->svm_bo->node->adev;

	p = container_of(prange->svms, struct kfd_process, svms);
	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		pdd = kfd_bind_process_to_device(pdd->dev, p);
		if (IS_ERR(pdd))
			return -EINVAL;

		if (bo_adev && pdd->dev->adev != bo_adev &&
		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
			pr_debug("cannot map to device idx %d\n", gpuidx);
			continue;
		}

		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
					 prange->dma_addr[gpuidx],
					 bo_adev, wait ? &fence : NULL,
					 flush_tlb);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r) {
				pr_debug("failed %d to dma fence wait\n", r);
				break;
			}
		}

		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
	}

	return r;
}

struct svm_validate_context {
	struct kfd_process *process;
	struct svm_range *prange;
	bool intr;
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct drm_exec exec;
};

static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
{
	struct kfd_process_device *pdd;
	struct amdgpu_vm *vm;
	uint32_t gpuidx;
	int r;

	drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0, 0);
	drm_exec_until_all_locked(&ctx->exec) {
		for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
			pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
			if (!pdd) {
				pr_debug("failed to find device idx %d\n", gpuidx);
				r = -EINVAL;
				goto unreserve_out;
			}
			vm = drm_priv_to_vm(pdd->drm_priv);

			r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
			drm_exec_retry_on_contention(&ctx->exec);
			if (unlikely(r)) {
				pr_debug("failed %d to reserve bo\n", r);
				goto unreserve_out;
			}
		}
	}

	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			r = -EINVAL;
			goto unreserve_out;
		}

		r = amdgpu_vm_validate(pdd->dev->adev,
				       drm_priv_to_vm(pdd->drm_priv), NULL,
				       svm_range_bo_validate, NULL);
		if (r) {
			pr_debug("failed %d validate pt bos\n", r);
			goto unreserve_out;
		}
	}

	return 0;

unreserve_out:
	drm_exec_fini(&ctx->exec);
	return r;
}

static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
{
	drm_exec_fini(&ctx->exec);
}

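/* Return the pgmap owner of a GPU's device-private pages. Validation passes
 * this owner to the HMM page walk so that device-private pages with a
 * matching owner can be returned without migration; a NULL owner is used
 * when the selected GPUs have different owners.
 */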
kfd_svm_page_owner(struct kfd_process * p,int32_t gpuidx)1587 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1588 {
1589 struct kfd_process_device *pdd;
1590
1591 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1592 if (!pdd)
1593 return NULL;
1594
1595 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1596 }
1597
1598 /*
1599 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1600 *
1601 * To prevent concurrent destruction or change of range attributes, the
1602 * svm_read_lock must be held. The caller must not hold the svm_write_lock
1603 * because that would block concurrent evictions and lead to deadlocks. To
1604 * serialize concurrent migrations or validations of the same range, the
1605 * prange->migrate_mutex must be held.
1606 *
1607 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1608 * eviction fence.
1609 *
1610 * The following sequence ensures race-free validation and GPU mapping:
1611 *
1612 * 1. Reserve page table (and SVM BO if range is in VRAM)
1613 * 2. hmm_range_fault to get page addresses (if system memory)
1614 * 3. DMA-map pages (if system memory)
1615 * 4-a. Take notifier lock
1616 * 4-b. Check that pages still valid (mmu_interval_read_retry)
1617 * 4-c. Check that the range was not split or otherwise invalidated
1618 * 4-d. Update GPU page table
1619 * 4.e. Release notifier lock
1620 * 5. Release page table (and SVM BO) reservation
1621 */
svm_range_validate_and_map(struct mm_struct * mm,unsigned long map_start,unsigned long map_last,struct svm_range * prange,int32_t gpuidx,bool intr,bool wait,bool flush_tlb)1622 static int svm_range_validate_and_map(struct mm_struct *mm,
1623 unsigned long map_start, unsigned long map_last,
1624 struct svm_range *prange, int32_t gpuidx,
1625 bool intr, bool wait, bool flush_tlb)
1626 {
1627 struct svm_validate_context *ctx;
1628 unsigned long start, end, addr;
1629 struct kfd_process *p;
1630 void *owner;
1631 int32_t idx;
1632 int r = 0;
1633
1634 ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
1635 if (!ctx)
1636 return -ENOMEM;
1637 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1638 ctx->prange = prange;
1639 ctx->intr = intr;
1640
1641 if (gpuidx < MAX_GPU_INSTANCE) {
1642 bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1643 bitmap_set(ctx->bitmap, gpuidx, 1);
1644 } else if (ctx->process->xnack_enabled) {
1645 bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1646
1647 /* If prefetch range to GPU, or GPU retry fault migrate range to
1648 * GPU, which has ACCESS attribute to the range, create mapping
1649 * on that GPU.
1650 */
1651 if (prange->actual_loc) {
1652 gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1653 prange->actual_loc);
1654 if (gpuidx < 0) {
1655 WARN_ONCE(1, "failed get device by id 0x%x\n",
1656 prange->actual_loc);
1657 r = -EINVAL;
1658 goto free_ctx;
1659 }
1660 if (test_bit(gpuidx, prange->bitmap_access))
1661 bitmap_set(ctx->bitmap, gpuidx, 1);
1662 }
1663
1664 /*
1665 * If prange is already mapped or with always mapped flag,
1666 * update mapping on GPUs with ACCESS attribute
1667 */
1668 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1669 if (prange->mapped_to_gpu ||
1670 prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1671 bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1672 }
1673 } else {
1674 bitmap_or(ctx->bitmap, prange->bitmap_access,
1675 prange->bitmap_aip, MAX_GPU_INSTANCE);
1676 }
1677
1678 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1679 r = 0;
1680 goto free_ctx;
1681 }
1682
1683 if (prange->actual_loc && !prange->ttm_res) {
1684 /* This should never happen. actual_loc gets set by
1685 * svm_migrate_ram_to_vram after allocating a BO.
1686 */
1687 WARN_ONCE(1, "VRAM BO missing during validation\n");
1688 r = -EINVAL;
1689 goto free_ctx;
1690 }
1691
1692 r = svm_range_reserve_bos(ctx, intr);
1693 if (r)
1694 goto free_ctx;
1695
1696 p = container_of(prange->svms, struct kfd_process, svms);
1697 owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1698 MAX_GPU_INSTANCE));
1699 for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1700 if (kfd_svm_page_owner(p, idx) != owner) {
1701 owner = NULL;
1702 break;
1703 }
1704 }
1705
1706 start = map_start << PAGE_SHIFT;
1707 end = (map_last + 1) << PAGE_SHIFT;
1708 for (addr = start; !r && addr < end; ) {
1709 struct hmm_range *hmm_range = NULL;
1710 unsigned long map_start_vma;
1711 unsigned long map_last_vma;
1712 struct vm_area_struct *vma;
1713 unsigned long next = 0;
1714 unsigned long offset;
1715 unsigned long npages;
1716 bool readonly;
1717
1718 vma = vma_lookup(mm, addr);
1719 if (vma) {
1720 readonly = !(vma->vm_flags & VM_WRITE);
1721
1722 next = min(vma->vm_end, end);
1723 npages = (next - addr) >> PAGE_SHIFT;
1724 WRITE_ONCE(p->svms.faulting_task, current);
1725 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1726 readonly, owner, NULL,
1727 &hmm_range);
1728 WRITE_ONCE(p->svms.faulting_task, NULL);
1729 if (r)
1730 pr_debug("failed %d to get svm range pages\n", r);
1731 } else {
1732 r = -EFAULT;
1733 }
1734
1735 if (!r) {
1736 offset = (addr >> PAGE_SHIFT) - prange->start;
1737 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1738 hmm_range->hmm_pfns);
1739 if (r)
1740 pr_debug("failed %d to dma map range\n", r);
1741 }
1742
1743 svm_range_lock(prange);
1744
1745 /* Free backing memory of hmm_range if it was initialized.
1746 * Override the return value with -EAGAIN only if prior returns
1747 * were successful.
1748 */
1749 if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
1750 pr_debug("hmm update the range, need validate again\n");
1751 r = -EAGAIN;
1752 }
1753
1754 if (!r && !list_empty(&prange->child_list)) {
1755 pr_debug("range split by unmap in parallel, validate again\n");
1756 r = -EAGAIN;
1757 }
1758
1759 if (!r) {
1760 map_start_vma = max(map_start, prange->start + offset);
1761 map_last_vma = min(map_last, prange->start + offset + npages - 1);
1762 if (map_start_vma <= map_last_vma) {
1763 offset = map_start_vma - prange->start;
1764 npages = map_last_vma - map_start_vma + 1;
1765 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1766 ctx->bitmap, wait, flush_tlb);
1767 }
1768 }
1769
1770 if (!r && next == end)
1771 prange->mapped_to_gpu = true;
1772
1773 svm_range_unlock(prange);
1774
1775 addr = next;
1776 }
1777
1778 svm_range_unreserve_bos(ctx);
1779 if (!r)
1780 prange->validate_timestamp = ktime_get_boottime();
1781
1782 free_ctx:
1783 kfree(ctx);
1784
1785 return r;
1786 }
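/* Illustrative sketch (not part of the driver): how a caller is expected to
 * handle the -EAGAIN contract of svm_range_validate_and_map() when HMM
 * invalidated the pages or the range was split concurrently. The whole
 * validate-and-map pass is retried under prange->migrate_mutex:
 *
 *	mutex_lock(&prange->migrate_mutex);
 *	do {
 *		r = svm_range_validate_and_map(mm, prange->start, prange->last,
 *					       prange, MAX_GPU_INSTANCE,
 *					       false, true, false);
 *	} while (r == -EAGAIN);
 *	mutex_unlock(&prange->migrate_mutex);
 *
 * Actual callers (e.g. svm_range_restore_work below) may instead reschedule
 * themselves on failure rather than loop inline.
 */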
1787
1788 /**
1789 * svm_range_list_lock_and_flush_work - flush pending deferred work
1790 *
1791 * @svms: the svm range list
1792 * @mm: the mm structure
1793 *
1794 * Context: Returns with mmap write lock held, pending deferred work flushed
1795 *
1796 */
1797 void
1798 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1799 struct mm_struct *mm)
1800 {
1801 retry_flush_work:
1802 flush_work(&svms->deferred_list_work);
1803 mmap_write_lock(mm);
1804
1805 if (list_empty(&svms->deferred_range_list))
1806 return;
1807 mmap_write_unlock(mm);
1808 pr_debug("retry flush\n");
1809 goto retry_flush_work;
1810 }
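/* Usage sketch (illustrative only): the helper returns with the mmap write
 * lock held and all deferred work flushed, so a typical caller looks like:
 *
 *	svm_range_list_lock_and_flush_work(&p->svms, mm);
 *	mutex_lock(&p->svms.lock);
 *	... modify the range list ...
 *	mutex_unlock(&p->svms.lock);
 *	mmap_write_unlock(mm);
 *
 * The retry loop above is needed because deferred work queued between
 * flush_work and mmap_write_lock would otherwise be missed.
 */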
1811
1812 static void svm_range_restore_work(struct work_struct *work)
1813 {
1814 struct delayed_work *dwork = to_delayed_work(work);
1815 struct amdkfd_process_info *process_info;
1816 struct svm_range_list *svms;
1817 struct svm_range *prange;
1818 struct kfd_process *p;
1819 struct mm_struct *mm;
1820 int evicted_ranges;
1821 int invalid;
1822 int r;
1823
1824 svms = container_of(dwork, struct svm_range_list, restore_work);
1825 evicted_ranges = atomic_read(&svms->evicted_ranges);
1826 if (!evicted_ranges)
1827 return;
1828
1829 pr_debug("restore svm ranges\n");
1830
1831 p = container_of(svms, struct kfd_process, svms);
1832 process_info = p->kgd_process_info;
1833
1834 /* Hold an mm reference while svm_range_validate_and_map maps the ranges */
1835 mm = get_task_mm(p->lead_thread);
1836 if (!mm) {
1837 pr_debug("svms 0x%p process mm gone\n", svms);
1838 return;
1839 }
1840
1841 mutex_lock(&process_info->lock);
1842 svm_range_list_lock_and_flush_work(svms, mm);
1843 mutex_lock(&svms->lock);
1844
1845 evicted_ranges = atomic_read(&svms->evicted_ranges);
1846
1847 list_for_each_entry(prange, &svms->list, list) {
1848 invalid = atomic_read(&prange->invalid);
1849 if (!invalid)
1850 continue;
1851
1852 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1853 prange->svms, prange, prange->start, prange->last,
1854 invalid);
1855
1856 /*
1857 * If the range is migrating, wait until the migration is done.
1858 */
1859 mutex_lock(&prange->migrate_mutex);
1860
1861 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
1862 MAX_GPU_INSTANCE, false, true, false);
1863 if (r)
1864 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1865 prange->start);
1866
1867 mutex_unlock(&prange->migrate_mutex);
1868 if (r)
1869 goto out_reschedule;
1870
1871 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1872 goto out_reschedule;
1873 }
1874
1875 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1876 evicted_ranges)
1877 goto out_reschedule;
1878
1879 evicted_ranges = 0;
1880
1881 r = kgd2kfd_resume_mm(mm);
1882 if (r) {
1883 /* No recovery from this failure. Probably the CP is
1884 * hanging. No point trying again.
1885 */
1886 pr_debug("failed %d to resume KFD\n", r);
1887 }
1888
1889 pr_debug("restore svm ranges successfully\n");
1890
1891 out_reschedule:
1892 mutex_unlock(&svms->lock);
1893 mmap_write_unlock(mm);
1894 mutex_unlock(&process_info->lock);
1895
1896 /* If validation failed, reschedule another attempt */
1897 if (evicted_ranges) {
1898 pr_debug("reschedule to restore svm range\n");
1899 queue_delayed_work(system_freezable_wq, &svms->restore_work,
1900 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1901
1902 kfd_smi_event_queue_restore_rescheduled(mm);
1903 }
1904 mmput(mm);
1905 }
1906
1907 /**
1908 * svm_range_evict - evict svm range
1909 * @prange: svm range structure
1910 * @mm: current process mm_struct
1911 * @start: first page of the range being invalidated, in pages
1912 * @last: last page of the range being invalidated, in pages
1913 * @event: mmu notifier event when range is evicted or migrated
1914 *
1915 * Stop all queues of the process to ensure the GPU doesn't access the memory,
1916 * then return to let the CPU evict the buffer and proceed with the CPU page
1917 * table update.
1918 *
1919 * No lock is needed to sync CPU page table invalidation with GPU execution.
1920 * If an invalidation happens while the restore work is running, the restore
1921 * work restarts to map the latest CPU pages to the GPU, then starts the queues.
1922 */
1923 static int
1924 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1925 unsigned long start, unsigned long last,
1926 enum mmu_notifier_event event)
1927 {
1928 struct svm_range_list *svms = prange->svms;
1929 struct svm_range *pchild;
1930 struct kfd_process *p;
1931 int r = 0;
1932
1933 p = container_of(svms, struct kfd_process, svms);
1934
1935 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1936 svms, prange->start, prange->last, start, last);
1937
1938 if (!p->xnack_enabled ||
1939 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1940 int evicted_ranges;
1941 bool mapped = prange->mapped_to_gpu;
1942
1943 list_for_each_entry(pchild, &prange->child_list, child_list) {
1944 if (!pchild->mapped_to_gpu)
1945 continue;
1946 mapped = true;
1947 mutex_lock_nested(&pchild->lock, 1);
1948 if (pchild->start <= last && pchild->last >= start) {
1949 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1950 pchild->start, pchild->last);
1951 atomic_inc(&pchild->invalid);
1952 }
1953 mutex_unlock(&pchild->lock);
1954 }
1955
1956 if (!mapped)
1957 return r;
1958
1959 if (prange->start <= last && prange->last >= start)
1960 atomic_inc(&prange->invalid);
1961
1962 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1963 if (evicted_ranges != 1)
1964 return r;
1965
1966 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1967 prange->svms, prange->start, prange->last);
1968
1969 /* First eviction, stop the queues */
1970 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
1971 if (r)
1972 pr_debug("failed to quiesce KFD\n");
1973
1974 pr_debug("schedule to restore svm %p ranges\n", svms);
1975 queue_delayed_work(system_freezable_wq, &svms->restore_work,
1976 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1977 } else {
1978 unsigned long s, l;
1979 uint32_t trigger;
1980
1981 if (event == MMU_NOTIFY_MIGRATE)
1982 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
1983 else
1984 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
1985
1986 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1987 prange->svms, start, last);
1988 list_for_each_entry(pchild, &prange->child_list, child_list) {
1989 mutex_lock_nested(&pchild->lock, 1);
1990 s = max(start, pchild->start);
1991 l = min(last, pchild->last);
1992 if (l >= s)
1993 svm_range_unmap_from_gpus(pchild, s, l, trigger);
1994 mutex_unlock(&pchild->lock);
1995 }
1996 s = max(start, prange->start);
1997 l = min(last, prange->last);
1998 if (l >= s)
1999 svm_range_unmap_from_gpus(prange, s, l, trigger);
2000 }
2001
2002 return r;
2003 }
2004
2005 static struct svm_range *svm_range_clone(struct svm_range *old)
2006 {
2007 struct svm_range *new;
2008
2009 new = svm_range_new(old->svms, old->start, old->last, false);
2010 if (!new)
2011 return NULL;
2012 if (svm_range_copy_dma_addrs(new, old)) {
2013 svm_range_free(new, false);
2014 return NULL;
2015 }
2016 if (old->svm_bo) {
2017 new->ttm_res = old->ttm_res;
2018 new->offset = old->offset;
2019 new->svm_bo = svm_range_bo_ref(old->svm_bo);
2020 spin_lock(&new->svm_bo->list_lock);
2021 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
2022 spin_unlock(&new->svm_bo->list_lock);
2023 }
2024 new->flags = old->flags;
2025 new->preferred_loc = old->preferred_loc;
2026 new->prefetch_loc = old->prefetch_loc;
2027 new->actual_loc = old->actual_loc;
2028 new->granularity = old->granularity;
2029 new->mapped_to_gpu = old->mapped_to_gpu;
2030 new->vram_pages = old->vram_pages;
2031 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
2032 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
2033 atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
2034
2035 return new;
2036 }
2037
2038 void svm_range_set_max_pages(struct amdgpu_device *adev)
2039 {
2040 uint64_t max_pages;
2041 uint64_t pages, _pages;
2042 uint64_t min_pages = 0;
2043 int i, id;
2044
2045 for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
2046 if (adev->kfd.dev->nodes[i]->xcp)
2047 id = adev->kfd.dev->nodes[i]->xcp->id;
2048 else
2049 id = -1;
2050 pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
2051 pages = clamp(pages, 1ULL << 9, 1ULL << 18);
2052 pages = rounddown_pow_of_two(pages);
2053 min_pages = min_not_zero(min_pages, pages);
2054 }
2055
2056 do {
2057 max_pages = READ_ONCE(max_svm_range_pages);
2058 _pages = min_not_zero(max_pages, min_pages);
2059 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2060 }
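/* Worked example of the sizing math above (hypothetical device): with 24 GiB
 * of VRAM per partition, KFD_XCP_MEMORY_SIZE >> 17 gives 1/32 of VRAM in
 * 4 KiB pages: 24 GiB >> 17 = 196608 pages. clamp() keeps the value within
 * [2^9, 2^18] pages (2 MiB .. 1 GiB), and rounddown_pow_of_two() yields
 * 131072 pages = 512 MiB. max_svm_range_pages then settles on the minimum of
 * this value across all GPUs via the cmpxchg loop.
 */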
2061
2062 static int
2063 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2064 uint64_t max_pages, struct list_head *insert_list,
2065 struct list_head *update_list)
2066 {
2067 struct svm_range *prange;
2068 uint64_t l;
2069
2070 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
2071 max_pages, start, last);
2072
2073 while (last >= start) {
2074 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2075
2076 prange = svm_range_new(svms, start, l, true);
2077 if (!prange)
2078 return -ENOMEM;
2079 list_add(&prange->list, insert_list);
2080 list_add(&prange->update_list, update_list);
2081
2082 start = l + 1;
2083 }
2084 return 0;
2085 }
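/* Worked example (illustrative): with max_pages = 0x200 the loop above cuts
 * [0x300 0x8ff] at 0x200-aligned boundaries into
 * [0x300 0x3ff], [0x400 0x5ff], [0x600 0x7ff] and [0x800 0x8ff],
 * so no new range ever crosses a max_pages-aligned boundary.
 */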
2086
2087 /**
2088 * svm_range_add - add svm range and handle overlap
2089 * @p: the process to add the range to
2090 * @start: range start address, in pages
2091 * @size: range size, in pages
2092 * @nattr: number of attributes
2093 * @attrs: array of attributes
2094 * @update_list: output, the ranges need validate and update GPU mapping
2095 * @insert_list: output, the ranges need insert to svms
2096 * @remove_list: output, the ranges are replaced and need remove from svms
2097 * @remap_list: output, remap unaligned svm ranges
2098 *
2099 * Check if the virtual address range has overlap with any existing ranges,
2100 * split partly overlapping ranges and add new ranges in the gaps. All changes
2101 * should be applied to the range_list and interval tree transactionally. If
2102 * any range split or allocation fails, the entire update fails. Therefore any
2103 * existing overlapping svm_ranges are cloned and the original svm_ranges left
2104 * unchanged.
2105 *
2106 * If the transaction succeeds, the caller can update and insert clones and
2107 * new ranges, then free the originals.
2108 *
2109 * Otherwise the caller can free the clones and new ranges, while the old
2110 * svm_ranges remain unchanged.
2111 *
2112 * Context: Process context, caller must hold svms->lock
2113 *
2114 * Return:
2115 * 0 - OK, otherwise error code
2116 */
2117 static int
2118 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2119 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2120 struct list_head *update_list, struct list_head *insert_list,
2121 struct list_head *remove_list, struct list_head *remap_list)
2122 {
2123 unsigned long last = start + size - 1UL;
2124 struct svm_range_list *svms = &p->svms;
2125 struct interval_tree_node *node;
2126 struct svm_range *prange;
2127 struct svm_range *tmp;
2128 struct list_head new_list;
2129 int r = 0;
2130
2131 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2132
2133 INIT_LIST_HEAD(update_list);
2134 INIT_LIST_HEAD(insert_list);
2135 INIT_LIST_HEAD(remove_list);
2136 INIT_LIST_HEAD(&new_list);
2137 INIT_LIST_HEAD(remap_list);
2138
2139 node = interval_tree_iter_first(&svms->objects, start, last);
2140 while (node) {
2141 struct interval_tree_node *next;
2142 unsigned long next_start;
2143
2144 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2145 node->last);
2146
2147 prange = container_of(node, struct svm_range, it_node);
2148 next = interval_tree_iter_next(node, start, last);
2149 next_start = min(node->last, last) + 1;
2150
2151 if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2152 prange->mapped_to_gpu) {
2153 /* nothing to do */
2154 } else if (node->start < start || node->last > last) {
2155 /* node intersects the update range and its attributes
2156 * will change. Clone and split it, apply updates only
2157 * to the overlapping part
2158 */
2159 struct svm_range *old = prange;
2160
2161 prange = svm_range_clone(old);
2162 if (!prange) {
2163 r = -ENOMEM;
2164 goto out;
2165 }
2166
2167 list_add(&old->update_list, remove_list);
2168 list_add(&prange->list, insert_list);
2169 list_add(&prange->update_list, update_list);
2170
2171 if (node->start < start) {
2172 pr_debug("change old range start\n");
2173 r = svm_range_split_head(prange, start,
2174 insert_list, remap_list);
2175 if (r)
2176 goto out;
2177 }
2178 if (node->last > last) {
2179 pr_debug("change old range last\n");
2180 r = svm_range_split_tail(prange, last,
2181 insert_list, remap_list);
2182 if (r)
2183 goto out;
2184 }
2185 } else {
2186 /* The node is contained within start..last,
2187 * just update it
2188 */
2189 list_add(&prange->update_list, update_list);
2190 }
2191
2192 /* insert a new node if needed */
2193 if (node->start > start) {
2194 r = svm_range_split_new(svms, start, node->start - 1,
2195 READ_ONCE(max_svm_range_pages),
2196 &new_list, update_list);
2197 if (r)
2198 goto out;
2199 }
2200
2201 node = next;
2202 start = next_start;
2203 }
2204
2205 /* add a final range at the end if needed */
2206 if (start <= last)
2207 r = svm_range_split_new(svms, start, last,
2208 READ_ONCE(max_svm_range_pages),
2209 &new_list, update_list);
2210
2211 out:
2212 if (r) {
2213 list_for_each_entry_safe(prange, tmp, insert_list, list)
2214 svm_range_free(prange, false);
2215 list_for_each_entry_safe(prange, tmp, &new_list, list)
2216 svm_range_free(prange, true);
2217 } else {
2218 list_splice(&new_list, insert_list);
2219 }
2220
2221 return r;
2222 }
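/* Caller-side sketch of the transaction described above (illustrative only,
 * error handling and notifier updates elided): on success the caller applies
 * the clones and new ranges and unlinks the replaced originals; on failure
 * svm_range_add has already freed everything it allocated:
 *
 *	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
 *			  &insert_list, &remove_list, &remap_list);
 *	if (!r) {
 *		list_for_each_entry_safe(prange, next, &insert_list, list)
 *			svm_range_add_to_svms(prange);
 *		list_for_each_entry_safe(prange, next, &remove_list, update_list)
 *			svm_range_unlink(prange);
 *	}
 */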
2223
2224 static void
2225 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2226 struct svm_range *prange)
2227 {
2228 unsigned long start;
2229 unsigned long last;
2230
2231 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2232 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2233
2234 if (prange->start == start && prange->last == last)
2235 return;
2236
2237 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2238 prange->svms, prange, start, last, prange->start,
2239 prange->last);
2240
2241 if (start != 0 && last != 0) {
2242 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2243 svm_range_remove_notifier(prange);
2244 }
2245 prange->it_node.start = prange->start;
2246 prange->it_node.last = prange->last;
2247
2248 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2249 svm_range_add_notifier_locked(mm, prange);
2250 }
2251
2252 static void
2253 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2254 struct mm_struct *mm)
2255 {
2256 switch (prange->work_item.op) {
2257 case SVM_OP_NULL:
2258 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2259 svms, prange, prange->start, prange->last);
2260 break;
2261 case SVM_OP_UNMAP_RANGE:
2262 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2263 svms, prange, prange->start, prange->last);
2264 svm_range_unlink(prange);
2265 svm_range_remove_notifier(prange);
2266 svm_range_free(prange, true);
2267 break;
2268 case SVM_OP_UPDATE_RANGE_NOTIFIER:
2269 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2270 svms, prange, prange->start, prange->last);
2271 svm_range_update_notifier_and_interval_tree(mm, prange);
2272 break;
2273 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2274 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2275 svms, prange, prange->start, prange->last);
2276 svm_range_update_notifier_and_interval_tree(mm, prange);
2277 /* TODO: implement deferred validation and mapping */
2278 break;
2279 case SVM_OP_ADD_RANGE:
2280 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2281 prange->start, prange->last);
2282 svm_range_add_to_svms(prange);
2283 svm_range_add_notifier_locked(mm, prange);
2284 break;
2285 case SVM_OP_ADD_RANGE_AND_MAP:
2286 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2287 prange, prange->start, prange->last);
2288 svm_range_add_to_svms(prange);
2289 svm_range_add_notifier_locked(mm, prange);
2290 /* TODO: implement deferred validation and mapping */
2291 break;
2292 default:
2293 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2294 prange->work_item.op);
2295 }
2296 }
2297
2298 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2299 {
2300 struct kfd_process_device *pdd;
2301 struct kfd_process *p;
2302 uint32_t i;
2303
2304 p = container_of(svms, struct kfd_process, svms);
2305
2306 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2307 pdd = p->pdds[i];
2308 if (!pdd)
2309 continue;
2310
2311 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2312
2313 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2314 pdd->dev->adev->irq.retry_cam_enabled ?
2315 &pdd->dev->adev->irq.ih :
2316 &pdd->dev->adev->irq.ih1);
2317
2318 if (pdd->dev->adev->irq.retry_cam_enabled)
2319 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2320 &pdd->dev->adev->irq.ih_soft);
2321
2322
2323 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2324 }
2325 }
2326
2327 static void svm_range_deferred_list_work(struct work_struct *work)
2328 {
2329 struct svm_range_list *svms;
2330 struct svm_range *prange;
2331 struct mm_struct *mm;
2332
2333 svms = container_of(work, struct svm_range_list, deferred_list_work);
2334 pr_debug("enter svms 0x%p\n", svms);
2335
2336 spin_lock(&svms->deferred_list_lock);
2337 while (!list_empty(&svms->deferred_range_list)) {
2338 prange = list_first_entry(&svms->deferred_range_list,
2339 struct svm_range, deferred_list);
2340 spin_unlock(&svms->deferred_list_lock);
2341
2342 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2343 prange->start, prange->last, prange->work_item.op);
2344
2345 mm = prange->work_item.mm;
2346
2347 mmap_write_lock(mm);
2348
2349 /* Removal from deferred_list must happen inside the mmap write
2350 * lock, to avoid two races:
2351 * 1. unmap_from_cpu may change work_item.op and add the range
2352 * to deferred_list again, causing a use-after-free bug.
2353 * 2. svm_range_list_lock_and_flush_work may take the mmap write
2354 * lock and continue because deferred_list is empty, while the
2355 * deferred_list work is actually still waiting for the mmap lock.
2356 */
2357 spin_lock(&svms->deferred_list_lock);
2358 list_del_init(&prange->deferred_list);
2359 spin_unlock(&svms->deferred_list_lock);
2360
2361 mutex_lock(&svms->lock);
2362 mutex_lock(&prange->migrate_mutex);
2363 while (!list_empty(&prange->child_list)) {
2364 struct svm_range *pchild;
2365
2366 pchild = list_first_entry(&prange->child_list,
2367 struct svm_range, child_list);
2368 pr_debug("child prange 0x%p op %d\n", pchild,
2369 pchild->work_item.op);
2370 list_del_init(&pchild->child_list);
2371 svm_range_handle_list_op(svms, pchild, mm);
2372 }
2373 mutex_unlock(&prange->migrate_mutex);
2374
2375 svm_range_handle_list_op(svms, prange, mm);
2376 mutex_unlock(&svms->lock);
2377 mmap_write_unlock(mm);
2378
2379 /* Pairs with mmget in svm_range_add_list_work. If this drops the
2380 * last mm refcount, schedule the release work to avoid circular locking
2381 */
2382 mmput_async(mm);
2383
2384 spin_lock(&svms->deferred_list_lock);
2385 }
2386 spin_unlock(&svms->deferred_list_lock);
2387 pr_debug("exit svms 0x%p\n", svms);
2388 }
2389
2390 void
2391 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2392 struct mm_struct *mm, enum svm_work_list_ops op)
2393 {
2394 spin_lock(&svms->deferred_list_lock);
2395 /* if prange is on the deferred list */
2396 if (!list_empty(&prange->deferred_list)) {
2397 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2398 WARN_ONCE(prange->work_item.mm != mm, "mismatched mm\n");
2399 if (op != SVM_OP_NULL &&
2400 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2401 prange->work_item.op = op;
2402 } else {
2403 prange->work_item.op = op;
2404
2405 /* Pairs with mmput in deferred_list_work */
2406 mmget(mm);
2407 prange->work_item.mm = mm;
2408 list_add_tail(&prange->deferred_list,
2409 &prange->svms->deferred_range_list);
2410 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2411 prange, prange->start, prange->last, op);
2412 }
2413 spin_unlock(&svms->deferred_list_lock);
2414 }
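/* Note on the coalescing above: if the range is already queued, a new op only
 * overwrites the pending one when the new op is not SVM_OP_NULL and the
 * pending op is not SVM_OP_UNMAP_RANGE. SVM_OP_UNMAP_RANGE is therefore
 * sticky: once a range is scheduled for removal, later updates cannot revive
 * it. For example (illustrative):
 *
 *	svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
 *	svm_range_add_list_work(svms, prange, mm, SVM_OP_ADD_RANGE);
 *	// work_item.op is still SVM_OP_UNMAP_RANGE
 */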
2415
2416 void schedule_deferred_list_work(struct svm_range_list *svms)
2417 {
2418 spin_lock(&svms->deferred_list_lock);
2419 if (!list_empty(&svms->deferred_range_list))
2420 schedule_work(&svms->deferred_list_work);
2421 spin_unlock(&svms->deferred_list_lock);
2422 }
2423
2424 static void
2425 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2426 struct svm_range *prange, unsigned long start,
2427 unsigned long last)
2428 {
2429 struct svm_range *head;
2430 struct svm_range *tail;
2431
2432 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2433 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2434 prange->start, prange->last);
2435 return;
2436 }
2437 if (start > prange->last || last < prange->start)
2438 return;
2439
2440 head = tail = prange;
2441 if (start > prange->start)
2442 svm_range_split(prange, prange->start, start - 1, &tail);
2443 if (last < tail->last)
2444 svm_range_split(tail, last + 1, tail->last, &head);
2445
2446 if (head != prange && tail != prange) {
2447 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2448 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2449 } else if (tail != prange) {
2450 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2451 } else if (head != prange) {
2452 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2453 } else if (parent != prange) {
2454 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2455 }
2456 }
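/* Worked example of the splits above (illustrative): unmapping [0x40 0x7f]
 * from prange [0x00 0xff] first splits off tail [0x40 0xff] (prange keeps
 * [0x00 0x3f]), then splits tail into head [0x40 0x7f] and tail [0x80 0xff].
 * head, the unmapped middle, becomes a child with SVM_OP_UNMAP_RANGE, and
 * tail is re-added with SVM_OP_ADD_RANGE.
 */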
2457
2458 static void
2459 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2460 unsigned long start, unsigned long last)
2461 {
2462 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2463 struct svm_range_list *svms;
2464 struct svm_range *pchild;
2465 struct kfd_process *p;
2466 unsigned long s, l;
2467 bool unmap_parent;
2468 uint32_t i;
2469
2470 if (atomic_read(&prange->queue_refcount)) {
2471 int r;
2472
2473 pr_warn("Freeing queue vital buffer 0x%lx, queue evicted\n",
2474 prange->start << PAGE_SHIFT);
2475 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2476 if (r)
2477 pr_debug("failed %d to quiesce KFD queues\n", r);
2478 }
2479
2480 p = kfd_lookup_process_by_mm(mm);
2481 if (!p)
2482 return;
2483 svms = &p->svms;
2484
2485 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2486 prange, prange->start, prange->last, start, last);
2487
2488 /* Calculate timestamps used to decide which page faults must be
2489 * dropped or handled before pages are unmapped from the GPU VM
2490 */
2491 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2492 struct kfd_process_device *pdd;
2493 struct amdgpu_device *adev;
2494 struct amdgpu_ih_ring *ih;
2495 uint32_t checkpoint_wptr;
2496
2497 pdd = p->pdds[i];
2498 if (!pdd)
2499 continue;
2500
2501 adev = pdd->dev->adev;
2502
2503 /* Check and drain ih1 ring if cam not available */
2504 if (adev->irq.ih1.ring_size) {
2505 ih = &adev->irq.ih1;
2506 checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2507 if (ih->rptr != checkpoint_wptr) {
2508 svms->checkpoint_ts[i] =
2509 amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2510 continue;
2511 }
2512 }
2513
2514 /* check if dev->irq.ih_soft is not empty */
2515 ih = &adev->irq.ih_soft;
2516 checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2517 if (ih->rptr != checkpoint_wptr)
2518 svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2519 }
2520
2521 unmap_parent = start <= prange->start && last >= prange->last;
2522
2523 list_for_each_entry(pchild, &prange->child_list, child_list) {
2524 mutex_lock_nested(&pchild->lock, 1);
2525 s = max(start, pchild->start);
2526 l = min(last, pchild->last);
2527 if (l >= s)
2528 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2529 svm_range_unmap_split(mm, prange, pchild, start, last);
2530 mutex_unlock(&pchild->lock);
2531 }
2532 s = max(start, prange->start);
2533 l = min(last, prange->last);
2534 if (l >= s)
2535 svm_range_unmap_from_gpus(prange, s, l, trigger);
2536 svm_range_unmap_split(mm, prange, prange, start, last);
2537
2538 if (unmap_parent)
2539 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2540 else
2541 svm_range_add_list_work(svms, prange, mm,
2542 SVM_OP_UPDATE_RANGE_NOTIFIER);
2543 schedule_deferred_list_work(svms);
2544
2545 kfd_unref_process(p);
2546 }
2547
2548 /**
2549 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2550 * @mni: mmu_interval_notifier struct
2551 * @range: mmu_notifier_range struct
2552 * @cur_seq: value to pass to mmu_interval_set_seq()
2553 *
2554 * If the event is MMU_NOTIFY_UNMAP, the callback is from a CPU unmap of the
2555 * range; otherwise it is from migration or a CPU page invalidation.
2556 *
2557 * For an unmap event, unmap the range from the GPUs, remove prange from svms
2558 * in a delayed work thread, and split prange if only part of it is unmapped.
2559 *
2560 * For an invalidation event, if GPU retry fault is not enabled, evict the
2561 * queues, then schedule svm_range_restore_work to update the GPU mapping and
2562 * resume the queues. If GPU retry fault is enabled, unmap the svm range from
2563 * the GPU; the retry fault will update the GPU mapping to recover.
2564 *
2565 * Context: mmap lock, notifier_invalidate_start lock are held
2566 * for invalidate event, prange lock is held if this is from migration
2567 */
2568 static bool
2569 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2570 const struct mmu_notifier_range *range,
2571 unsigned long cur_seq)
2572 {
2573 struct svm_range *prange;
2574 unsigned long start;
2575 unsigned long last;
2576
2577 if (range->event == MMU_NOTIFY_RELEASE)
2578 return true;
2579 if (!mmget_not_zero(mni->mm))
2580 return true;
2581
2582 start = mni->interval_tree.start;
2583 last = mni->interval_tree.last;
2584 start = max(start, range->start) >> PAGE_SHIFT;
2585 last = min(last, range->end - 1) >> PAGE_SHIFT;
2586 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2587 start, last, range->start >> PAGE_SHIFT,
2588 (range->end - 1) >> PAGE_SHIFT,
2589 mni->interval_tree.start >> PAGE_SHIFT,
2590 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2591
2592 prange = container_of(mni, struct svm_range, notifier);
2593
2594 svm_range_lock(prange);
2595 mmu_interval_set_seq(mni, cur_seq);
2596
2597 switch (range->event) {
2598 case MMU_NOTIFY_UNMAP:
2599 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2600 break;
2601 default:
2602 svm_range_evict(prange, mni->mm, start, last, range->event);
2603 break;
2604 }
2605
2606 svm_range_unlock(prange);
2607 mmput(mni->mm);
2608
2609 return true;
2610 }
2611
2612 /**
2613 * svm_range_from_addr - find svm range from fault address
2614 * @svms: svm range list header
2615 * @addr: address to search range interval tree, in pages
2616 * @parent: parent range if range is on child list
2617 *
2618 * Context: The caller must hold svms->lock
2619 *
2620 * Return: the svm_range found or NULL
2621 */
2622 struct svm_range *
2623 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2624 struct svm_range **parent)
2625 {
2626 struct interval_tree_node *node;
2627 struct svm_range *prange;
2628 struct svm_range *pchild;
2629
2630 node = interval_tree_iter_first(&svms->objects, addr, addr);
2631 if (!node)
2632 return NULL;
2633
2634 prange = container_of(node, struct svm_range, it_node);
2635 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2636 addr, prange->start, prange->last, node->start, node->last);
2637
2638 if (addr >= prange->start && addr <= prange->last) {
2639 if (parent)
2640 *parent = prange;
2641 return prange;
2642 }
2643 list_for_each_entry(pchild, &prange->child_list, child_list)
2644 if (addr >= pchild->start && addr <= pchild->last) {
2645 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2646 addr, pchild->start, pchild->last);
2647 if (parent)
2648 *parent = prange;
2649 return pchild;
2650 }
2651
2652 return NULL;
2653 }
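/* Usage sketch (illustrative only): looking up the range for a fault address,
 * with the parent needed when the result is a not-yet-processed child range:
 *
 *	struct svm_range *parent, *prange;
 *
 *	mutex_lock(&svms->lock);
 *	prange = svm_range_from_addr(svms, addr, &parent);
 *	if (prange)
 *		... take parent->migrate_mutex / prange->lock as needed ...
 *	mutex_unlock(&svms->lock);
 */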
2654
2655 /* svm_range_best_restore_location - decide the best fault restore location
2656 * @prange: svm range structure
2657 * @adev: the GPU on which vm fault happened
2658 *
2659 * This is only called when xnack is on, to decide the best location to restore
2660 * the range mapping after GPU vm fault. Caller uses the best location to do
2661 * migration if actual loc is not best location, then update GPU page table
2662 * mapping to the best location.
2663 *
2664 * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
2665 * If the faulting GPU's idx is in the range's ACCESSIBLE bitmap, best_loc is
2666 * the faulting GPU. If it is in the ACCESSIBLE_IN_PLACE bitmap, then:
2667 * if the range's actual loc is the CPU, best_loc is the CPU;
2668 * if the faulting GPU is in the same XGMI hive as the actual loc GPU,
2669 * best_loc is the range's actual loc.
2670 * Otherwise the faulting GPU has no access and best_loc is -1.
2671 *
2672 * Return:
2673 * -1 if the faulting GPU has no access
2674 * 0 for the CPU, or a GPU id
2675 */
2676 static int32_t
2677 svm_range_best_restore_location(struct svm_range *prange,
2678 struct kfd_node *node,
2679 int32_t *gpuidx)
2680 {
2681 struct kfd_node *bo_node, *preferred_node;
2682 struct kfd_process *p;
2683 uint32_t gpuid;
2684 int r;
2685
2686 p = container_of(prange->svms, struct kfd_process, svms);
2687
2688 r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2689 if (r < 0) {
2690 pr_debug("failed to get gpuid from kgd\n");
2691 return -1;
2692 }
2693
2694 if (node->adev->flags & AMD_IS_APU)
2695 return 0;
2696
2697 if (prange->preferred_loc == gpuid ||
2698 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2699 return prange->preferred_loc;
2700 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2701 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2702 if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2703 return prange->preferred_loc;
2704 /* fall through */
2705 }
2706
2707 if (test_bit(*gpuidx, prange->bitmap_access))
2708 return gpuid;
2709
2710 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2711 if (!prange->actual_loc)
2712 return 0;
2713
2714 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2715 if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2716 return prange->actual_loc;
2717 else
2718 return 0;
2719 }
2720
2721 return -1;
2722 }
2723
2724 static int
2725 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2726 unsigned long *start, unsigned long *last,
2727 bool *is_heap_stack)
2728 {
2729 struct vm_area_struct *vma;
2730 struct interval_tree_node *node;
2731 struct rb_node *rb_node;
2732 unsigned long start_limit, end_limit;
2733
2734 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2735 if (!vma) {
2736 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2737 return -EFAULT;
2738 }
2739
2740 *is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2741
2742 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2743 (unsigned long)ALIGN_DOWN(addr, 1UL << p->svms.default_granularity));
2744 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2745 (unsigned long)ALIGN(addr + 1, 1UL << p->svms.default_granularity));
2746
2747 /* First range that starts after the fault address */
2748 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2749 if (node) {
2750 end_limit = min(end_limit, node->start);
2751 /* Last range that ends before the fault address */
2752 rb_node = rb_prev(&node->rb);
2753 } else {
2754 /* Last range must end before addr because
2755 * there was no range after addr
2756 */
2757 rb_node = rb_last(&p->svms.objects.rb_root);
2758 }
2759 if (rb_node) {
2760 node = container_of(rb_node, struct interval_tree_node, rb);
2761 if (node->last >= addr) {
2762 WARN(1, "Overlap with prev node and page fault addr\n");
2763 return -EFAULT;
2764 }
2765 start_limit = max(start_limit, node->last + 1);
2766 }
2767
2768 *start = start_limit;
2769 *last = end_limit - 1;
2770
2771 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2772 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2773 *start, *last, *is_heap_stack);
2774
2775 return 0;
2776 }
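/* Worked example (illustrative): with default_granularity = 9 (512 pages,
 * i.e. 2 MiB with 4 KiB pages) and a fault at page 0x12345, the initial
 * window is [ALIGN_DOWN(0x12345, 0x200), ALIGN(0x12346, 0x200) - 1] =
 * [0x12200 0x123ff], then clipped to the VMA and shrunk so it does not
 * overlap the neighbouring registered ranges found in the interval tree.
 */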
2777
2778 static int
2779 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2780 uint64_t *bo_s, uint64_t *bo_l)
2781 {
2782 struct amdgpu_bo_va_mapping *mapping;
2783 struct interval_tree_node *node;
2784 struct amdgpu_bo *bo = NULL;
2785 unsigned long userptr;
2786 uint32_t i;
2787 int r;
2788
2789 for (i = 0; i < p->n_pdds; i++) {
2790 struct amdgpu_vm *vm;
2791
2792 if (!p->pdds[i]->drm_priv)
2793 continue;
2794
2795 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2796 r = amdgpu_bo_reserve(vm->root.bo, false);
2797 if (r)
2798 return r;
2799
2800 /* Check userptr by searching entire vm->va interval tree */
2801 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2802 while (node) {
2803 mapping = container_of((struct rb_node *)node,
2804 struct amdgpu_bo_va_mapping, rb);
2805 bo = mapping->bo_va->base.bo;
2806
2807 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2808 start << PAGE_SHIFT,
2809 last << PAGE_SHIFT,
2810 &userptr)) {
2811 node = interval_tree_iter_next(node, 0, ~0ULL);
2812 continue;
2813 }
2814
2815 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2816 start, last);
2817 if (bo_s && bo_l) {
2818 *bo_s = userptr >> PAGE_SHIFT;
2819 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2820 }
2821 amdgpu_bo_unreserve(vm->root.bo);
2822 return -EADDRINUSE;
2823 }
2824 amdgpu_bo_unreserve(vm->root.bo);
2825 }
2826 return 0;
2827 }
2828
2829 static struct
2830 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2831 struct kfd_process *p,
2832 struct mm_struct *mm,
2833 int64_t addr)
2834 {
2835 struct svm_range *prange = NULL;
2836 unsigned long start, last;
2837 uint32_t gpuid, gpuidx;
2838 bool is_heap_stack;
2839 uint64_t bo_s = 0;
2840 uint64_t bo_l = 0;
2841 int r;
2842
2843 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2844 &is_heap_stack))
2845 return NULL;
2846
2847 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2848 if (r != -EADDRINUSE)
2849 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2850
2851 if (r == -EADDRINUSE) {
2852 if (addr >= bo_s && addr <= bo_l)
2853 return NULL;
2854
2855 /* Create a one-page svm range if the aligned range overlaps an existing mapping */
2856 start = addr;
2857 last = addr;
2858 }
2859
2860 prange = svm_range_new(&p->svms, start, last, true);
2861 if (!prange) {
2862 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2863 return NULL;
2864 }
2865 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2866 pr_debug("failed to get gpuid from kgd\n");
2867 svm_range_free(prange, true);
2868 return NULL;
2869 }
2870
2871 if (is_heap_stack)
2872 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2873
2874 svm_range_add_to_svms(prange);
2875 svm_range_add_notifier_locked(mm, prange);
2876
2877 return prange;
2878 }
2879
2880 /* svm_range_skip_recover - decide if prange can be recovered
2881 * @prange: svm range structure
2882 *
2883 * The GPU vm retry fault handler skips recovering the range in these cases:
2884 * 1. prange is on the deferred list to be removed after unmap; this is a stale
2885 * fault, and the deferred list work will drain it before freeing the prange.
2886 * 2. prange is on the deferred list to add its interval notifier after a split, or
2887 * 3. prange is a child range split from a parent prange; recover it later,
2888 * after the interval notifier is added.
2889 *
2890 * Return: true to skip recover, false to recover
2891 */
2892 static bool svm_range_skip_recover(struct svm_range *prange)
2893 {
2894 struct svm_range_list *svms = prange->svms;
2895
2896 spin_lock(&svms->deferred_list_lock);
2897 if (list_empty(&prange->deferred_list) &&
2898 list_empty(&prange->child_list)) {
2899 spin_unlock(&svms->deferred_list_lock);
2900 return false;
2901 }
2902 spin_unlock(&svms->deferred_list_lock);
2903
2904 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2905 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2906 svms, prange, prange->start, prange->last);
2907 return true;
2908 }
2909 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2910 prange->work_item.op == SVM_OP_ADD_RANGE) {
2911 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2912 svms, prange, prange->start, prange->last);
2913 return true;
2914 }
2915 return false;
2916 }
2917
2918 static void
2919 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2920 int32_t gpuidx)
2921 {
2922 struct kfd_process_device *pdd;
2923
2924 /* fault is on different page of same range
2925 * or fault is skipped to recover later
2926 * or fault is on invalid virtual address
2927 */
2928 if (gpuidx == MAX_GPU_INSTANCE) {
2929 uint32_t gpuid;
2930 int r;
2931
2932 r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
2933 if (r < 0)
2934 return;
2935 }
2936
2937 /* fault was recovered,
2938 * or fault cannot be recovered because the GPU has no access to the range
2939 */
2940 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2941 if (pdd)
2942 WRITE_ONCE(pdd->faults, pdd->faults + 1);
2943 }
2944
2945 static bool
2946 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2947 {
2948 unsigned long requested = VM_READ;
2949
2950 if (write_fault)
2951 requested |= VM_WRITE;
2952
2953 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2954 vma->vm_flags);
2955 return (vma->vm_flags & requested) == requested;
2956 }
2957
2958 int
2959 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2960 uint32_t vmid, uint32_t node_id,
2961 uint64_t addr, uint64_t ts, bool write_fault)
2962 {
2963 unsigned long start, last, size;
2964 struct mm_struct *mm = NULL;
2965 struct svm_range_list *svms;
2966 struct svm_range *prange;
2967 struct kfd_process *p;
2968 ktime_t timestamp = ktime_get_boottime();
2969 struct kfd_node *node;
2970 int32_t best_loc;
2971 int32_t gpuid, gpuidx = MAX_GPU_INSTANCE;
2972 bool write_locked = false;
2973 struct vm_area_struct *vma;
2974 bool migration = false;
2975 int r = 0;
2976
2977 if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
2978 pr_debug("device does not support SVM\n");
2979 return -EFAULT;
2980 }
2981
2982 p = kfd_lookup_process_by_pasid(pasid);
2983 if (!p) {
2984 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
2985 return 0;
2986 }
2987 svms = &p->svms;
2988
2989 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2990
2991 if (atomic_read(&svms->drain_pagefaults)) {
2992 pr_debug("page fault handling disabled, drop fault 0x%llx\n", addr);
2993 r = 0;
2994 goto out;
2995 }
2996
2997 node = kfd_node_by_irq_ids(adev, node_id, vmid);
2998 if (!node) {
2999 pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
3000 vmid);
3001 r = -EFAULT;
3002 goto out;
3003 }
3004
3005 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
3006 pr_debug("failed to get gpuid/gpuidex for node_id: %d\n", node_id);
3007 r = -EFAULT;
3008 goto out;
3009 }
3010
3011 /* check if this page fault time stamp is before svms->checkpoint_ts */
3012 if (svms->checkpoint_ts[gpuidx] != 0) {
3013 if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) {
3014 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
3015 r = 0;
3016 goto out;
3017 } else
3018 /* ts is now after svms->checkpoint_ts; reset checkpoint_ts to zero
3019 * so a later ts wrap-around cannot produce a wrong comparison
3020 */
3021 svms->checkpoint_ts[gpuidx] = 0;
3022 }
3023
3024 if (!p->xnack_enabled) {
3025 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
3026 r = -EFAULT;
3027 goto out;
3028 }
3029
3030 /* p->lead_thread is available because kfd_process_wq_release flushes
3031 * the work before releasing the task ref.
3032 */
3033 mm = get_task_mm(p->lead_thread);
3034 if (!mm) {
3035 pr_debug("svms 0x%p failed to get mm\n", svms);
3036 r = 0;
3037 goto out;
3038 }
3039
3040 mmap_read_lock(mm);
3041 retry_write_locked:
3042 mutex_lock(&svms->lock);
3043 prange = svm_range_from_addr(svms, addr, NULL);
3044 if (!prange) {
3045 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
3046 svms, addr);
3047 if (!write_locked) {
3048 /* Need the write lock to create new range with MMU notifier.
3049 * Also flush pending deferred work to make sure the interval
3050 * tree is up to date before we add a new range
3051 */
3052 mutex_unlock(&svms->lock);
3053 mmap_read_unlock(mm);
3054 mmap_write_lock(mm);
3055 write_locked = true;
3056 goto retry_write_locked;
3057 }
3058 prange = svm_range_create_unregistered_range(node, p, mm, addr);
3059 if (!prange) {
3060 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
3061 svms, addr);
3062 mmap_write_downgrade(mm);
3063 r = -EFAULT;
3064 goto out_unlock_svms;
3065 }
3066 }
3067 if (write_locked)
3068 mmap_write_downgrade(mm);
3069
3070 mutex_lock(&prange->migrate_mutex);
3071
3072 if (svm_range_skip_recover(prange)) {
3073 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3074 r = 0;
3075 goto out_unlock_range;
3076 }
3077
3078 /* skip duplicate vm fault on different pages of same range */
3079 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
3080 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
3081 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
3082 svms, prange->start, prange->last);
3083 r = 0;
3084 goto out_unlock_range;
3085 }
3086
3087 /* The VMA was removed by __do_munmap; return success since we are
3088 * handling a stale retry fault.
3089 */
3090 vma = vma_lookup(mm, addr << PAGE_SHIFT);
3091 if (!vma) {
3092 pr_debug("address 0x%llx VMA is removed\n", addr);
3093 r = 0;
3094 goto out_unlock_range;
3095 }
3096
3097 if (!svm_fault_allowed(vma, write_fault)) {
3098 pr_debug("fault addr 0x%llx no %s permission\n", addr,
3099 write_fault ? "write" : "read");
3100 r = -EPERM;
3101 goto out_unlock_range;
3102 }
3103
3104 best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3105 if (best_loc == -1) {
3106 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3107 svms, prange->start, prange->last);
3108 r = -EACCES;
3109 goto out_unlock_range;
3110 }
3111
3112 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3113 svms, prange->start, prange->last, best_loc,
3114 prange->actual_loc);
3115
3116 kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
3117 write_fault, timestamp);
3118
3119 /* Align migration range start and size to granularity size */
3120 size = 1UL << prange->granularity;
3121 start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
3122 last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
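	/* e.g. granularity 9 and a fault at page 0x1234 give size 0x200,
	 * start 0x1200 and last 0x13ff, clamped to [prange->start,
	 * prange->last] (illustrative numbers)
	 */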
3123 if (prange->actual_loc != 0 || best_loc != 0) {
3124 if (best_loc) {
3125 r = svm_migrate_to_vram(prange, best_loc, start, last,
3126 mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3127 if (r) {
3128 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
3129 r, addr);
3130 /* Fallback to system memory if migration to
3131 * VRAM failed
3132 */
3133 if (prange->actual_loc && prange->actual_loc != best_loc)
3134 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3135 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3136 else
3137 r = 0;
3138 }
3139 } else {
3140 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3141 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3142 }
3143 if (r) {
3144 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3145 r, svms, start, last);
3146 goto out_migrate_fail;
3147 } else {
3148 migration = true;
3149 }
3150 }
3151
3152 r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
3153 false, false);
3154 if (r)
3155 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3156 r, svms, start, last);
3157
3158 out_migrate_fail:
3159 kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3160 migration);
3161
3162 out_unlock_range:
3163 mutex_unlock(&prange->migrate_mutex);
3164 out_unlock_svms:
3165 mutex_unlock(&svms->lock);
3166 mmap_read_unlock(mm);
3167
3168 svm_range_count_fault(node, p, gpuidx);
3169
3170 mmput(mm);
3171 out:
3172 kfd_unref_process(p);
3173
3174 if (r == -EAGAIN) {
3175 pr_debug("recover vm fault later\n");
3176 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3177 r = 0;
3178 }
3179 return r;
3180 }
3181
3182 int
3183 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3184 {
3185 struct svm_range *prange, *pchild;
3186 uint64_t reserved_size = 0;
3187 uint64_t size;
3188 int r = 0;
3189
3190 pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3191
3192 mutex_lock(&p->svms.lock);
3193
3194 list_for_each_entry(prange, &p->svms.list, list) {
3195 svm_range_lock(prange);
3196 list_for_each_entry(pchild, &prange->child_list, child_list) {
3197 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3198 if (xnack_enabled) {
3199 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3200 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3201 } else {
3202 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3203 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3204 if (r)
3205 goto out_unlock;
3206 reserved_size += size;
3207 }
3208 }
3209
3210 size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3211 if (xnack_enabled) {
3212 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3213 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3214 } else {
3215 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3216 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3217 if (r)
3218 goto out_unlock;
3219 reserved_size += size;
3220 }
3221 out_unlock:
3222 svm_range_unlock(prange);
3223 if (r)
3224 break;
3225 }
3226
3227 if (r)
3228 amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3229 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3230 else
3231 /* Change xnack mode must be inside svms lock, to avoid race with
3232 * svm_range_deferred_list_work unreserve memory in parallel.
3233 */
3234 p->xnack_enabled = xnack_enabled;
3235
3236 mutex_unlock(&p->svms.lock);
3237 return r;
3238 }
3239
3240 void svm_range_list_fini(struct kfd_process *p)
3241 {
3242 struct svm_range *prange;
3243 struct svm_range *next;
3244
3245 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3246
3247 cancel_delayed_work_sync(&p->svms.restore_work);
3248
3249 /* Ensure list work is finished before process is destroyed */
3250 flush_work(&p->svms.deferred_list_work);
3251
3252 /*
3253 * Ensure no retry fault comes in afterwards, as the page fault handler
3254 * would not find the kfd process and take the mm lock to recover the fault.
3255 * Stop kfd page fault handling, then wait until pending page faults are drained.
3256 */
3257 atomic_set(&p->svms.drain_pagefaults, 1);
3258 svm_range_drain_retry_fault(&p->svms);
3259
3260 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3261 svm_range_unlink(prange);
3262 svm_range_remove_notifier(prange);
3263 svm_range_free(prange, true);
3264 }
3265
3266 mutex_destroy(&p->svms.lock);
3267
3268 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3269 }
3270
3271 int svm_range_list_init(struct kfd_process *p)
3272 {
3273 struct svm_range_list *svms = &p->svms;
3274 int i;
3275
3276 svms->objects = RB_ROOT_CACHED;
3277 mutex_init(&svms->lock);
3278 INIT_LIST_HEAD(&svms->list);
3279 atomic_set(&svms->evicted_ranges, 0);
3280 atomic_set(&svms->drain_pagefaults, 0);
3281 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3282 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3283 INIT_LIST_HEAD(&svms->deferred_range_list);
3284 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3285 spin_lock_init(&svms->deferred_list_lock);
3286
3287 for (i = 0; i < p->n_pdds; i++)
3288 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3289 bitmap_set(svms->bitmap_supported, i, 1);
3290
3291 /* The default granularity cannot exceed 0x1B; 2^0x1B is the number of
3292 * pages addressable by a 4-level paging table
3293 */
3294 svms->default_granularity = min_t(u8, amdgpu_svm_default_granularity, 0x1B);
3295 pr_debug("Default SVM Granularity to use: %d\n", svms->default_granularity);
3296
3297 return 0;
3298 }
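/* Note (illustrative): a granularity of g means fault recovery and migration
 * work on 2^g-page chunks; with 4 KiB pages the default g = 9 corresponds to
 * 2 MiB, which lines up with the GPU's huge-page size, while the 0x1B cap
 * above bounds the chunk to what the page table hierarchy can address.
 */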
3299
3300 /**
3301 * svm_range_check_vm - check if virtual address range mapped already
3302 * @p: current kfd_process
3303 * @start: range start address, in pages
3304 * @last: range last address, in pages
3305 * @bo_s: mapping start address in pages if address range already mapped
3306 * @bo_l: mapping last address in pages if address range already mapped
3307 *
3308 * The purpose is to avoid virtual address ranges already allocated by
3309 * kfd_ioctl_alloc_memory_of_gpu ioctl.
3310 * It looks for each pdd in the kfd_process.
3311 *
3312 * Context: Process context
3313 *
3314 * Return 0 - OK, if the range is not mapped.
3315 * Otherwise error code:
3316 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3317 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3318 * a signal. Release all buffer reservations and return to user-space.
3319 */
3320 static int
3321 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3322 uint64_t *bo_s, uint64_t *bo_l)
3323 {
3324 struct amdgpu_bo_va_mapping *mapping;
3325 struct interval_tree_node *node;
3326 uint32_t i;
3327 int r;
3328
3329 for (i = 0; i < p->n_pdds; i++) {
3330 struct amdgpu_vm *vm;
3331
3332 if (!p->pdds[i]->drm_priv)
3333 continue;
3334
3335 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3336 r = amdgpu_bo_reserve(vm->root.bo, false);
3337 if (r)
3338 return r;
3339
3340 node = interval_tree_iter_first(&vm->va, start, last);
3341 if (node) {
3342 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3343 start, last);
3344 mapping = container_of((struct rb_node *)node,
3345 struct amdgpu_bo_va_mapping, rb);
3346 if (bo_s && bo_l) {
3347 *bo_s = mapping->start;
3348 *bo_l = mapping->last;
3349 }
3350 amdgpu_bo_unreserve(vm->root.bo);
3351 return -EADDRINUSE;
3352 }
3353 amdgpu_bo_unreserve(vm->root.bo);
3354 }
3355
3356 return 0;
3357 }
3358
3359 /**
3360 * svm_range_is_valid - check if virtual address range is valid
3361 * @p: current kfd_process
3362 * @start: range start address, in pages
3363 * @size: range size, in pages
3364 *
3365 * Valid virtual address range means it belongs to one or more VMAs
3366 *
3367 * Context: Process context
3368 *
3369 * Return:
3370 * 0 - OK, otherwise error code
3371 */
3372 static int
3373 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3374 {
3375 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3376 struct vm_area_struct *vma;
3377 unsigned long end;
3378 unsigned long start_unchg = start;
3379
3380 start <<= PAGE_SHIFT;
3381 end = start + (size << PAGE_SHIFT);
3382 do {
3383 vma = vma_lookup(p->mm, start);
3384 if (!vma || (vma->vm_flags & device_vma))
3385 return -EFAULT;
3386 start = min(end, vma->vm_end);
3387 } while (start < end);
3388
3389 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3390 NULL);
3391 }
3392
3393 /**
3394 * svm_range_best_prefetch_location - decide the best prefetch location
3395 * @prange: svm range structure
3396 *
3397 * For xnack off:
3398 * If range map to single GPU, the best prefetch location is prefetch_loc, which
3399 * can be CPU or GPU.
3400 *
3401 * If the range is ACCESS or ACCESS_IN_PLACE by multiple GPUs, the best
3402 * prefetch location is the prefetch_loc GPU only if the GPUs are in the same
3403 * XGMI hive; otherwise it is always the CPU, because a GPU cannot coherently
3404 * map another GPU's VRAM even with a large-BAR PCIe connection.
3405 *
3406 * For xnack on:
3407 * If the range is not ACCESS_IN_PLACE by multiple GPUs, the best prefetch
3408 * location is prefetch_loc; access by another GPU generates a vm fault and
3409 * triggers migration.
3410 *
3411 * If the range is ACCESS_IN_PLACE by multiple GPUs, the best prefetch location
3412 * is the prefetch_loc GPU only on the same XGMI hive; otherwise it is the CPU.
3413 *
3414 * Context: Process context
3415 *
3416 * Return:
3417 * 0 for CPU or GPU id
3418 */
3419 static uint32_t
3420 svm_range_best_prefetch_location(struct svm_range *prange)
3421 {
3422 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3423 uint32_t best_loc = prange->prefetch_loc;
3424 struct kfd_process_device *pdd;
3425 struct kfd_node *bo_node;
3426 struct kfd_process *p;
3427 uint32_t gpuidx;
3428
3429 p = container_of(prange->svms, struct kfd_process, svms);
3430
3431 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3432 goto out;
3433
3434 bo_node = svm_range_get_node_by_id(prange, best_loc);
3435 if (!bo_node) {
3436 WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3437 best_loc = 0;
3438 goto out;
3439 }
3440
3441 if (bo_node->adev->flags & AMD_IS_APU) {
3442 best_loc = 0;
3443 goto out;
3444 }
3445
3446 if (p->xnack_enabled)
3447 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3448 else
3449 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3450 MAX_GPU_INSTANCE);
3451
3452 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3453 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3454 if (!pdd) {
3455 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3456 continue;
3457 }
3458
3459 if (pdd->dev->adev == bo_node->adev)
3460 continue;
3461
3462 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3463 best_loc = 0;
3464 break;
3465 }
3466 }
3467
3468 out:
3469 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3470 p->xnack_enabled, &p->svms, prange->start, prange->last,
3471 best_loc);
3472
3473 return best_loc;
3474 }
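
/*
 * Worked example of the policy above (hypothetical topology): with xnack
 * off, prefetch_loc = GPU A, and the range accessible by GPUs A and B, the
 * loop keeps best_loc = A only if A and B are in the same XGMI hive. If B is
 * reachable only over PCIe, best_loc falls back to 0 (CPU), because B cannot
 * coherently map A's VRAM. An APU best_loc is likewise forced to 0 above.
 */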

/* svm_range_trigger_migration - start page migration if prefetch loc changed
 * @mm: current process mm_struct
 * @prange: svm range structure
 * @migrated: output, true if migration is triggered
 *
 * If the range prefetch_loc is a GPU and the actual loc is cpu 0, migrate
 * the range from ram to vram.
 * If the range prefetch_loc is cpu 0 and the actual loc is a GPU, migrate
 * the range from vram to ram.
 *
 * If GPU vm fault retry is not enabled, migration interacts with the MMU
 * notifier and restore work:
 * 1. migrate_vma_setup invalidates pages, the MMU notifier callback
 *    svm_range_evict stops all queues and schedules the restore work
 * 2. svm_range_restore_work waits for migration to finish via
 *    a. svm_range_validate_vram taking prange->migrate_mutex
 *    b. svm_range_validate_ram HMM get pages waiting for the CPU fault
 *       handler to return
 * 3. restore work updates the GPU mappings and resumes all queues.
 *
 * Context: Process context
 *
 * Return:
 * 0 - OK, otherwise - error code of migration
 */
static int
svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
			    bool *migrated)
{
	uint32_t best_loc;
	int r = 0;

	*migrated = false;
	best_loc = svm_range_best_prefetch_location(prange);

	/* when best_loc is a gpu node and the same as prange->actual_loc,
	 * we still need to do the migration, as prange->actual_loc != 0 does
	 * not mean all pages in prange are vram. hmm migrate will pick up
	 * the right pages during migration.
	 */
	if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
	    (best_loc == 0 && prange->actual_loc == 0))
		return 0;

	if (!best_loc) {
		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
					    KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
		*migrated = !r;
		return r;
	}

	r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
				mm, KFD_MIGRATE_TRIGGER_PREFETCH);
	*migrated = !r;

	return 0;
}
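
/*
 * Caller sketch (illustrative; see svm_range_set_attr for the real call
 * site): only a failed vram-to-ram migration is fatal here. A failed
 * prefetch to VRAM leaves the range in system memory and still returns 0,
 * with *migrated == false:
 *
 *	bool migrated;
 *	int r = svm_range_trigger_migration(mm, prange, &migrated);
 *
 *	if (r)
 *		return r;	// vram-to-ram migration failed
 *	if (!migrated)
 *		;		// nothing moved; mapping may still need update
 */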

int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
{
	/* Dereferencing fence->svm_bo is safe here because the fence hasn't
	 * signaled yet and we're under the protection of the fence->lock.
	 * After the fence is signaled in svm_range_bo_release, we cannot get
	 * here any more.
	 *
	 * Reference is dropped in svm_range_evict_svm_bo_worker.
	 */
	if (svm_bo_ref_unless_zero(fence->svm_bo)) {
		WRITE_ONCE(fence->svm_bo->evicting, 1);
		schedule_work(&fence->svm_bo->eviction_work);
	}

	return 0;
}

static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
	struct svm_range_bo *svm_bo;
	struct mm_struct *mm;
	int r = 0;

	svm_bo = container_of(work, struct svm_range_bo, eviction_work);

	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
		mm = svm_bo->eviction_fence->mm;
	} else {
		svm_range_bo_unref(svm_bo);
		return;
	}

	mmap_read_lock(mm);
	spin_lock(&svm_bo->list_lock);
	while (!list_empty(&svm_bo->range_list) && !r) {
		struct svm_range *prange =
				list_first_entry(&svm_bo->range_list,
						 struct svm_range, svm_bo_list);
		int retries = 3;

		list_del_init(&prange->svm_bo_list);
		spin_unlock(&svm_bo->list_lock);

		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
			 prange->start, prange->last);

		mutex_lock(&prange->migrate_mutex);
		do {
			/* migrate all vram pages in this prange to sys ram;
			 * after that, prange->actual_loc should be zero
			 */
			r = svm_migrate_vram_to_ram(prange, mm,
					prange->start, prange->last,
					KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
		} while (!r && prange->actual_loc && --retries);

		if (!r && prange->actual_loc)
			pr_info_once("Migration failed during eviction\n");

		if (!prange->actual_loc) {
			mutex_lock(&prange->lock);
			prange->svm_bo = NULL;
			mutex_unlock(&prange->lock);
		}
		mutex_unlock(&prange->migrate_mutex);

		spin_lock(&svm_bo->list_lock);
	}
	spin_unlock(&svm_bo->list_lock);
	mmap_read_unlock(mm);
	mmput(mm);

	dma_fence_signal(&svm_bo->eviction_fence->base);

	/* This is the last reference to svm_bo, after svm_range_vram_node_free
	 * has been called in svm_migrate_vram_to_ram
	 */
	WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
	svm_range_bo_unref(svm_bo);
}
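
/*
 * Eviction flow summary (restates the code above, nothing new): TTM enables
 * the amdkfd eviction fence, svm_range_schedule_evict_svm_bo queues this
 * worker, and the worker then
 *	1. pins the mm and takes mmap_read_lock,
 *	2. migrates each attached prange from VRAM to system RAM
 *	   (up to 3 attempts per prange),
 *	3. detaches pranges whose actual_loc reached 0 from the svm_bo,
 *	4. signals the eviction fence so TTM can reclaim the VRAM.
 */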

static int
svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
		   uint64_t start, uint64_t size, uint32_t nattr,
		   struct kfd_ioctl_svm_attribute *attrs)
{
	struct amdkfd_process_info *process_info = p->kgd_process_info;
	struct list_head update_list;
	struct list_head insert_list;
	struct list_head remove_list;
	struct list_head remap_list;
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct svm_range *next;
	bool update_mapping = false;
	bool flush_tlb;
	int r, ret = 0;

	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
		 p->pasid, &p->svms, start, start + size - 1, size);

	r = svm_range_check_attr(p, nattr, attrs);
	if (r)
		return r;

	svms = &p->svms;

	mutex_lock(&process_info->lock);

	svm_range_list_lock_and_flush_work(svms, mm);

	r = svm_range_is_valid(p, start, size);
	if (r) {
		pr_debug("invalid range r=%d\n", r);
		mmap_write_unlock(mm);
		goto out;
	}

	mutex_lock(&svms->lock);

	/* Add new range and split existing ranges as needed */
	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
			  &insert_list, &remove_list, &remap_list);
	if (r) {
		mutex_unlock(&svms->lock);
		mmap_write_unlock(mm);
		goto out;
	}
	/* Apply changes as a transaction */
	list_for_each_entry_safe(prange, next, &insert_list, list) {
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
	}
	list_for_each_entry(prange, &update_list, update_list) {
		svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
		/* TODO: unmap ranges from GPU that lost access */
	}
	list_for_each_entry_safe(prange, next, &remove_list, update_list) {
		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 prange->svms, prange, prange->start,
			 prange->last);
		svm_range_unlink(prange);
		svm_range_remove_notifier(prange);
		svm_range_free(prange, false);
	}

	mmap_write_downgrade(mm);
	/* Trigger migrations and revalidate and map to GPUs as needed. If
	 * this fails we may be left with partially completed actions. There
	 * is no clean way of rolling back to the previous state in such a
	 * case because the rollback wouldn't be guaranteed to work either.
	 */
	list_for_each_entry(prange, &update_list, update_list) {
		bool migrated;

		mutex_lock(&prange->migrate_mutex);

		r = svm_range_trigger_migration(mm, prange, &migrated);
		if (r)
			goto out_unlock_range;

		if (migrated && (!p->xnack_enabled ||
		    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
		    prange->mapped_to_gpu) {
			pr_debug("restore_work will update mappings of GPUs\n");
			mutex_unlock(&prange->migrate_mutex);
			continue;
		}

		if (!migrated && !update_mapping) {
			mutex_unlock(&prange->migrate_mutex);
			continue;
		}

		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;

		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
					       MAX_GPU_INSTANCE, true, true, flush_tlb);
		if (r)
			pr_debug("failed %d to map svm range\n", r);

out_unlock_range:
		mutex_unlock(&prange->migrate_mutex);
		if (r)
			ret = r;
	}

	list_for_each_entry(prange, &remap_list, update_list) {
		pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
			 prange, prange->start, prange->last);
		mutex_lock(&prange->migrate_mutex);
		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
					       MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
		if (r)
			pr_debug("failed %d on remap svm range\n", r);
		mutex_unlock(&prange->migrate_mutex);
		if (r)
			ret = r;
	}

	dynamic_svm_range_dump(svms);

	mutex_unlock(&svms->lock);
	mmap_read_unlock(mm);
out:
	mutex_unlock(&process_info->lock);

	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
		 &p->svms, start, start + size - 1, r);

	return ret ? ret : r;
}
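
/*
 * User-space sketch of reaching this function via the SVM ioctl (names per
 * include/uapi/linux/kfd_ioctl.h; fd, buffer, and gpu_id values are
 * illustrative assumptions):
 *
 *	size_t sz = sizeof(struct kfd_ioctl_svm_args) +
 *		    sizeof(struct kfd_ioctl_svm_attribute);
 *	struct kfd_ioctl_svm_args *args = calloc(1, sz);
 *
 *	args->start_addr = (uint64_t)buf;	// bytes, page aligned
 *	args->size = 2 * 1024 * 1024;		// bytes, multiple of page size
 *	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *	args->nattr = 1;
 *	args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
 *	args->attrs[0].value = gpu_id;		// user GPU id, or 0 for CPU
 *	if (ioctl(kfd_fd, AMDKFD_IOC_SVM, args))
 *		perror("AMDKFD_IOC_SVM");
 */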

static int
svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
		   uint64_t start, uint64_t size, uint32_t nattr,
		   struct kfd_ioctl_svm_attribute *attrs)
{
	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
	bool get_preferred_loc = false;
	bool get_prefetch_loc = false;
	bool get_granularity = false;
	bool get_accessible = false;
	bool get_flags = false;
	uint64_t last = start + size - 1UL;
	uint8_t granularity = 0xff;
	struct interval_tree_node *node;
	struct svm_range_list *svms;
	struct svm_range *prange;
	uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	uint32_t flags_and = 0xffffffff;
	uint32_t flags_or = 0;
	int gpuidx;
	uint32_t i;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
		 start + size - 1, nattr);

	/* Flush pending deferred work to avoid racing with deferred actions
	 * from previous memory map changes (e.g. munmap). Concurrent memory
	 * map changes can still race with get_attr because we don't hold the
	 * mmap lock. But that would be a race condition in the application
	 * anyway, and undefined behaviour is acceptable in that case.
	 */
	flush_work(&p->svms.deferred_list_work);

	mmap_read_lock(mm);
	r = svm_range_is_valid(p, start, size);
	mmap_read_unlock(mm);
	if (r) {
		pr_debug("invalid range r=%d\n", r);
		return r;
	}

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			get_preferred_loc = true;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			get_prefetch_loc = true;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
			get_accessible = true;
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			get_flags = true;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			get_granularity = true;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			fallthrough;
		default:
			pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
			return -EINVAL;
		}
	}

	svms = &p->svms;

	mutex_lock(&svms->lock);

	node = interval_tree_iter_first(&svms->objects, start, last);
	if (!node) {
		pr_debug("range attrs not found return default values\n");
		svm_range_set_default_attributes(svms, &location, &prefetch_loc,
						 &granularity, &flags_and);
		flags_or = flags_and;
		if (p->xnack_enabled)
			bitmap_copy(bitmap_access, svms->bitmap_supported,
				    MAX_GPU_INSTANCE);
		else
			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
		goto fill_values;
	}
	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);

	while (node) {
		struct interval_tree_node *next;

		prange = container_of(node, struct svm_range, it_node);
		next = interval_tree_iter_next(node, start, last);

		if (get_preferred_loc) {
			if (prange->preferred_loc ==
					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
			    (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
			     location != prange->preferred_loc)) {
				location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
				get_preferred_loc = false;
			} else {
				location = prange->preferred_loc;
			}
		}
		if (get_prefetch_loc) {
			if (prange->prefetch_loc ==
					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
			    (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
			     prefetch_loc != prange->prefetch_loc)) {
				prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
				get_prefetch_loc = false;
			} else {
				prefetch_loc = prange->prefetch_loc;
			}
		}
		if (get_accessible) {
			bitmap_and(bitmap_access, bitmap_access,
				   prange->bitmap_access, MAX_GPU_INSTANCE);
			bitmap_and(bitmap_aip, bitmap_aip,
				   prange->bitmap_aip, MAX_GPU_INSTANCE);
		}
		if (get_flags) {
			flags_and &= prange->flags;
			flags_or |= prange->flags;
		}

		if (get_granularity && prange->granularity < granularity)
			granularity = prange->granularity;

		node = next;
	}
fill_values:
	mutex_unlock(&svms->lock);

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			attrs[i].value = location;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			attrs[i].value = prefetch_loc;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (gpuidx < 0) {
				pr_debug("invalid gpuid %x\n", attrs[i].value);
				return -EINVAL;
			}
			if (test_bit(gpuidx, bitmap_access))
				attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
			else if (test_bit(gpuidx, bitmap_aip))
				attrs[i].type =
					KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
			else
				attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			attrs[i].value = flags_and;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			attrs[i].value = ~flags_or;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			attrs[i].value = (uint32_t)granularity;
			break;
		}
	}

	return 0;
}
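
/*
 * Query semantics example (values hypothetical): get_attr returns the
 * attribute value common to every range overlapping [start, last]. Querying
 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC across one range prefetched to GPU 1 and
 * another to GPU 2 yields KFD_IOCTL_SVM_LOCATION_UNDEFINED. SET_FLAGS
 * reports flags set on all ranges (flags_and), CLR_FLAGS flags set on none
 * (~flags_or), and GRANULARITY the minimum granularity seen.
 */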

int kfd_criu_resume_svm(struct kfd_process *p)
{
	struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
	int nattr_common = 4, nattr_accessibility = 1;
	struct criu_svm_metadata *criu_svm_md = NULL;
	struct svm_range_list *svms = &p->svms;
	struct criu_svm_metadata *next = NULL;
	uint32_t set_flags = 0xffffffff;
	int i, j, num_attrs, ret = 0;
	uint64_t set_attr_size;
	struct mm_struct *mm;

	if (list_empty(&svms->criu_svm_metadata_list)) {
		pr_debug("No SVM data from CRIU restore stage 2\n");
		return ret;
	}

	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_err("failed to get mm for the target process\n");
		return -ESRCH;
	}

	num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);

	i = j = 0;
	list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
		pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
			 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);

		for (j = 0; j < num_attrs; j++) {
			pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
				 i, j, criu_svm_md->data.attrs[j].type,
				 i, j, criu_svm_md->data.attrs[j].value);
			switch (criu_svm_md->data.attrs[j].type) {
			/* During a checkpoint operation, the query for the
			 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
			 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
			 * not used by the range which was checkpointed. Care
			 * must be taken to not restore with an invalid value,
			 * otherwise the gpuidx value will be invalid and
			 * set_attr would eventually fail, so just replace
			 * those with another dummy attribute such as
			 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
			 */
			case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
				if (criu_svm_md->data.attrs[j].value ==
				    KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
					criu_svm_md->data.attrs[j].type =
						KFD_IOCTL_SVM_ATTR_SET_FLAGS;
					criu_svm_md->data.attrs[j].value = 0;
				}
				break;
			case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
				set_flags = criu_svm_md->data.attrs[j].value;
				break;
			default:
				break;
			}
		}

		/* CLR_FLAGS is not available via get_attr during checkpoint,
		 * but it needs to be inserted before restoring the ranges, so
		 * allocate extra space for it before calling set_attr
		 */
		set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
						(num_attrs + 1);
		set_attr_new = krealloc(set_attr, set_attr_size,
					GFP_KERNEL);
		if (!set_attr_new) {
			ret = -ENOMEM;
			goto exit;
		}
		set_attr = set_attr_new;

		memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
					sizeof(struct kfd_ioctl_svm_attribute));
		set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
		set_attr[num_attrs].value = ~set_flags;

		ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
					 criu_svm_md->data.size, num_attrs + 1,
					 set_attr);
		if (ret) {
			pr_err("CRIU: failed to set range attributes\n");
			goto exit;
		}

		i++;
	}
exit:
	kfree(set_attr);
	list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
		pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
			 criu_svm_md->data.start_addr);
		kfree(criu_svm_md);
	}

	mmput(mm);
	return ret;
}
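
/*
 * Layout sketch of the set_attr array built above (GPU count is an assumed
 * example): with p->n_pdds == 2, num_attrs = 4 + 1 * 2 = 6, so
 *
 *	set_attr[0..5]	checkpointed attrs (PREFERRED_LOC, PREFETCH_LOC,
 *			SET_FLAGS, GRANULARITY, ACCESS x2)
 *	set_attr[6]	{ KFD_IOCTL_SVM_ATTR_CLR_FLAGS, ~set_flags }
 *
 * and svm_range_set_attr is called with num_attrs + 1 == 7 attributes.
 */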

int kfd_criu_restore_svm(struct kfd_process *p,
			 uint8_t __user *user_priv_ptr,
			 uint64_t *priv_data_offset,
			 uint64_t max_priv_data_size)
{
	uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
	int nattr_common = 4, nattr_accessibility = 1;
	struct criu_svm_metadata *criu_svm_md = NULL;
	struct svm_range_list *svms = &p->svms;
	uint32_t num_devices;
	int ret = 0;

	num_devices = p->n_pdds;
	/* Handle one SVM range object at a time. The number of GPUs is
	 * assumed to be the same on the restore node; this must be checked
	 * while evaluating the topology earlier.
	 */

	svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
		(nattr_common + nattr_accessibility * num_devices);
	svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;

	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
								svm_attrs_size;

	criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
	if (!criu_svm_md) {
		pr_err("failed to allocate memory to store svm metadata\n");
		return -ENOMEM;
	}
	if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
			     svm_priv_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}
	*priv_data_offset += svm_priv_data_size;

	list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);

	return 0;

exit:
	kfree(criu_svm_md);
	return ret;
}

int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
		       uint64_t *svm_priv_data_size)
{
	uint64_t total_size, accessibility_size, common_attr_size;
	int nattr_common = 4, nattr_accessibility = 1;
	int num_devices = p->n_pdds;
	struct svm_range_list *svms;
	struct svm_range *prange;
	uint32_t count = 0;

	*svm_priv_data_size = 0;

	svms = &p->svms;
	if (!svms)
		return -EINVAL;

	mutex_lock(&svms->lock);
	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1);
		count++;
	}
	mutex_unlock(&svms->lock);

	*num_svm_ranges = count;
	/* Only the accessibility attributes need to be queried for all the
	 * GPUs individually; the remaining ones span the entire process
	 * regardless of the various GPU nodes. Of the remaining attributes,
	 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
	 *
	 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
	 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
	 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
	 * KFD_IOCTL_SVM_ATTR_GRANULARITY
	 *
	 * ** ACCESSIBILITY ATTRIBUTES **
	 * (Considered as one, type is altered during query, value is gpuid)
	 * KFD_IOCTL_SVM_ATTR_ACCESS
	 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
	 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
	 */
	if (*num_svm_ranges > 0) {
		common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
			nattr_common;
		accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
			nattr_accessibility * num_devices;

		total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
			common_attr_size + accessibility_size;

		*svm_priv_data_size = *num_svm_ranges * total_size;
	}

	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
		 *svm_priv_data_size);
	return 0;
}
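
/*
 * Worked sizing example (device count assumed): each
 * struct kfd_ioctl_svm_attribute is two __u32s, i.e. 8 bytes, so with
 * num_devices == 4:
 *
 *	common_attr_size   = 8 * 4     = 32 bytes
 *	accessibility_size = 8 * 1 * 4 = 32 bytes
 *	total_size = sizeof(struct kfd_criu_svm_range_priv_data) + 64
 *	*svm_priv_data_size = count * total_size
 */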

int kfd_criu_checkpoint_svm(struct kfd_process *p,
			    uint8_t __user *user_priv_data,
			    uint64_t *priv_data_offset)
{
	struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
	struct kfd_ioctl_svm_attribute *query_attr = NULL;
	uint64_t svm_priv_data_size, query_attr_size = 0;
	int index, nattr_common = 4, ret = 0;
	struct svm_range_list *svms;
	int num_devices = p->n_pdds;
	struct svm_range *prange;
	struct mm_struct *mm;

	svms = &p->svms;
	if (!svms)
		return -EINVAL;

	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_err("failed to get mm for the target process\n");
		return -ESRCH;
	}

	query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
				(nattr_common + num_devices);

	query_attr = kzalloc(query_attr_size, GFP_KERNEL);
	if (!query_attr) {
		ret = -ENOMEM;
		goto exit;
	}

	query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
	query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
	query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
	query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;

	for (index = 0; index < num_devices; index++) {
		struct kfd_process_device *pdd = p->pdds[index];

		query_attr[index + nattr_common].type =
			KFD_IOCTL_SVM_ATTR_ACCESS;
		query_attr[index + nattr_common].value = pdd->user_gpu_id;
	}

	svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;

	svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
	if (!svm_priv) {
		ret = -ENOMEM;
		goto exit_query;
	}

	index = 0;
	list_for_each_entry(prange, &svms->list, list) {
		svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
		svm_priv->start_addr = prange->start;
		svm_priv->size = prange->npages;
		memcpy(&svm_priv->attrs, query_attr, query_attr_size);
		pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->npages * PAGE_SIZE);

		ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
					 svm_priv->size,
					 (nattr_common + num_devices),
					 svm_priv->attrs);
		if (ret) {
			pr_err("CRIU: failed to obtain range attributes\n");
			goto exit_priv;
		}

		if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
				 svm_priv_data_size)) {
			pr_err("Failed to copy svm priv to user\n");
			ret = -EFAULT;
			goto exit_priv;
		}

		*priv_data_offset += svm_priv_data_size;
	}

exit_priv:
	kfree(svm_priv);
exit_query:
	kfree(query_attr);
exit:
	mmput(mm);
	return ret;
}
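
/*
 * Sketch of the private-data stream written above, one record per prange
 * (field names as used in the loop; record size depends on the device
 * count):
 *
 *	+--------------------------------------+ <- *priv_data_offset (in)
 *	| kfd_criu_svm_range_priv_data         |
 *	|   object_type, start_addr, size      |
 *	|   attrs[nattr_common + num_devices]  |
 *	+--------------------------------------+
 *	| ... repeated for each prange ...     |
 *	+--------------------------------------+ <- *priv_data_offset (out)
 */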

int
svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
{
	struct mm_struct *mm = current->mm;
	int r;

	start >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;

	switch (op) {
	case KFD_IOCTL_SVM_OP_SET_ATTR:
		r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
		break;
	case KFD_IOCTL_SVM_OP_GET_ATTR:
		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
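
/*
 * Units note with a small sketch (user-space side is an assumption; see the
 * SET_ATTR example above): the ioctl takes byte units and this function
 * converts them to pages, so callers pass page-aligned byte values:
 *
 *	args->start_addr = (uint64_t)buf;	// bytes
 *	args->size = len;			// bytes
 *	args->op = KFD_IOCTL_SVM_OP_GET_ATTR;	// start/size become pages here
 */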