1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include <linux/dynamic_debug.h>
27 #include <drm/ttm/ttm_tt.h>
28 #include <drm/drm_exec.h>
29
30 #include "amdgpu_sync.h"
31 #include "amdgpu_object.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_hmm.h"
34 #include "amdgpu.h"
35 #include "amdgpu_xgmi.h"
36 #include "amdgpu_reset.h"
37 #include "kfd_priv.h"
38 #include "kfd_svm.h"
39 #include "kfd_migrate.h"
40 #include "kfd_smi_events.h"
41
42 #ifdef dev_fmt
43 #undef dev_fmt
44 #endif
45 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
46
47 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
48
49 /* Long enough to ensure no retry fault comes after svm range is restored and
50 * page table is updated.
51 */
52 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC)
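/* Dump all SVM ranges only when the dynamic debug site "svm_range_dump" is
* enabled; without CONFIG_DYNAMIC_DEBUG the call compiles away while still
* being type-checked.
*/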
53 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
54 #define dynamic_svm_range_dump(svms) \
55 _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
56 #else
57 #define dynamic_svm_range_dump(svms) \
58 do { if (0) svm_range_debug_dump(svms); } while (0)
59 #endif
60
61 /* A giant svm range is split into smaller ranges based on this limit. It is
62 * the minimum over all dGPUs/APUs of 1/32 of the VRAM size, clamped between
63 * 2MB and 1GB and aligned to a power of two.
64 */
65 static uint64_t max_svm_range_pages;
66
67 struct criu_svm_metadata {
68 struct list_head list;
69 struct kfd_criu_svm_range_priv_data data;
70 };
71
72 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
73 static bool
74 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
75 const struct mmu_notifier_range *range,
76 unsigned long cur_seq);
77 static int
78 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
79 uint64_t *bo_s, uint64_t *bo_l);
80 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
81 .invalidate = svm_range_cpu_invalidate_pagetables,
82 };
83
84 /**
85 * svm_range_unlink - unlink svm_range from lists and interval tree
86 * @prange: svm range structure to be removed
87 *
88 * Remove the svm_range from the svms and svm_bo lists and the svms
89 * interval tree.
90 *
91 * Context: The caller must hold svms->lock
92 */
93 static void svm_range_unlink(struct svm_range *prange)
94 {
95 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
96 prange, prange->start, prange->last);
97
98 if (prange->svm_bo) {
99 spin_lock(&prange->svm_bo->list_lock);
100 list_del(&prange->svm_bo_list);
101 spin_unlock(&prange->svm_bo->list_lock);
102 }
103
104 list_del(&prange->list);
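/* Only remove from the interval tree if the range was actually inserted;
* it_node.start/last remain zero for a range that was never added.
*/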
105 if (prange->it_node.start != 0 && prange->it_node.last != 0)
106 interval_tree_remove(&prange->it_node, &prange->svms->objects);
107 }
108
109 static void
110 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
111 {
112 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
113 prange, prange->start, prange->last);
114
115 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
116 prange->start << PAGE_SHIFT,
117 prange->npages << PAGE_SHIFT,
118 &svm_range_mn_ops);
119 }
120
121 /**
122 * svm_range_add_to_svms - add svm range to svms
123 * @prange: svm range structure to be added
124 *
125 * Add the svm range to svms interval tree and link list
126 *
127 * Context: The caller must hold svms->lock
128 */
129 static void svm_range_add_to_svms(struct svm_range *prange)
130 {
131 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
132 prange, prange->start, prange->last);
133
134 list_move_tail(&prange->list, &prange->svms->list);
135 prange->it_node.start = prange->start;
136 prange->it_node.last = prange->last;
137 interval_tree_insert(&prange->it_node, &prange->svms->objects);
138 }
139
140 static void svm_range_remove_notifier(struct svm_range *prange)
141 {
142 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
143 prange->svms, prange,
144 prange->notifier.interval_tree.start >> PAGE_SHIFT,
145 prange->notifier.interval_tree.last >> PAGE_SHIFT);
146
147 if (prange->notifier.interval_tree.start != 0 &&
148 prange->notifier.interval_tree.last != 0)
149 mmu_interval_notifier_remove(&prange->notifier);
150 }
151
152 static bool
153 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
154 {
155 return dma_addr && !dma_mapping_error(dev, dma_addr) &&
156 !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
157 }
158
159 static int
160 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
161 unsigned long offset, unsigned long npages,
162 unsigned long *hmm_pfns, uint32_t gpuidx)
163 {
164 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
165 dma_addr_t *addr = prange->dma_addr[gpuidx];
166 struct device *dev = adev->dev;
167 struct page *page;
168 int i, r;
169
170 if (!addr) {
171 addr = kvzalloc_objs(*addr, prange->npages);
172 if (!addr)
173 return -ENOMEM;
174 prange->dma_addr[gpuidx] = addr;
175 }
176
177 addr += offset;
178 for (i = 0; i < npages; i++) {
179 if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
180 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
181
182 page = hmm_pfn_to_page(hmm_pfns[i]);
183 if (is_zone_device_page(page)) {
184 struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
185
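/* Device-private page: convert the PFN into a physical VRAM address on the
* GPU that owns the BO, and tag it so map/unmap can tell VRAM addresses
* from system-memory DMA addresses.
*/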
186 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
187 bo_adev->vm_manager.vram_base_offset -
188 bo_adev->kfd.pgmap.range.start;
189 addr[i] |= SVM_RANGE_VRAM_DOMAIN;
190 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
191 continue;
192 }
193 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
194 r = dma_mapping_error(dev, addr[i]);
195 if (r) {
196 dev_err(dev, "failed %d dma_map_page\n", r);
197 return r;
198 }
199 pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
200 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
201 }
202
203 return 0;
204 }
205
206 static int
207 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
208 unsigned long offset, unsigned long npages,
209 unsigned long *hmm_pfns)
210 {
211 struct kfd_process *p;
212 uint32_t gpuidx;
213 int r;
214
215 p = container_of(prange->svms, struct kfd_process, svms);
216
217 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
218 struct kfd_process_device *pdd;
219
220 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
221 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
222 if (!pdd) {
223 pr_debug("failed to find device idx %d\n", gpuidx);
224 return -EINVAL;
225 }
226
227 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
228 hmm_pfns, gpuidx);
229 if (r)
230 break;
231 }
232
233 return r;
234 }
235
236 void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
237 unsigned long offset, unsigned long npages)
238 {
239 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
240 int i;
241
242 if (!dma_addr)
243 return;
244
245 for (i = offset; i < offset + npages; i++) {
246 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
247 continue;
248 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
249 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
250 dma_addr[i] = 0;
251 }
252 }
253
254 void svm_range_dma_unmap(struct svm_range *prange)
255 {
256 struct kfd_process_device *pdd;
257 dma_addr_t *dma_addr;
258 struct device *dev;
259 struct kfd_process *p;
260 uint32_t gpuidx;
261
262 p = container_of(prange->svms, struct kfd_process, svms);
263
264 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
265 dma_addr = prange->dma_addr[gpuidx];
266 if (!dma_addr)
267 continue;
268
269 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
270 if (!pdd) {
271 pr_debug("failed to find device idx %d\n", gpuidx);
272 continue;
273 }
274 dev = &pdd->dev->adev->pdev->dev;
275
276 svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
277 }
278 }
279
280 static void svm_range_free(struct svm_range *prange, bool do_unmap)
281 {
282 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
283 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
284 uint32_t gpuidx;
285
286 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
287 prange->start, prange->last);
288
289 svm_range_vram_node_free(prange);
290 if (do_unmap)
291 svm_range_dma_unmap(prange);
292
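/* Without XNACK, the whole range was accounted against the resident system
* memory limit when it was created, so release that accounting here.
*/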
293 if (do_unmap && !p->xnack_enabled) {
294 pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
295 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
296 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
297 }
298
299 /* free dma_addr array for each gpu */
300 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
301 if (prange->dma_addr[gpuidx]) {
302 kvfree(prange->dma_addr[gpuidx]);
303 prange->dma_addr[gpuidx] = NULL;
304 }
305 }
306
307 mutex_destroy(&prange->lock);
308 mutex_destroy(&prange->migrate_mutex);
309 kfree(prange);
310 }
311
312 static void
313 svm_range_set_default_attributes(struct svm_range_list *svms, int32_t *location,
314 int32_t *prefetch_loc, uint8_t *granularity,
315 uint32_t *flags)
316 {
317 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
318 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
319 *granularity = svms->default_granularity;
320 *flags =
321 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
322 }
323
324 static struct
325 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
326 uint64_t last, bool update_mem_usage)
327 {
328 uint64_t size = last - start + 1;
329 struct svm_range *prange;
330 struct kfd_process *p;
331
332 prange = kzalloc_obj(*prange);
333 if (!prange)
334 return NULL;
335
336 p = container_of(svms, struct kfd_process, svms);
337 if (!p->xnack_enabled && update_mem_usage &&
338 amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
339 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
340 pr_info("SVM mapping failed, exceeds resident system memory limit\n");
341 kfree(prange);
342 return NULL;
343 }
344 prange->npages = size;
345 prange->svms = svms;
346 prange->start = start;
347 prange->last = last;
348 INIT_LIST_HEAD(&prange->list);
349 INIT_LIST_HEAD(&prange->update_list);
350 INIT_LIST_HEAD(&prange->svm_bo_list);
351 INIT_LIST_HEAD(&prange->deferred_list);
352 INIT_LIST_HEAD(&prange->child_list);
353 atomic_set(&prange->invalid, 0);
354 prange->validate_timestamp = 0;
355 prange->vram_pages = 0;
356 mutex_init(&prange->migrate_mutex);
357 mutex_init(&prange->lock);
358
359 if (p->xnack_enabled)
360 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
361 MAX_GPU_INSTANCE);
362
363 svm_range_set_default_attributes(svms, &prange->preferred_loc,
364 &prange->prefetch_loc,
365 &prange->granularity, &prange->flags);
366
367 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
368
369 return prange;
370 }
371
372 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
373 {
374 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
375 return false;
376
377 return true;
378 }
379
380 static void svm_range_bo_release(struct kref *kref)
381 {
382 struct svm_range_bo *svm_bo;
383
384 svm_bo = container_of(kref, struct svm_range_bo, kref);
385 pr_debug("svm_bo 0x%p\n", svm_bo);
386
387 spin_lock(&svm_bo->list_lock);
388 while (!list_empty(&svm_bo->range_list)) {
389 struct svm_range *prange =
390 list_first_entry(&svm_bo->range_list,
391 struct svm_range, svm_bo_list);
392 /* list_del_init tells a concurrent svm_range_vram_node_new when
393 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
394 */
395 list_del_init(&prange->svm_bo_list);
396 spin_unlock(&svm_bo->list_lock);
397
398 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
399 prange->start, prange->last);
400 mutex_lock(&prange->lock);
401 prange->svm_bo = NULL;
402 /* prange should not hold vram page now */
403 WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
404 mutex_unlock(&prange->lock);
405
406 spin_lock(&svm_bo->list_lock);
407 }
408 spin_unlock(&svm_bo->list_lock);
409
410 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
411 struct kfd_process_device *pdd;
412 struct kfd_process *p;
413 struct mm_struct *mm;
414
415 mm = svm_bo->eviction_fence->mm;
416 /*
417 * A forked child process takes a reference on the svm_bo device pages, so the
418 * svm_bo can be released after the parent process is gone.
419 */
420 p = kfd_lookup_process_by_mm(mm);
421 if (p) {
422 pdd = kfd_get_process_device_data(svm_bo->node, p);
423 if (pdd)
424 atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
425 kfd_unref_process(p);
426 }
427 mmput(mm);
428 }
429
430 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
431 /* We're not in the eviction worker. Signal the fence. */
432 dma_fence_signal(&svm_bo->eviction_fence->base);
433 dma_fence_put(&svm_bo->eviction_fence->base);
434 amdgpu_bo_unref(&svm_bo->bo);
435 kfree(svm_bo);
436 }
437
438 static void svm_range_bo_wq_release(struct work_struct *work)
439 {
440 struct svm_range_bo *svm_bo;
441
442 svm_bo = container_of(work, struct svm_range_bo, release_work);
443 svm_range_bo_release(&svm_bo->kref);
444 }
445
446 static void svm_range_bo_release_async(struct kref *kref)
447 {
448 struct svm_range_bo *svm_bo;
449
450 svm_bo = container_of(kref, struct svm_range_bo, kref);
451 pr_debug("svm_bo 0x%p\n", svm_bo);
452 INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
453 schedule_work(&svm_bo->release_work);
454 }
455
456 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
457 {
458 kref_put(&svm_bo->kref, svm_range_bo_release_async);
459 }
460
461 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
462 {
463 if (svm_bo)
464 kref_put(&svm_bo->kref, svm_range_bo_release);
465 }
466
467 static bool
468 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
469 {
470 mutex_lock(&prange->lock);
471 if (!prange->svm_bo) {
472 mutex_unlock(&prange->lock);
473 return false;
474 }
475 if (prange->ttm_res) {
476 /* We still have a reference, all is well */
477 mutex_unlock(&prange->lock);
478 return true;
479 }
480 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
481 /*
482 * Migrate from GPU to GPU, remove range from source svm_bo->node
483 * range list, and return false to allocate svm_bo from destination
484 * node.
485 */
486 if (prange->svm_bo->node != node) {
487 mutex_unlock(&prange->lock);
488
489 spin_lock(&prange->svm_bo->list_lock);
490 list_del_init(&prange->svm_bo_list);
491 spin_unlock(&prange->svm_bo->list_lock);
492
493 svm_range_bo_unref(prange->svm_bo);
494 return false;
495 }
496 if (READ_ONCE(prange->svm_bo->evicting)) {
497 struct dma_fence *f;
498 struct svm_range_bo *svm_bo;
499 /* The BO is getting evicted,
500 * we need to get a new one
501 */
502 mutex_unlock(&prange->lock);
503 svm_bo = prange->svm_bo;
504 f = dma_fence_get(&svm_bo->eviction_fence->base);
505 svm_range_bo_unref(prange->svm_bo);
506 /* wait for the fence to avoid long spin-loop
507 * at list_empty_careful
508 */
509 dma_fence_wait(f, false);
510 dma_fence_put(f);
511 } else {
512 /* The BO was still around and we got
513 * a new reference to it
514 */
515 mutex_unlock(&prange->lock);
516 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
517 prange->svms, prange->start, prange->last);
518
519 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
520 return true;
521 }
522
523 } else {
524 mutex_unlock(&prange->lock);
525 }
526
527 /* We need a new svm_bo. Spin-loop to wait for concurrent
528 * svm_range_bo_release to finish removing this range from
529 * its range list and set prange->svm_bo to null. After this,
530 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
531 */
532 while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
533 cond_resched();
534
535 return false;
536 }
537
538 static struct svm_range_bo *svm_range_bo_new(void)
539 {
540 struct svm_range_bo *svm_bo;
541
542 svm_bo = kzalloc_obj(*svm_bo);
543 if (!svm_bo)
544 return NULL;
545
546 kref_init(&svm_bo->kref);
547 INIT_LIST_HEAD(&svm_bo->range_list);
548 spin_lock_init(&svm_bo->list_lock);
549
550 return svm_bo;
551 }
552
553 int
554 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
555 bool clear)
556 {
557 struct kfd_process_device *pdd;
558 struct amdgpu_bo_param bp;
559 struct svm_range_bo *svm_bo;
560 struct amdgpu_bo_user *ubo;
561 struct amdgpu_bo *bo;
562 struct kfd_process *p;
563 struct mm_struct *mm;
564 int r;
565
566 p = container_of(prange->svms, struct kfd_process, svms);
567 pr_debug("process pid: %d svms 0x%p [0x%lx 0x%lx]\n",
568 p->lead_thread->pid, prange->svms,
569 prange->start, prange->last);
570
571 if (svm_range_validate_svm_bo(node, prange))
572 return 0;
573
574 svm_bo = svm_range_bo_new();
575 if (!svm_bo) {
576 pr_debug("failed to alloc svm bo\n");
577 return -ENOMEM;
578 }
579 mm = get_task_mm(p->lead_thread);
580 if (!mm) {
581 pr_debug("failed to get mm\n");
582 kfree(svm_bo);
583 return -ESRCH;
584 }
585 svm_bo->node = node;
586 svm_bo->eviction_fence =
587 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
588 mm,
589 svm_bo, p->context_id);
590 mmput(mm);
591 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
592 svm_bo->evicting = 0;
593 memset(&bp, 0, sizeof(bp));
594 bp.size = prange->npages * PAGE_SIZE;
595 bp.byte_align = PAGE_SIZE;
596 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
597 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
598 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
599 bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
600 bp.type = ttm_bo_type_device;
601 bp.resv = NULL;
602 if (node->xcp)
603 bp.xcp_id_plus1 = node->xcp->id + 1;
604
605 r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
606 if (r) {
607 pr_debug("failed %d to create bo\n", r);
608 goto create_bo_failed;
609 }
610 bo = &ubo->bo;
611
612 pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
613 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
614 bp.xcp_id_plus1 - 1);
615
616 r = amdgpu_bo_reserve(bo, true);
617 if (r) {
618 pr_debug("failed %d to reserve bo\n", r);
619 goto reserve_bo_failed;
620 }
621
622 if (clear) {
623 r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
624 if (r) {
625 pr_debug("failed %d to sync bo\n", r);
626 amdgpu_bo_unreserve(bo);
627 goto reserve_bo_failed;
628 }
629 }
630
631 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
632 if (r) {
633 pr_debug("failed %d to reserve bo\n", r);
634 amdgpu_bo_unreserve(bo);
635 goto reserve_bo_failed;
636 }
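/* Attach the KFD eviction fence so that TTM eviction of this BO goes through
* svm_range_evict_svm_bo_worker instead of moving the memory directly.
*/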
637 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
638
639 amdgpu_bo_unreserve(bo);
640
641 svm_bo->bo = bo;
642 prange->svm_bo = svm_bo;
643 prange->ttm_res = bo->tbo.resource;
644 prange->offset = 0;
645
646 spin_lock(&svm_bo->list_lock);
647 list_add(&prange->svm_bo_list, &svm_bo->range_list);
648 spin_unlock(&svm_bo->list_lock);
649
650 pdd = svm_range_get_pdd_by_node(prange, node);
651 if (pdd)
652 atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
653
654 return 0;
655
656 reserve_bo_failed:
657 amdgpu_bo_unref(&bo);
658 create_bo_failed:
659 dma_fence_put(&svm_bo->eviction_fence->base);
660 kfree(svm_bo);
661 prange->ttm_res = NULL;
662
663 return r;
664 }
665
666 void svm_range_vram_node_free(struct svm_range *prange)
667 {
668 /* serialize prange->svm_bo unref */
669 mutex_lock(&prange->lock);
670 /* prange->svm_bo has not been unreferenced yet */
671 if (prange->ttm_res) {
672 prange->ttm_res = NULL;
673 mutex_unlock(&prange->lock);
674 svm_range_bo_unref(prange->svm_bo);
675 } else
676 mutex_unlock(&prange->lock);
677 }
678
679 struct kfd_node *
680 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
681 {
682 struct kfd_process *p;
683 struct kfd_process_device *pdd;
684
685 p = container_of(prange->svms, struct kfd_process, svms);
686 pdd = kfd_process_device_data_by_id(p, gpu_id);
687 if (!pdd) {
688 pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
689 return NULL;
690 }
691
692 return pdd->dev;
693 }
694
695 struct kfd_process_device *
696 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
697 {
698 struct kfd_process *p;
699
700 p = container_of(prange->svms, struct kfd_process, svms);
701
702 return kfd_get_process_device_data(node, p);
703 }
704
705 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
706 {
707 struct ttm_operation_ctx ctx = { false, false };
708
709 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
710
711 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
712 }
713
714 static int
715 svm_range_check_attr(struct kfd_process *p,
716 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
717 {
718 uint32_t i;
719
720 for (i = 0; i < nattr; i++) {
721 uint32_t val = attrs[i].value;
722 int gpuidx = MAX_GPU_INSTANCE;
723
724 switch (attrs[i].type) {
725 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
726 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
727 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
728 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
729 break;
730 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
731 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
732 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
733 break;
734 case KFD_IOCTL_SVM_ATTR_ACCESS:
735 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
736 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
737 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
738 break;
739 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
740 break;
741 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
742 break;
743 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
744 break;
745 default:
746 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
747 return -EINVAL;
748 }
749
750 if (gpuidx < 0) {
751 pr_debug("no GPU 0x%x found\n", val);
752 return -EINVAL;
753 } else if (gpuidx < MAX_GPU_INSTANCE &&
754 !test_bit(gpuidx, p->svms.bitmap_supported)) {
755 pr_debug("GPU 0x%x not supported\n", val);
756 return -EINVAL;
757 }
758 }
759
760 return 0;
761 }
762
763 static void
764 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
765 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
766 bool *update_mapping)
767 {
768 uint32_t i;
769 int gpuidx;
770
771 for (i = 0; i < nattr; i++) {
772 switch (attrs[i].type) {
773 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
774 prange->preferred_loc = attrs[i].value;
775 break;
776 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
777 prange->prefetch_loc = attrs[i].value;
778 break;
779 case KFD_IOCTL_SVM_ATTR_ACCESS:
780 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
781 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
782 if (!p->xnack_enabled)
783 *update_mapping = true;
784
785 gpuidx = kfd_process_gpuidx_from_gpuid(p,
786 attrs[i].value);
787 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
788 bitmap_clear(prange->bitmap_access, gpuidx, 1);
789 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
790 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
791 bitmap_set(prange->bitmap_access, gpuidx, 1);
792 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
793 } else {
794 bitmap_clear(prange->bitmap_access, gpuidx, 1);
795 bitmap_set(prange->bitmap_aip, gpuidx, 1);
796 }
797 break;
798 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
799 *update_mapping = true;
800 prange->flags |= attrs[i].value;
801 break;
802 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
803 *update_mapping = true;
804 prange->flags &= ~attrs[i].value;
805 break;
806 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
807 prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
808 break;
809 default:
810 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
811 }
812 }
813 }
814
815 static bool
816 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
817 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
818 {
819 uint32_t i;
820 int gpuidx;
821
822 for (i = 0; i < nattr; i++) {
823 switch (attrs[i].type) {
824 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
825 if (prange->preferred_loc != attrs[i].value)
826 return false;
827 break;
828 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
829 /* Prefetch should always trigger a migration even
830 * if the value of the attribute didn't change.
831 */
832 return false;
833 case KFD_IOCTL_SVM_ATTR_ACCESS:
834 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
835 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
836 gpuidx = kfd_process_gpuidx_from_gpuid(p,
837 attrs[i].value);
838 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
839 if (test_bit(gpuidx, prange->bitmap_access) ||
840 test_bit(gpuidx, prange->bitmap_aip))
841 return false;
842 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
843 if (!test_bit(gpuidx, prange->bitmap_access))
844 return false;
845 } else {
846 if (!test_bit(gpuidx, prange->bitmap_aip))
847 return false;
848 }
849 break;
850 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
851 if ((prange->flags & attrs[i].value) != attrs[i].value)
852 return false;
853 break;
854 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
855 if ((prange->flags & attrs[i].value) != 0)
856 return false;
857 break;
858 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
859 if (prange->granularity != attrs[i].value)
860 return false;
861 break;
862 default:
863 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
864 }
865 }
866
867 return true;
868 }
869
870 /**
871 * svm_range_debug_dump - print all range information from svms
872 * @svms: svm range list header
873 *
874 * Print each svm range's start, end and location from the svms interval
875 * tree and linked list, for debugging.
876 *
877 * Context: The caller must hold svms->lock
878 */
879 static void svm_range_debug_dump(struct svm_range_list *svms)
880 {
881 struct interval_tree_node *node;
882 struct svm_range *prange;
883
884 pr_debug("dump svms 0x%p list\n", svms);
885 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
886
887 list_for_each_entry(prange, &svms->list, list) {
888 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
889 prange, prange->start, prange->npages,
890 prange->start + prange->npages - 1,
891 prange->actual_loc);
892 }
893
894 pr_debug("dump svms 0x%p interval tree\n", svms);
895 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
896 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
897 while (node) {
898 prange = container_of(node, struct svm_range, it_node);
899 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
900 prange, prange->start, prange->npages,
901 prange->start + prange->npages - 1,
902 prange->actual_loc);
903 node = interval_tree_iter_next(node, 0, ~0ULL);
904 }
905 }
906
907 static void *
908 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
909 uint64_t offset, uint64_t *vram_pages)
910 {
911 unsigned char *src = (unsigned char *)psrc + offset;
912 unsigned char *dst;
913 uint64_t i;
914
915 dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
916 if (!dst)
917 return NULL;
918
919 if (!vram_pages) {
920 memcpy(dst, src, num_elements * size);
921 return (void *)dst;
922 }
923
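/* Copy element by element and count the entries that carry the VRAM domain
* flag, so the caller knows how many VRAM pages ended up in the new array.
*/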
924 *vram_pages = 0;
925 for (i = 0; i < num_elements; i++) {
926 dma_addr_t *temp;
927 temp = (dma_addr_t *)dst + i;
928 *temp = *((dma_addr_t *)src + i);
929 if (*temp&SVM_RANGE_VRAM_DOMAIN)
930 (*vram_pages)++;
931 }
932
933 return (void *)dst;
934 }
935
936 static int
937 svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
938 {
939 int i;
940
941 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
942 if (!src->dma_addr[i])
943 continue;
944 dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
945 sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
946 if (!dst->dma_addr[i])
947 return -ENOMEM;
948 }
949
950 return 0;
951 }
952
953 static int
954 svm_range_split_array(void *ppnew, void *ppold, size_t size,
955 uint64_t old_start, uint64_t old_n,
956 uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
957 {
958 unsigned char *new, *old, *pold;
959 uint64_t d;
960
961 if (!ppold)
962 return 0;
963 pold = *(unsigned char **)ppold;
964 if (!pold)
965 return 0;
966
967 d = (new_start - old_start) * size;
968 /* get dma addr array for new range and calculate its vram page number */
969 new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
970 if (!new)
971 return -ENOMEM;
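/* The old range keeps whatever was not copied to the new range: skip the
* first new_n elements if the new range was split off the front, otherwise
* keep the front of the array.
*/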
972 d = (new_start == old_start) ? new_n * size : 0;
973 old = svm_range_copy_array(pold, size, old_n, d, NULL);
974 if (!old) {
975 kvfree(new);
976 return -ENOMEM;
977 }
978 kvfree(pold);
979 *(void **)ppold = old;
980 *(void **)ppnew = new;
981
982 return 0;
983 }
984
985 static int
986 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
987 uint64_t start, uint64_t last)
988 {
989 uint64_t npages = last - start + 1;
990 int i, r;
991
992 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
993 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
994 sizeof(*old->dma_addr[i]), old->start,
995 npages, new->start, new->npages,
996 old->actual_loc ? &new->vram_pages : NULL);
997 if (r)
998 return r;
999 }
1000 if (old->actual_loc)
1001 old->vram_pages -= new->vram_pages;
1002
1003 return 0;
1004 }
1005
1006 static int
1007 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
1008 uint64_t start, uint64_t last)
1009 {
1010 uint64_t npages = last - start + 1;
1011
1012 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
1013 new->svms, new, new->start, start, last);
1014
1015 if (new->start == old->start) {
1016 new->offset = old->offset;
1017 old->offset += new->npages;
1018 } else {
1019 new->offset = old->offset + npages;
1020 }
1021
1022 new->svm_bo = svm_range_bo_ref(old->svm_bo);
1023 new->ttm_res = old->ttm_res;
1024
1025 spin_lock(&new->svm_bo->list_lock);
1026 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1027 spin_unlock(&new->svm_bo->list_lock);
1028
1029 return 0;
1030 }
1031
1032 /**
1033 * svm_range_split_adjust - split range and adjust
1034 *
1035 * @new: new range
1036 * @old: the old range
1037 * @start: the old range adjust to start address in pages
1038 * @last: the old range adjust to last address in pages
1039 *
1040 * Copy the system memory dma_addr or vram ttm_res of the old range to the new
1041 * range, from new_start up to size new->npages. The remaining old range runs
1042 * from start to last.
1043 *
1044 * Return:
1045 * 0 - OK, -ENOMEM - out of memory
1046 */
1047 static int
1048 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
1049 uint64_t start, uint64_t last)
1050 {
1051 int r;
1052
1053 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1054 new->svms, new->start, old->start, old->last, start, last);
1055
1056 if (new->start < old->start ||
1057 new->last > old->last) {
1058 WARN_ONCE(1, "invalid new range start or last\n");
1059 return -EINVAL;
1060 }
1061
1062 r = svm_range_split_pages(new, old, start, last);
1063 if (r)
1064 return r;
1065
1066 if (old->actual_loc && old->ttm_res) {
1067 r = svm_range_split_nodes(new, old, start, last);
1068 if (r)
1069 return r;
1070 }
1071
1072 old->npages = last - start + 1;
1073 old->start = start;
1074 old->last = last;
1075 new->flags = old->flags;
1076 new->preferred_loc = old->preferred_loc;
1077 new->prefetch_loc = old->prefetch_loc;
1078 new->actual_loc = old->actual_loc;
1079 new->granularity = old->granularity;
1080 new->mapped_to_gpu = old->mapped_to_gpu;
1081 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1082 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1083 atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
1084
1085 return 0;
1086 }
1087
1088 /**
1089 * svm_range_split - split a range in 2 ranges
1090 *
1091 * @prange: the svm range to split
1092 * @start: the remaining range start address in pages
1093 * @last: the remaining range last address in pages
1094 * @new: the result new range generated
1095 *
1096 * Two cases only:
1097 * case 1: if start == prange->start
1098 * prange ==> prange[start, last]
1099 * new range [last + 1, prange->last]
1100 *
1101 * case 2: if last == prange->last
1102 * prange ==> prange[start, last]
1103 * new range [prange->start, start - 1]
1104 *
1105 * Return:
1106 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1107 */
1108 static int
1109 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1110 struct svm_range **new)
1111 {
1112 uint64_t old_start = prange->start;
1113 uint64_t old_last = prange->last;
1114 struct svm_range_list *svms;
1115 int r = 0;
1116
1117 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1118 old_start, old_last, start, last);
1119
1120 if (old_start != start && old_last != last)
1121 return -EINVAL;
1122 if (start < old_start || last > old_last)
1123 return -EINVAL;
1124
1125 svms = prange->svms;
1126 if (old_start == start)
1127 *new = svm_range_new(svms, last + 1, old_last, false);
1128 else
1129 *new = svm_range_new(svms, old_start, start - 1, false);
1130 if (!*new)
1131 return -ENOMEM;
1132
1133 r = svm_range_split_adjust(*new, prange, start, last);
1134 if (r) {
1135 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1136 r, old_start, old_last, start, last);
1137 svm_range_free(*new, false);
1138 *new = NULL;
1139 }
1140
1141 return r;
1142 }
1143
1144 static int
1145 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
1146 struct list_head *insert_list, struct list_head *remap_list)
1147 {
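/* 512 4K pages correspond to one 2MB huge-page mapping. If the range covered
* at least one fully aligned 2MB block and the split point falls inside it
* without 2MB alignment, the new piece must be remapped because the split
* breaks the existing huge-page mapping.
*/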
1148 unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
1149 unsigned long start_align = ALIGN(prange->start, 512);
1150 bool huge_page_mapping = last_align_down > start_align;
1151 struct svm_range *tail = NULL;
1152 int r;
1153
1154 r = svm_range_split(prange, prange->start, new_last, &tail);
1155
1156 if (r)
1157 return r;
1158
1159 list_add(&tail->list, insert_list);
1160
1161 if (huge_page_mapping && tail->start > start_align &&
1162 tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512)))
1163 list_add(&tail->update_list, remap_list);
1164
1165 return 0;
1166 }
1167
1168 static int
1169 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
1170 struct list_head *insert_list, struct list_head *remap_list)
1171 {
1172 unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
1173 unsigned long start_align = ALIGN(prange->start, 512);
1174 bool huge_page_mapping = last_align_down > start_align;
1175 struct svm_range *head = NULL;
1176 int r;
1177
1178 r = svm_range_split(prange, new_start, prange->last, &head);
1179
1180 if (r)
1181 return r;
1182
1183 list_add(&head->list, insert_list);
1184
1185 if (huge_page_mapping && head->last + 1 > start_align &&
1186 head->last + 1 < last_align_down && (!IS_ALIGNED(head->last, 512)))
1187 list_add(&head->update_list, remap_list);
1188
1189 return 0;
1190 }
1191
1192 static void
1193 svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op)
1194 {
1195 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1196 pchild, pchild->start, pchild->last, prange, op);
1197
1198 pchild->work_item.mm = NULL;
1199 pchild->work_item.op = op;
1200 list_add_tail(&pchild->child_list, &prange->child_list);
1201 }
1202
1203 static bool
1204 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1205 {
1206 return (node_a->adev == node_b->adev ||
1207 amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
1208 }
1209
1210 static uint64_t
1211 svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
1212 struct svm_range *prange, int domain)
1213 {
1214 struct kfd_node *bo_node;
1215 uint32_t flags = prange->flags;
1216 uint32_t mapping_flags = 0;
1217 uint32_t gc_ip_version = KFD_GC_VERSION(node);
1218 uint64_t pte_flags;
1219 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1220 bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
1221 bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
1222 unsigned int mtype_local;
1223
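/* bo_node is only assigned for VRAM-resident ranges; the branches below only
* dereference it when domain == SVM_RANGE_VRAM_DOMAIN.
*/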
1224 if (domain == SVM_RANGE_VRAM_DOMAIN)
1225 bo_node = prange->svm_bo->node;
1226
1227 switch (gc_ip_version) {
1228 case IP_VERSION(9, 4, 1):
1229 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1230 if (bo_node == node) {
1231 mapping_flags |= coherent ?
1232 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1233 } else {
1234 mapping_flags |= coherent ?
1235 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1236 if (svm_nodes_in_same_hive(node, bo_node))
1237 snoop = true;
1238 }
1239 } else {
1240 mapping_flags |= coherent ?
1241 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1242 }
1243 break;
1244 case IP_VERSION(9, 4, 2):
1245 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1246 if (bo_node == node) {
1247 mapping_flags |= coherent ?
1248 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1249 if (node->adev->gmc.xgmi.connected_to_cpu)
1250 snoop = true;
1251 } else {
1252 mapping_flags |= coherent ?
1253 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1254 if (svm_nodes_in_same_hive(node, bo_node))
1255 snoop = true;
1256 }
1257 } else {
1258 mapping_flags |= coherent ?
1259 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1260 }
1261 break;
1262 case IP_VERSION(9, 4, 3):
1263 case IP_VERSION(9, 4, 4):
1264 case IP_VERSION(9, 5, 0):
1265 if (ext_coherent)
1266 mtype_local = AMDGPU_VM_MTYPE_CC;
1267 else
1268 mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1269 amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1270 snoop = true;
1271 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1272 /* local HBM region close to partition */
1273 if (bo_node->adev == node->adev &&
1274 (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
1275 mapping_flags |= mtype_local;
1276 /* local HBM region far from partition or remote XGMI GPU
1277 * with regular system scope coherence
1278 */
1279 else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
1280 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1281 /* PCIe P2P on GPUs pre-9.5.0 */
1282 else if (gc_ip_version < IP_VERSION(9, 5, 0) &&
1283 !svm_nodes_in_same_hive(bo_node, node))
1284 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1285 /* Other remote memory */
1286 else
1287 mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1288 /* system memory accessed by the APU */
1289 } else if (node->adev->flags & AMD_IS_APU) {
1290 /* On NUMA systems, locality is determined per-page
1291 * in amdgpu_gmc_override_vm_pte_flags
1292 */
1293 if (num_possible_nodes() <= 1)
1294 mapping_flags |= mtype_local;
1295 else
1296 mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1297 /* system memory accessed by the dGPU */
1298 } else {
1299 if (gc_ip_version < IP_VERSION(9, 5, 0) || ext_coherent)
1300 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1301 else
1302 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1303 }
1304 break;
1305 case IP_VERSION(12, 0, 0):
1306 case IP_VERSION(12, 0, 1):
1307 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1308 break;
1309 case IP_VERSION(12, 1, 0):
1310 snoop = true;
1311 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1312 mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1313 AMDGPU_VM_MTYPE_RW;
1314 /* local HBM */
1315 if (bo_node->adev == node->adev)
1316 mapping_flags |= mtype_local;
1317 /* Remote GPU memory */
1318 else
1319 mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC :
1320 AMDGPU_VM_MTYPE_NC;
1321 /* system memory accessed by the dGPU */
1322 } else {
1323 mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1324 }
1325 break;
1326 default:
1327 mapping_flags |= coherent ?
1328 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1329 }
1330
1331 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1332 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1333
1334 pte_flags = AMDGPU_PTE_VALID;
1335 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1336 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1337 if (gc_ip_version >= IP_VERSION(12, 0, 0))
1338 pte_flags |= AMDGPU_PTE_IS_PTE;
1339
1340 amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
1341 pte_flags |= AMDGPU_PTE_READABLE;
1342 if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
1343 pte_flags |= AMDGPU_PTE_WRITEABLE;
1344
1345 if ((gc_ip_version == IP_VERSION(12, 1, 0)) &&
1346 node->adev->have_atomics_support)
1347 pte_flags |= AMDGPU_PTE_BUS_ATOMICS;
1348
1349 return pte_flags;
1350 }
1351
1352 static int
1353 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1354 uint64_t start, uint64_t last,
1355 struct dma_fence **fence)
1356 {
1357 uint64_t init_pte_value = adev->gmc.init_pte_flags;
1358 uint64_t gpu_start, gpu_end;
1359
1360 /* Convert CPU page range to GPU page range */
1361 gpu_start = start * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1362 gpu_end = (last + 1) * AMDGPU_GPU_PAGES_IN_CPU_PAGE - 1;
1363
1364 pr_debug("CPU[0x%llx 0x%llx] -> GPU[0x%llx 0x%llx]\n", start, last,
1365 gpu_start, gpu_end);
1366 return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, gpu_start,
1367 gpu_end, init_pte_value, 0, 0, NULL, NULL,
1368 fence);
1369 }
1370
1371 static int
1372 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1373 unsigned long last, uint32_t trigger)
1374 {
1375 struct kfd_process_device *pdd;
1376 struct dma_fence *fence = NULL;
1377 struct kfd_process *p;
1378 uint32_t gpuidx;
1379 int r = 0;
1380
1381 if (!prange->mapped_to_gpu) {
1382 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1383 prange, prange->start, prange->last);
1384 return 0;
1385 }
1386
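/* Only clear mapped_to_gpu when the entire range is unmapped; a partial unmap
* leaves the rest of the range mapped.
*/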
1387 if (prange->start == start && prange->last == last) {
1388 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1389 prange->mapped_to_gpu = false;
1390 }
1391
1392 p = container_of(prange->svms, struct kfd_process, svms);
1393
1394 for_each_or_bit(gpuidx, prange->bitmap_access, prange->bitmap_aip, MAX_GPU_INSTANCE) {
1395 pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1396 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1397 if (!pdd) {
1398 pr_debug("failed to find device idx %d\n", gpuidx);
1399 return -EINVAL;
1400 }
1401
1402 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1403 start, last, trigger);
1404
1405 r = svm_range_unmap_from_gpu(pdd->dev->adev,
1406 drm_priv_to_vm(pdd->drm_priv),
1407 start, last, &fence);
1408 if (r)
1409 break;
1410
1411 if (fence) {
1412 r = dma_fence_wait(fence, false);
1413 dma_fence_put(fence);
1414 fence = NULL;
1415 if (r)
1416 break;
1417 }
1418 kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
1419 }
1420
1421 return r;
1422 }
1423
1424 static int
1425 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1426 unsigned long offset, unsigned long npages, bool readonly,
1427 dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1428 struct dma_fence **fence, bool flush_tlb)
1429 {
1430 struct amdgpu_device *adev = pdd->dev->adev;
1431 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1432 uint64_t pte_flags;
1433 unsigned long last_start;
1434 int last_domain;
1435 int r = 0;
1436 int64_t i, j;
1437
1438 last_start = prange->start + offset;
1439
1440 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1441 last_start, last_start + npages - 1, readonly);
1442
1443 for (i = offset; i < offset + npages; i++) {
1444 uint64_t gpu_start;
1445 uint64_t gpu_end;
1446
1447 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1448 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1449
1450 /* Collect all pages in the same address range and memory domain
1451 * that can be mapped with a single call to update mapping.
1452 */
1453 if (i < offset + npages - 1 &&
1454 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1455 continue;
1456
1457 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1458 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1459
1460 pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
1461 if (readonly)
1462 pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1463
1464
1465 /* For dGPU mode, the same vm_manager allocates VRAM for different
1466 * memory partitions based on fpfn/lpfn, so use the same
1467 * vm_manager.vram_base_offset regardless of the memory partition.
1468 */
1469 gpu_start = last_start * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1470 gpu_end = (prange->start + i + 1) * AMDGPU_GPU_PAGES_IN_CPU_PAGE - 1;
1471
1472 pr_debug("svms 0x%p map CPU[0x%lx 0x%llx] GPU[0x%llx 0x%llx] vram %d PTE 0x%llx\n",
1473 prange->svms, last_start, prange->start + i,
1474 gpu_start, gpu_end,
1475 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1476 pte_flags);
1477
1478 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
1479 NULL, gpu_start, gpu_end,
1480 pte_flags,
1481 (last_start - prange->start) << PAGE_SHIFT,
1482 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1483 NULL, dma_addr, &vm->last_update);
1484
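/* Re-apply the VRAM domain flag that was masked off above so later unmap and
* remap passes can still tell VRAM pages from system pages.
*/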
1485 for (j = last_start - prange->start; j <= i; j++)
1486 dma_addr[j] |= last_domain;
1487
1488 if (r) {
1489 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1490 goto out;
1491 }
1492 last_start = prange->start + i + 1;
1493 }
1494
1495 r = amdgpu_vm_update_pdes(adev, vm, false);
1496 if (r) {
1497 pr_debug("failed %d to update directories 0x%lx\n", r,
1498 prange->start);
1499 goto out;
1500 }
1501
1502 if (fence)
1503 *fence = dma_fence_get(vm->last_update);
1504
1505 out:
1506 return r;
1507 }
1508
1509 static int
1510 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1511 unsigned long npages, bool readonly,
1512 unsigned long *bitmap, bool wait, bool flush_tlb)
1513 {
1514 struct kfd_process_device *pdd;
1515 struct amdgpu_device *bo_adev = NULL;
1516 struct kfd_process *p;
1517 struct dma_fence *fence = NULL;
1518 uint32_t gpuidx;
1519 int r = 0;
1520
1521 if (prange->svm_bo && prange->ttm_res)
1522 bo_adev = prange->svm_bo->node->adev;
1523
1524 p = container_of(prange->svms, struct kfd_process, svms);
1525 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1526 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1527 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1528 if (!pdd) {
1529 pr_debug("failed to find device idx %d\n", gpuidx);
1530 return -EINVAL;
1531 }
1532
1533 pdd = kfd_bind_process_to_device(pdd->dev, p);
1534 if (IS_ERR(pdd))
1535 return -EINVAL;
1536
1537 if (bo_adev && pdd->dev->adev != bo_adev &&
1538 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1539 pr_debug("cannot map to device idx %d\n", gpuidx);
1540 continue;
1541 }
1542
1543 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1544 prange->dma_addr[gpuidx],
1545 bo_adev, wait ? &fence : NULL,
1546 flush_tlb);
1547 if (r)
1548 break;
1549
1550 if (fence) {
1551 r = dma_fence_wait(fence, false);
1552 dma_fence_put(fence);
1553 fence = NULL;
1554 if (r) {
1555 pr_debug("failed %d to dma fence wait\n", r);
1556 break;
1557 }
1558 }
1559
1560 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1561 }
1562
1563 return r;
1564 }
1565
1566 struct svm_validate_context {
1567 struct kfd_process *process;
1568 struct svm_range *prange;
1569 bool intr;
1570 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1571 struct drm_exec exec;
1572 };
1573
1574 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
1575 {
1576 struct kfd_process_device *pdd;
1577 struct amdgpu_vm *vm;
1578 uint32_t gpuidx;
1579 int r;
1580
1581 drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0, 0);
1582 drm_exec_until_all_locked(&ctx->exec) {
1583 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1584 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1585 if (!pdd) {
1586 pr_debug("failed to find device idx %d\n", gpuidx);
1587 r = -EINVAL;
1588 goto unreserve_out;
1589 }
1590 vm = drm_priv_to_vm(pdd->drm_priv);
1591
1592 r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1593 drm_exec_retry_on_contention(&ctx->exec);
1594 if (unlikely(r)) {
1595 pr_debug("failed %d to reserve bo\n", r);
1596 goto unreserve_out;
1597 }
1598 }
1599 }
1600
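/* With the page directories reserved, validate any evicted per-VM BOs so the
* page table updates below can proceed.
*/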
1601 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1602 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1603 if (!pdd) {
1604 pr_debug("failed to find device idx %d\n", gpuidx);
1605 r = -EINVAL;
1606 goto unreserve_out;
1607 }
1608
1609 r = amdgpu_vm_validate(pdd->dev->adev,
1610 drm_priv_to_vm(pdd->drm_priv), NULL,
1611 svm_range_bo_validate, NULL);
1612 if (r) {
1613 pr_debug("failed %d validate pt bos\n", r);
1614 goto unreserve_out;
1615 }
1616 }
1617
1618 return 0;
1619
1620 unreserve_out:
1621 drm_exec_fini(&ctx->exec);
1622 return r;
1623 }
1624
1625 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1626 {
1627 drm_exec_fini(&ctx->exec);
1628 }
1629
1630 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1631 {
1632 struct kfd_process_device *pdd;
1633
1634 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1635 if (!pdd)
1636 return NULL;
1637
1638 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1639 }
1640
1641 /*
1642 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1643 *
1644 * To prevent concurrent destruction or change of range attributes, the
1645 * svm_read_lock must be held. The caller must not hold the svm_write_lock
1646 * because that would block concurrent evictions and lead to deadlocks. To
1647 * serialize concurrent migrations or validations of the same range, the
1648 * prange->migrate_mutex must be held.
1649 *
1650 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1651 * eviction fence).
1652 *
1653 * The following sequence ensures race-free validation and GPU mapping:
1654 *
1655 * 1. Reserve page table (and SVM BO if range is in VRAM)
1656 * 2. hmm_range_fault to get page addresses (if system memory)
1657 * 3. DMA-map pages (if system memory)
1658 * 4-a. Take notifier lock
1659 * 4-b. Check that pages still valid (mmu_interval_read_retry)
1660 * 4-c. Check that the range was not split or otherwise invalidated
1661 * 4-d. Update GPU page table
1662 * 4-e. Release notifier lock
1663 * 5. Release page table (and SVM BO) reservation
1664 */
1665 static int svm_range_validate_and_map(struct mm_struct *mm,
1666 unsigned long map_start, unsigned long map_last,
1667 struct svm_range *prange, int32_t gpuidx,
1668 bool intr, bool wait, bool flush_tlb)
1669 {
1670 struct svm_validate_context *ctx;
1671 unsigned long start, end, addr;
1672 struct kfd_process *p;
1673 void *owner;
1674 int32_t idx;
1675 int r = 0;
1676
1677 ctx = kzalloc_obj(struct svm_validate_context);
1678 if (!ctx)
1679 return -ENOMEM;
1680 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1681 ctx->prange = prange;
1682 ctx->intr = intr;
1683
1684 if (gpuidx < MAX_GPU_INSTANCE) {
1685 bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1686 bitmap_set(ctx->bitmap, gpuidx, 1);
1687 } else if (ctx->process->xnack_enabled) {
1688 bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1689
1690 /* If the range is prefetched to a GPU, or a GPU retry fault migrates the
1691 * range to a GPU that has the ACCESS attribute for the range, create the
1692 * mapping on that GPU.
1693 */
1694 if (prange->actual_loc) {
1695 gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1696 prange->actual_loc);
1697 if (gpuidx < 0) {
1698 WARN_ONCE(1, "failed get device by id 0x%x\n",
1699 prange->actual_loc);
1700 r = -EINVAL;
1701 goto free_ctx;
1702 }
1703 if (test_bit(gpuidx, prange->bitmap_access))
1704 bitmap_set(ctx->bitmap, gpuidx, 1);
1705 }
1706
1707 /*
1708 * If prange is already mapped or with always mapped flag,
1709 * update mapping on GPUs with ACCESS attribute
1710 */
1711 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1712 if (prange->mapped_to_gpu ||
1713 prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1714 bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1715 }
1716 } else {
1717 bitmap_or(ctx->bitmap, prange->bitmap_access,
1718 prange->bitmap_aip, MAX_GPU_INSTANCE);
1719 }
1720
1721 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1722 r = 0;
1723 goto free_ctx;
1724 }
1725
1726 if (prange->actual_loc && !prange->ttm_res) {
1727 /* This should never happen. actual_loc gets set by
1728 * svm_migrate_ram_to_vram after allocating a BO.
1729 */
1730 WARN_ONCE(1, "VRAM BO missing during validation\n");
1731 r = -EINVAL;
1732 goto free_ctx;
1733 }
1734
1735 r = svm_range_reserve_bos(ctx, intr);
1736 if (r)
1737 goto free_ctx;
1738
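	/* All GPUs being mapped must agree on the page-map owner passed to HMM:
	 * a single common owner lets hmm_range_fault return device-private pages
	 * that belong to that owner without migrating them. If the GPUs below
	 * disagree, owner is left NULL and such pages are faulted back to system
	 * memory first.
	 */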
1739 p = container_of(prange->svms, struct kfd_process, svms);
1740 owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1741 MAX_GPU_INSTANCE));
1742 for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1743 if (kfd_svm_page_owner(p, idx) != owner) {
1744 owner = NULL;
1745 break;
1746 }
1747 }
1748
1749 start = map_start << PAGE_SHIFT;
1750 end = (map_last + 1) << PAGE_SHIFT;
1751 for (addr = start; !r && addr < end; ) {
1752 struct amdgpu_hmm_range *range = NULL;
1753 unsigned long map_start_vma;
1754 unsigned long map_last_vma;
1755 struct vm_area_struct *vma;
1756 unsigned long next = 0;
1757 unsigned long offset;
1758 unsigned long npages;
1759 bool readonly;
1760
1761 vma = vma_lookup(mm, addr);
1762 if (vma) {
1763 readonly = !(vma->vm_flags & VM_WRITE);
1764
1765 next = min(vma->vm_end, end);
1766 npages = (next - addr) >> PAGE_SHIFT;
1767 /* HMM requires at least READ permission. If provided with PROT_NONE,
1768 * unmap the memory. If it's not already mapped, this is a no-op.
1769 * If PROT_WRITE is provided without READ, warn first, then unmap.
1770 */
1771 if (!(vma->vm_flags & VM_READ)) {
1772 unsigned long e, s;
1773
1774 svm_range_lock(prange);
1775 if (vma->vm_flags & VM_WRITE)
1776 pr_debug("VM_WRITE without VM_READ is not supported");
1777 s = max(start, prange->start);
1778 e = min(end, prange->last);
1779 if (e >= s)
1780 r = svm_range_unmap_from_gpus(prange, s, e,
1781 KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
1782 svm_range_unlock(prange);
1783 /* If unmap returns non-zero, we'll bail on the next for loop
1784 * iteration, so just leave r and continue
1785 */
1786 addr = next;
1787 continue;
1788 }
1789
1790 WRITE_ONCE(p->svms.faulting_task, current);
1791 range = amdgpu_hmm_range_alloc(NULL);
1792 if (likely(range))
1793 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1794 readonly, owner, range);
1795 else
1796 r = -ENOMEM;
1797 WRITE_ONCE(p->svms.faulting_task, NULL);
1798 if (r)
1799 pr_debug("failed %d to get svm range pages\n", r);
1800 } else {
1801 r = -EFAULT;
1802 }
1803
1804 if (!r) {
1805 offset = (addr >> PAGE_SHIFT) - prange->start;
1806 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1807 range->hmm_range.hmm_pfns);
1808 if (r)
1809 pr_debug("failed %d to dma map range\n", r);
1810 }
1811
1812 svm_range_lock(prange);
1813
1814 /* Free the backing memory of the hmm_range if it was initialized.
1815 * Override the return value with -EAGAIN only if the prior steps
1816 * were successful.
1817 */
1818 if (range && !amdgpu_hmm_range_valid(range) && !r) {
1819 pr_debug("hmm update the range, need validate again\n");
1820 r = -EAGAIN;
1821 }
1822
1823 /* Free the hmm range */
1824 amdgpu_hmm_range_free(range);
1825
1826 if (!r && !list_empty(&prange->child_list)) {
1827 pr_debug("range split by unmap in parallel, validate again\n");
1828 r = -EAGAIN;
1829 }
1830
1831 if (!r) {
1832 map_start_vma = max(map_start, prange->start + offset);
1833 map_last_vma = min(map_last, prange->start + offset + npages - 1);
1834 if (map_start_vma <= map_last_vma) {
1835 offset = map_start_vma - prange->start;
1836 npages = map_last_vma - map_start_vma + 1;
1837 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1838 ctx->bitmap, wait, flush_tlb);
1839 }
1840 }
1841
1842 if (!r && next == end)
1843 prange->mapped_to_gpu = true;
1844
1845 svm_range_unlock(prange);
1846
1847 addr = next;
1848 }
1849
1850 svm_range_unreserve_bos(ctx);
1851 if (!r)
1852 prange->validate_timestamp = ktime_get_boottime();
1853
1854 free_ctx:
1855 kfree(ctx);
1856
1857 return r;
1858 }
1859
1860 /**
1861 * svm_range_list_lock_and_flush_work - flush pending deferred work
1862 *
1863 * @svms: the svm range list
1864 * @mm: the mm structure
1865 *
1866 * Context: Returns with mmap write lock held, pending deferred work flushed
1867 *
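 * A typical caller pattern looks like the following sketch (see
 * svm_range_restore_work below for a real user):
 *
 *	svm_range_list_lock_and_flush_work(svms, mm);
 *	mutex_lock(&svms->lock);
 *	... update svm ranges ...
 *	mutex_unlock(&svms->lock);
 *	mmap_write_unlock(mm);
 *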
1868 */
1869 void
1870 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1871 struct mm_struct *mm)
1872 {
1873 retry_flush_work:
1874 flush_work(&svms->deferred_list_work);
1875 mmap_write_lock(mm);
1876
1877 if (list_empty(&svms->deferred_range_list))
1878 return;
1879 mmap_write_unlock(mm);
1880 pr_debug("retry flush\n");
1881 goto retry_flush_work;
1882 }
1883
1884 static void svm_range_restore_work(struct work_struct *work)
1885 {
1886 struct delayed_work *dwork = to_delayed_work(work);
1887 struct amdkfd_process_info *process_info;
1888 struct svm_range_list *svms;
1889 struct svm_range *prange;
1890 struct kfd_process *p;
1891 struct mm_struct *mm;
1892 int evicted_ranges;
1893 int invalid;
1894 int r;
1895
1896 svms = container_of(dwork, struct svm_range_list, restore_work);
1897 evicted_ranges = atomic_read(&svms->evicted_ranges);
1898 if (!evicted_ranges)
1899 return;
1900
1901 pr_debug("restore svm ranges\n");
1902
1903 p = container_of(svms, struct kfd_process, svms);
1904 process_info = p->kgd_process_info;
1905
1906 /* Keep a reference to the mm while svm_range_validate_and_map maps the ranges */
1907 mm = get_task_mm(p->lead_thread);
1908 if (!mm) {
1909 pr_debug("svms 0x%p process mm gone\n", svms);
1910 return;
1911 }
1912
1913 mutex_lock(&process_info->lock);
1914 svm_range_list_lock_and_flush_work(svms, mm);
1915 mutex_lock(&svms->lock);
1916
1917 evicted_ranges = atomic_read(&svms->evicted_ranges);
1918
1919 list_for_each_entry(prange, &svms->list, list) {
1920 invalid = atomic_read(&prange->invalid);
1921 if (!invalid)
1922 continue;
1923
1924 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1925 prange->svms, prange, prange->start, prange->last,
1926 invalid);
1927
1928 /*
1929 * If the range is migrating, wait until the migration is done.
1930 */
1931 mutex_lock(&prange->migrate_mutex);
1932
1933 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
1934 MAX_GPU_INSTANCE, false, true, false);
1935 if (r)
1936 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1937 prange->start);
1938
1939 mutex_unlock(&prange->migrate_mutex);
1940 if (r)
1941 goto out_reschedule;
1942
1943 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1944 goto out_reschedule;
1945 }
1946
1947 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1948 evicted_ranges)
1949 goto out_reschedule;
1950
1951 evicted_ranges = 0;
1952
1953 r = kgd2kfd_resume_mm(mm);
1954 if (r) {
1955 /* No recovery from this failure. Probably the CP is
1956 * hanging. No point trying again.
1957 */
1958 pr_debug("failed %d to resume KFD\n", r);
1959 }
1960
1961 pr_debug("restore svm ranges successfully\n");
1962
1963 out_reschedule:
1964 mutex_unlock(&svms->lock);
1965 mmap_write_unlock(mm);
1966 mutex_unlock(&process_info->lock);
1967
1968 /* If validation failed, reschedule another attempt */
1969 if (evicted_ranges) {
1970 pr_debug("reschedule to restore svm range\n");
1971 queue_delayed_work(system_freezable_wq, &svms->restore_work,
1972 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1973
1974 kfd_smi_event_queue_restore_rescheduled(mm);
1975 }
1976 mmput(mm);
1977 }
1978
1979 /**
1980 * svm_range_evict - evict svm range
1981 * @prange: svm range structure
1982 * @mm: current process mm_struct
1983 * @start: start address of the range being invalidated, in pages
1984 * @last: last address of the range being invalidated, in pages
1985 * @event: mmu notifier event when range is evicted or migrated
1986 *
1987 * Stop all queues of the process to ensure the GPU doesn't access the
1988 * memory, then return to let the CPU evict the buffer and proceed with the
1989 * CPU page table update.
1990 *
1991 * No lock is needed to synchronize CPU page table invalidation with GPU
1992 * execution. If invalidation happens while the restore work is running, the
1993 * restore work restarts to map the latest CPU pages to the GPU and then starts the queues.
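 *
 * With XNACK off (or for ranges with the GPU_ALWAYS_MAPPED flag) the queues are
 * quiesced and the restore worker is scheduled; with XNACK on the range is only
 * unmapped from the GPUs and retry faults restore the mapping on demand.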
1994 */
1995 static int
1996 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1997 unsigned long start, unsigned long last,
1998 enum mmu_notifier_event event)
1999 {
2000 struct svm_range_list *svms = prange->svms;
2001 struct svm_range *pchild;
2002 struct kfd_process *p;
2003 int r = 0;
2004
2005 p = container_of(svms, struct kfd_process, svms);
2006
2007 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2008 svms, prange->start, prange->last, start, last);
2009
2010 if (!p->xnack_enabled ||
2011 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
2012 int evicted_ranges;
2013 bool mapped = prange->mapped_to_gpu;
2014
2015 list_for_each_entry(pchild, &prange->child_list, child_list) {
2016 if (!pchild->mapped_to_gpu)
2017 continue;
2018 mapped = true;
2019 mutex_lock_nested(&pchild->lock, 1);
2020 if (pchild->start <= last && pchild->last >= start) {
2021 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
2022 pchild->start, pchild->last);
2023 atomic_inc(&pchild->invalid);
2024 }
2025 mutex_unlock(&pchild->lock);
2026 }
2027
2028 if (!mapped)
2029 return r;
2030
2031 if (prange->start <= last && prange->last >= start)
2032 atomic_inc(&prange->invalid);
2033
2034 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
2035 if (evicted_ranges != 1)
2036 return r;
2037
2038 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
2039 prange->svms, prange->start, prange->last);
2040
2041 /* First eviction, stop the queues */
2042 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2043 if (r)
2044 pr_debug("failed to quiesce KFD\n");
2045
2046 pr_debug("schedule to restore svm %p ranges\n", svms);
2047 queue_delayed_work(system_freezable_wq, &svms->restore_work,
2048 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
2049 } else {
2050 unsigned long s, l;
2051 uint32_t trigger;
2052
2053 if (event == MMU_NOTIFY_MIGRATE)
2054 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
2055 else
2056 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
2057
2058 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
2059 prange->svms, start, last);
2060 list_for_each_entry(pchild, &prange->child_list, child_list) {
2061 mutex_lock_nested(&pchild->lock, 1);
2062 s = max(start, pchild->start);
2063 l = min(last, pchild->last);
2064 if (l >= s)
2065 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2066 mutex_unlock(&pchild->lock);
2067 }
2068 s = max(start, prange->start);
2069 l = min(last, prange->last);
2070 if (l >= s)
2071 svm_range_unmap_from_gpus(prange, s, l, trigger);
2072 }
2073
2074 return r;
2075 }
2076
2077 static struct svm_range *svm_range_clone(struct svm_range *old)
2078 {
2079 struct svm_range *new;
2080
2081 new = svm_range_new(old->svms, old->start, old->last, false);
2082 if (!new)
2083 return NULL;
2084 if (svm_range_copy_dma_addrs(new, old)) {
2085 svm_range_free(new, false);
2086 return NULL;
2087 }
2088 if (old->svm_bo) {
2089 new->ttm_res = old->ttm_res;
2090 new->offset = old->offset;
2091 new->svm_bo = svm_range_bo_ref(old->svm_bo);
2092 spin_lock(&new->svm_bo->list_lock);
2093 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
2094 spin_unlock(&new->svm_bo->list_lock);
2095 }
2096 new->flags = old->flags;
2097 new->preferred_loc = old->preferred_loc;
2098 new->prefetch_loc = old->prefetch_loc;
2099 new->actual_loc = old->actual_loc;
2100 new->granularity = old->granularity;
2101 new->mapped_to_gpu = old->mapped_to_gpu;
2102 new->vram_pages = old->vram_pages;
2103 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
2104 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
2105 atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
2106
2107 return new;
2108 }
2109
2110 void svm_range_set_max_pages(struct amdgpu_device *adev)
2111 {
2112 uint64_t max_pages;
2113 uint64_t pages, _pages;
2114 uint64_t min_pages = 0;
2115 int i, id;
2116
2117 for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
2118 if (adev->kfd.dev->nodes[i]->xcp)
2119 id = adev->kfd.dev->nodes[i]->xcp->id;
2120 else
2121 id = -1;
2122 pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
2123 pages = clamp(pages, 1ULL << 9, 1ULL << 18);
2124 pages = rounddown_pow_of_two(pages);
2125 min_pages = min_not_zero(min_pages, pages);
2126 }
2127
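	/*
	 * Lock-free update of the global minimum: re-read max_svm_range_pages
	 * and retry the cmpxchg until no other CPU has changed it concurrently.
	 */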
2128 do {
2129 max_pages = READ_ONCE(max_svm_range_pages);
2130 _pages = min_not_zero(max_pages, min_pages);
2131 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2132 }
2133
2134 static int
2135 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2136 uint64_t max_pages, struct list_head *insert_list,
2137 struct list_head *update_list)
2138 {
2139 struct svm_range *prange;
2140 uint64_t l;
2141
2142 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
2143 max_pages, start, last);
2144
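	/*
	 * Example: with max_pages = 0x200, a request for [0x1f0 0x5ff] is cut
	 * at aligned boundaries into [0x1f0 0x1ff], [0x200 0x3ff] and
	 * [0x400 0x5ff].
	 */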
2145 while (last >= start) {
2146 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2147
2148 prange = svm_range_new(svms, start, l, true);
2149 if (!prange)
2150 return -ENOMEM;
2151 list_add(&prange->list, insert_list);
2152 list_add(&prange->update_list, update_list);
2153
2154 start = l + 1;
2155 }
2156 return 0;
2157 }
2158
2159 /**
2160 * svm_range_add - add svm range and handle overlap
2161 * @p: the process whose svms the range is added to
2162 * @start: range start address, in pages
2163 * @size: range size, in pages
2164 * @nattr: number of attributes
2165 * @attrs: array of attributes
2166 * @update_list: output, the ranges need validate and update GPU mapping
2167 * @insert_list: output, the ranges need insert to svms
2168 * @remove_list: output, the ranges are replaced and need remove from svms
2169 * @remap_list: output, remap unaligned svm ranges
2170 *
2171 * Check if the virtual address range has overlap with any existing ranges,
2172 * split partly overlapping ranges and add new ranges in the gaps. All changes
2173 * should be applied to the range_list and interval tree transactionally. If
2174 * any range split or allocation fails, the entire update fails. Therefore any
2175 * existing overlapping svm_ranges are cloned and the original svm_ranges left
2176 * unchanged.
2177 *
2178 * If the transaction succeeds, the caller can update and insert clones and
2179 * new ranges, then free the originals.
2180 *
2181 * Otherwise the caller can free the clones and new ranges, while the old
2182 * svm_ranges remain unchanged.
2183 *
2184 * Context: Process context, caller must hold svms->lock
2185 *
2186 * Return:
2187 * 0 - OK, otherwise error code
2188 */
2189 static int
2190 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2191 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2192 struct list_head *update_list, struct list_head *insert_list,
2193 struct list_head *remove_list, struct list_head *remap_list)
2194 {
2195 unsigned long last = start + size - 1UL;
2196 struct svm_range_list *svms = &p->svms;
2197 struct interval_tree_node *node;
2198 struct svm_range *prange;
2199 struct svm_range *tmp;
2200 struct list_head new_list;
2201 int r = 0;
2202
2203 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2204
2205 INIT_LIST_HEAD(update_list);
2206 INIT_LIST_HEAD(insert_list);
2207 INIT_LIST_HEAD(remove_list);
2208 INIT_LIST_HEAD(&new_list);
2209 INIT_LIST_HEAD(remap_list);
2210
2211 node = interval_tree_iter_first(&svms->objects, start, last);
2212 while (node) {
2213 struct interval_tree_node *next;
2214 unsigned long next_start;
2215
2216 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2217 node->last);
2218
2219 prange = container_of(node, struct svm_range, it_node);
2220 next = interval_tree_iter_next(node, start, last);
2221 next_start = min(node->last, last) + 1;
2222
2223 if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2224 prange->mapped_to_gpu) {
2225 /* nothing to do */
2226 } else if (node->start < start || node->last > last) {
2227 /* node intersects the update range and its attributes
2228 * will change. Clone and split it, apply updates only
2229 * to the overlapping part
2230 */
2231 struct svm_range *old = prange;
2232
2233 prange = svm_range_clone(old);
2234 if (!prange) {
2235 r = -ENOMEM;
2236 goto out;
2237 }
2238
2239 list_add(&old->update_list, remove_list);
2240 list_add(&prange->list, insert_list);
2241 list_add(&prange->update_list, update_list);
2242
2243 if (node->start < start) {
2244 pr_debug("change old range start\n");
2245 r = svm_range_split_head(prange, start,
2246 insert_list, remap_list);
2247 if (r)
2248 goto out;
2249 }
2250 if (node->last > last) {
2251 pr_debug("change old range last\n");
2252 r = svm_range_split_tail(prange, last,
2253 insert_list, remap_list);
2254 if (r)
2255 goto out;
2256 }
2257 } else {
2258 /* The node is contained within start..last,
2259 * just update it
2260 */
2261 list_add(&prange->update_list, update_list);
2262 }
2263
2264 /* insert a new node if needed */
2265 if (node->start > start) {
2266 r = svm_range_split_new(svms, start, node->start - 1,
2267 READ_ONCE(max_svm_range_pages),
2268 &new_list, update_list);
2269 if (r)
2270 goto out;
2271 }
2272
2273 node = next;
2274 start = next_start;
2275 }
2276
2277 /* add a final range at the end if needed */
2278 if (start <= last)
2279 r = svm_range_split_new(svms, start, last,
2280 READ_ONCE(max_svm_range_pages),
2281 &new_list, update_list);
2282
2283 out:
2284 if (r) {
2285 list_for_each_entry_safe(prange, tmp, insert_list, list)
2286 svm_range_free(prange, false);
2287 list_for_each_entry_safe(prange, tmp, &new_list, list)
2288 svm_range_free(prange, true);
2289 } else {
2290 list_splice(&new_list, insert_list);
2291 }
2292
2293 return r;
2294 }
2295
2296 static void
2297 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2298 struct svm_range *prange)
2299 {
2300 unsigned long start;
2301 unsigned long last;
2302
2303 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2304 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2305
2306 if (prange->start == start && prange->last == last)
2307 return;
2308
2309 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2310 prange->svms, prange, start, last, prange->start,
2311 prange->last);
2312
2313 if (start != 0 && last != 0) {
2314 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2315 svm_range_remove_notifier(prange);
2316 }
2317 prange->it_node.start = prange->start;
2318 prange->it_node.last = prange->last;
2319
2320 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2321 svm_range_add_notifier_locked(mm, prange);
2322 }
2323
2324 static void
2325 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2326 struct mm_struct *mm)
2327 {
2328 switch (prange->work_item.op) {
2329 case SVM_OP_NULL:
2330 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2331 svms, prange, prange->start, prange->last);
2332 break;
2333 case SVM_OP_UNMAP_RANGE:
2334 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2335 svms, prange, prange->start, prange->last);
2336 svm_range_unlink(prange);
2337 svm_range_remove_notifier(prange);
2338 svm_range_free(prange, true);
2339 break;
2340 case SVM_OP_UPDATE_RANGE_NOTIFIER:
2341 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2342 svms, prange, prange->start, prange->last);
2343 svm_range_update_notifier_and_interval_tree(mm, prange);
2344 break;
2345 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2346 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2347 svms, prange, prange->start, prange->last);
2348 svm_range_update_notifier_and_interval_tree(mm, prange);
2349 /* TODO: implement deferred validation and mapping */
2350 break;
2351 case SVM_OP_ADD_RANGE:
2352 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2353 prange->start, prange->last);
2354 svm_range_add_to_svms(prange);
2355 svm_range_add_notifier_locked(mm, prange);
2356 break;
2357 case SVM_OP_ADD_RANGE_AND_MAP:
2358 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2359 prange, prange->start, prange->last);
2360 svm_range_add_to_svms(prange);
2361 svm_range_add_notifier_locked(mm, prange);
2362 /* TODO: implement deferred validation and mapping */
2363 break;
2364 default:
2365 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2366 prange->work_item.op);
2367 }
2368 }
2369
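/* Wait until the interrupt rings have processed all retry faults that were
 * already queued when the checkpoint was taken, so stale faults for ranges
 * that are about to be freed are consumed before the ranges go away.
 */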
2370 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2371 {
2372 struct kfd_process_device *pdd;
2373 struct kfd_process *p;
2374 uint32_t i;
2375
2376 p = container_of(svms, struct kfd_process, svms);
2377
2378 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2379 pdd = p->pdds[i];
2380 if (!pdd)
2381 continue;
2382
2383 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2384
2385 if (!down_read_trylock(&pdd->dev->adev->reset_domain->sem))
2386 continue;
2387
2388 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2389 pdd->dev->adev->irq.retry_cam_enabled ?
2390 &pdd->dev->adev->irq.ih :
2391 &pdd->dev->adev->irq.ih1);
2392
2393 if (pdd->dev->adev->irq.retry_cam_enabled)
2394 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2395 &pdd->dev->adev->irq.ih_soft);
2396
2397 up_read(&pdd->dev->adev->reset_domain->sem);
2398
2399 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2400 }
2401 }
2402
2403 static void svm_range_deferred_list_work(struct work_struct *work)
2404 {
2405 struct svm_range_list *svms;
2406 struct svm_range *prange;
2407 struct mm_struct *mm;
2408
2409 svms = container_of(work, struct svm_range_list, deferred_list_work);
2410 pr_debug("enter svms 0x%p\n", svms);
2411
2412 spin_lock(&svms->deferred_list_lock);
2413 while (!list_empty(&svms->deferred_range_list)) {
2414 prange = list_first_entry(&svms->deferred_range_list,
2415 struct svm_range, deferred_list);
2416 spin_unlock(&svms->deferred_list_lock);
2417
2418 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2419 prange->start, prange->last, prange->work_item.op);
2420
2421 mm = prange->work_item.mm;
2422
2423 mmap_write_lock(mm);
2424
2425 /* Removal from the deferred_list must happen inside the mmap write
2426 * lock, to avoid two races:
2427 * 1. unmap_from_cpu may change work_item.op and add the range
2428 * to the deferred_list again, causing a use-after-free bug.
2429 * 2. svm_range_list_lock_and_flush_work may hold the mmap write
2430 * lock and continue because the deferred_list is empty, while
2431 * the deferred_list work is actually waiting for the mmap lock.
2432 */
2433 spin_lock(&svms->deferred_list_lock);
2434 list_del_init(&prange->deferred_list);
2435 spin_unlock(&svms->deferred_list_lock);
2436
2437 mutex_lock(&svms->lock);
2438 mutex_lock(&prange->migrate_mutex);
2439 while (!list_empty(&prange->child_list)) {
2440 struct svm_range *pchild;
2441
2442 pchild = list_first_entry(&prange->child_list,
2443 struct svm_range, child_list);
2444 pr_debug("child prange 0x%p op %d\n", pchild,
2445 pchild->work_item.op);
2446 list_del_init(&pchild->child_list);
2447 svm_range_handle_list_op(svms, pchild, mm);
2448 }
2449 mutex_unlock(&prange->migrate_mutex);
2450
2451 svm_range_handle_list_op(svms, prange, mm);
2452 mutex_unlock(&svms->lock);
2453 mmap_write_unlock(mm);
2454
2455 /* Pairs with mmget in svm_range_add_list_work. If dropping the
2456 * last mm refcount, schedule release work to avoid circular locking
2457 */
2458 mmput_async(mm);
2459
2460 spin_lock(&svms->deferred_list_lock);
2461 }
2462 spin_unlock(&svms->deferred_list_lock);
2463 pr_debug("exit svms 0x%p\n", svms);
2464 }
2465
2466 void
2467 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2468 struct mm_struct *mm, enum svm_work_list_ops op)
2469 {
2470 spin_lock(&svms->deferred_list_lock);
2471 /* if prange is on the deferred list */
2472 if (!list_empty(&prange->deferred_list)) {
2473 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2474 WARN_ONCE(prange->work_item.mm != mm, "mismatched mm\n");
2475 if (op != SVM_OP_NULL &&
2476 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2477 prange->work_item.op = op;
2478 } else {
2479 /* Pairs with mmput in deferred_list_work.
2480 * If process is exiting and mm is gone, don't update mmu notifier.
2481 */
2482 if (mmget_not_zero(mm)) {
2483 prange->work_item.mm = mm;
2484 prange->work_item.op = op;
2485 list_add_tail(&prange->deferred_list,
2486 &prange->svms->deferred_range_list);
2487 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2488 prange, prange->start, prange->last, op);
2489 }
2490 }
2491 spin_unlock(&svms->deferred_list_lock);
2492 }
2493
2494 void schedule_deferred_list_work(struct svm_range_list *svms)
2495 {
2496 spin_lock(&svms->deferred_list_lock);
2497 if (!list_empty(&svms->deferred_range_list))
2498 schedule_work(&svms->deferred_list_work);
2499 spin_unlock(&svms->deferred_list_lock);
2500 }
2501
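/*
 * Illustrative example of the split below: parent prange [0x1000 0x1fff], and
 * the CPU unmaps [0x1400 0x17ff]. prange is trimmed to [0x1000 0x13ff], the
 * head child [0x1400 0x17ff] is queued with SVM_OP_UNMAP_RANGE and the tail
 * child [0x1800 0x1fff] with SVM_OP_ADD_RANGE.
 */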
2502 static void
2503 svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start,
2504 unsigned long last)
2505 {
2506 struct svm_range *head;
2507 struct svm_range *tail;
2508
2509 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2510 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2511 prange->start, prange->last);
2512 return;
2513 }
2514 if (start > prange->last || last < prange->start)
2515 return;
2516
2517 head = tail = prange;
2518 if (start > prange->start)
2519 svm_range_split(prange, prange->start, start - 1, &tail);
2520 if (last < tail->last)
2521 svm_range_split(tail, last + 1, tail->last, &head);
2522
2523 if (head != prange && tail != prange) {
2524 svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
2525 svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE);
2526 } else if (tail != prange) {
2527 svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE);
2528 } else if (head != prange) {
2529 svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
2530 } else if (parent != prange) {
2531 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2532 }
2533 }
2534
2535 static void
2536 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2537 unsigned long start, unsigned long last)
2538 {
2539 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2540 struct svm_range_list *svms;
2541 struct svm_range *pchild;
2542 struct kfd_process *p;
2543 unsigned long s, l;
2544 bool unmap_parent;
2545 uint32_t i;
2546
2547 if (atomic_read(&prange->queue_refcount)) {
2548 int r;
2549
2550 pr_warn("Freeing queue vital buffer 0x%lx, queue evicted\n",
2551 prange->start << PAGE_SHIFT);
2552 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2553 if (r)
2554 pr_debug("failed %d to quiesce KFD queues\n", r);
2555 }
2556
2557 p = kfd_lookup_process_by_mm(mm);
2558 if (!p)
2559 return;
2560 svms = &p->svms;
2561
2562 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2563 prange, prange->start, prange->last, start, last);
2564
2565 /* Calculate timestamps that are used to decide which page faults need to be
2566 * dropped or handled before unmapping pages from the GPU VM.
2567 */
2568 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2569 struct kfd_process_device *pdd;
2570 struct amdgpu_device *adev;
2571 struct amdgpu_ih_ring *ih;
2572 uint32_t checkpoint_wptr;
2573
2574 pdd = p->pdds[i];
2575 if (!pdd)
2576 continue;
2577
2578 adev = pdd->dev->adev;
2579
2580 /* Check and drain ih1 ring if cam not available */
2581 if (!adev->irq.retry_cam_enabled && adev->irq.ih1.ring_size) {
2582 ih = &adev->irq.ih1;
2583 checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2584 if (ih->rptr != checkpoint_wptr) {
2585 svms->checkpoint_ts[i] =
2586 amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2587 continue;
2588 }
2589 }
2590
2591 /* check if dev->irq.ih_soft is not empty */
2592 ih = &adev->irq.ih_soft;
2593 checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2594 if (ih->rptr != checkpoint_wptr)
2595 svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2596 }
2597
2598 unmap_parent = start <= prange->start && last >= prange->last;
2599
2600 list_for_each_entry(pchild, &prange->child_list, child_list) {
2601 mutex_lock_nested(&pchild->lock, 1);
2602 s = max(start, pchild->start);
2603 l = min(last, pchild->last);
2604 if (l >= s)
2605 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2606 svm_range_unmap_split(prange, pchild, start, last);
2607 mutex_unlock(&pchild->lock);
2608 }
2609 s = max(start, prange->start);
2610 l = min(last, prange->last);
2611 if (l >= s)
2612 svm_range_unmap_from_gpus(prange, s, l, trigger);
2613 svm_range_unmap_split(prange, prange, start, last);
2614
2615 if (unmap_parent)
2616 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2617 else
2618 svm_range_add_list_work(svms, prange, mm,
2619 SVM_OP_UPDATE_RANGE_NOTIFIER);
2620 schedule_deferred_list_work(svms);
2621
2622 kfd_unref_process(p);
2623 }
2624
2625 /**
2626 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2627 * @mni: mmu_interval_notifier struct
2628 * @range: mmu_notifier_range struct
2629 * @cur_seq: value to pass to mmu_interval_set_seq()
2630 *
2631 * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
2632 * otherwise it comes from migration or a CPU page invalidation callback.
2633 *
2634 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2635 * work thread, and split prange if only part of prange is unmapped.
2636 *
2637 * For invalidation event, if GPU retry fault is not enabled, evict the queues,
2638 * then schedule svm_range_restore_work to update GPU mapping and resume queues.
2639 * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will
2640 * update GPU mapping to recover.
2641 *
2642 * Context: mmap lock, notifier_invalidate_start lock are held
2643 * for invalidate event, prange lock is held if this is from migration
2644 */
2645 static bool
2646 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2647 const struct mmu_notifier_range *range,
2648 unsigned long cur_seq)
2649 {
2650 struct svm_range *prange;
2651 unsigned long start;
2652 unsigned long last;
2653
2654 if (range->event == MMU_NOTIFY_RELEASE)
2655 return true;
2656
2657 start = mni->interval_tree.start;
2658 last = mni->interval_tree.last;
2659 start = max(start, range->start) >> PAGE_SHIFT;
2660 last = min(last, range->end - 1) >> PAGE_SHIFT;
2661 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2662 start, last, range->start >> PAGE_SHIFT,
2663 (range->end - 1) >> PAGE_SHIFT,
2664 mni->interval_tree.start >> PAGE_SHIFT,
2665 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2666
2667 prange = container_of(mni, struct svm_range, notifier);
2668
2669 svm_range_lock(prange);
2670 mmu_interval_set_seq(mni, cur_seq);
2671
2672 switch (range->event) {
2673 case MMU_NOTIFY_UNMAP:
2674 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2675 break;
2676 default:
2677 svm_range_evict(prange, mni->mm, start, last, range->event);
2678 break;
2679 }
2680
2681 svm_range_unlock(prange);
2682
2683 return true;
2684 }
2685
2686 /**
2687 * svm_range_from_addr - find svm range from fault address
2688 * @svms: svm range list header
2689 * @addr: address to search range interval tree, in pages
2690 * @parent: parent range if range is on child list
2691 *
2692 * Context: The caller must hold svms->lock
2693 *
2694 * Return: the svm_range found or NULL
2695 */
2696 struct svm_range *
2697 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2698 struct svm_range **parent)
2699 {
2700 struct interval_tree_node *node;
2701 struct svm_range *prange;
2702 struct svm_range *pchild;
2703
2704 node = interval_tree_iter_first(&svms->objects, addr, addr);
2705 if (!node)
2706 return NULL;
2707
2708 prange = container_of(node, struct svm_range, it_node);
2709 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2710 addr, prange->start, prange->last, node->start, node->last);
2711
2712 if (addr >= prange->start && addr <= prange->last) {
2713 if (parent)
2714 *parent = prange;
2715 return prange;
2716 }
2717 list_for_each_entry(pchild, &prange->child_list, child_list)
2718 if (addr >= pchild->start && addr <= pchild->last) {
2719 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2720 addr, pchild->start, pchild->last);
2721 if (parent)
2722 *parent = prange;
2723 return pchild;
2724 }
2725
2726 return NULL;
2727 }
2728
2729 /* svm_range_best_restore_location - decide the best fault restore location
2730 * @prange: svm range structure
2731 * @adev: the GPU on which vm fault happened
2732 *
2733 * This is only called when xnack is on, to decide the best location to restore
2734 * the range mapping after a GPU vm fault. The caller uses the best location to
2735 * migrate the range if the actual location is not the best location, then
2736 * updates the GPU page table mapping to the best location.
2737 *
2738 * If the preferred loc is accessible by faulting GPU, use preferred loc.
2739 * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu
2740 * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then
2741 * if range actual loc is cpu, best_loc is cpu
2742 * if the faulting GPU is in the same XGMI hive as the actual location GPU,
2743 * best_loc is the range's actual location.
2744 * Otherwise the faulting GPU has no access and best_loc is -1.
2745 *
2746 * Return:
2747 * -1 - the vm fault GPU has no access to the range
2748 * 0 for CPU or GPU id
2749 */
2750 static int32_t
2751 svm_range_best_restore_location(struct svm_range *prange,
2752 struct kfd_node *node,
2753 int32_t *gpuidx)
2754 {
2755 struct kfd_node *bo_node, *preferred_node;
2756 struct kfd_process *p;
2757 uint32_t gpuid;
2758 int r;
2759
2760 p = container_of(prange->svms, struct kfd_process, svms);
2761
2762 r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2763 if (r < 0) {
2764 pr_debug("failed to get gpuid from kgd\n");
2765 return -1;
2766 }
2767
2768 if (node->adev->apu_prefer_gtt)
2769 return 0;
2770
2771 if (prange->preferred_loc == gpuid ||
2772 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2773 return prange->preferred_loc;
2774 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2775 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2776 if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2777 return prange->preferred_loc;
2778 /* fall through */
2779 }
2780
2781 if (test_bit(*gpuidx, prange->bitmap_access))
2782 return gpuid;
2783
2784 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2785 if (!prange->actual_loc)
2786 return 0;
2787
2788 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2789 if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2790 return prange->actual_loc;
2791 else
2792 return 0;
2793 }
2794
2795 return -1;
2796 }
2797
2798 static int
2799 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2800 unsigned long *start, unsigned long *last,
2801 bool *is_heap_stack)
2802 {
2803 struct vm_area_struct *vma;
2804 struct interval_tree_node *node;
2805 struct rb_node *rb_node;
2806 unsigned long start_limit, end_limit;
2807
2808 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2809 if (!vma) {
2810 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2811 return -EFAULT;
2812 }
2813
2814 *is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2815
2816 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2817 (unsigned long)ALIGN_DOWN(addr, 1UL << p->svms.default_granularity));
2818 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2819 (unsigned long)ALIGN(addr + 1, 1UL << p->svms.default_granularity));
2820
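	/*
	 * For example, with a granularity of 9 (512 pages, 2MB), a fault at
	 * page 0x12345 inside a large VMA starts with the limits
	 * [0x12200 0x12400), which the neighbouring ranges below may shrink
	 * further.
	 */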
2821 /* First range that starts after the fault address */
2822 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2823 if (node) {
2824 end_limit = min(end_limit, node->start);
2825 /* Last range that ends before the fault address */
2826 rb_node = rb_prev(&node->rb);
2827 } else {
2828 /* Last range must end before addr because
2829 * there was no range after addr
2830 */
2831 rb_node = rb_last(&p->svms.objects.rb_root);
2832 }
2833 if (rb_node) {
2834 node = container_of(rb_node, struct interval_tree_node, rb);
2835 if (node->last >= addr) {
2836 WARN(1, "Overlap with prev node and page fault addr\n");
2837 return -EFAULT;
2838 }
2839 start_limit = max(start_limit, node->last + 1);
2840 }
2841
2842 *start = start_limit;
2843 *last = end_limit - 1;
2844
2845 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2846 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2847 *start, *last, *is_heap_stack);
2848
2849 return 0;
2850 }
2851
2852 static int
2853 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2854 uint64_t *bo_s, uint64_t *bo_l)
2855 {
2856 struct amdgpu_bo_va_mapping *mapping;
2857 struct interval_tree_node *node;
2858 struct amdgpu_bo *bo = NULL;
2859 unsigned long userptr;
2860 uint32_t i;
2861 int r;
2862
2863 for (i = 0; i < p->n_pdds; i++) {
2864 struct amdgpu_vm *vm;
2865
2866 if (!p->pdds[i]->drm_priv)
2867 continue;
2868
2869 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2870 r = amdgpu_bo_reserve(vm->root.bo, false);
2871 if (r)
2872 return r;
2873
2874 /* Check userptr by searching entire vm->va interval tree */
2875 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2876 while (node) {
2877 mapping = container_of((struct rb_node *)node,
2878 struct amdgpu_bo_va_mapping, rb);
2879 bo = mapping->bo_va->base.bo;
2880
2881 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2882 start << PAGE_SHIFT,
2883 last << PAGE_SHIFT,
2884 &userptr)) {
2885 node = interval_tree_iter_next(node, 0, ~0ULL);
2886 continue;
2887 }
2888
2889 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2890 start, last);
2891 if (bo_s && bo_l) {
2892 *bo_s = userptr >> PAGE_SHIFT;
2893 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2894 }
2895 amdgpu_bo_unreserve(vm->root.bo);
2896 return -EADDRINUSE;
2897 }
2898 amdgpu_bo_unreserve(vm->root.bo);
2899 }
2900 return 0;
2901 }
2902
2903 static struct
2904 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2905 struct kfd_process *p,
2906 struct mm_struct *mm,
2907 int64_t addr)
2908 {
2909 struct svm_range *prange = NULL;
2910 unsigned long start, last;
2911 uint32_t gpuid, gpuidx;
2912 bool is_heap_stack;
2913 uint64_t bo_s = 0;
2914 uint64_t bo_l = 0;
2915 int r;
2916
2917 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2918 &is_heap_stack))
2919 return NULL;
2920
2921 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2922 if (r != -EADDRINUSE)
2923 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2924
2925 if (r == -EADDRINUSE) {
2926 if (addr >= bo_s && addr <= bo_l)
2927 return NULL;
2928
2929 /* Create a one-page svm range if the 2MB range overlaps an existing mapping */
2930 start = addr;
2931 last = addr;
2932 }
2933
2934 prange = svm_range_new(&p->svms, start, last, true);
2935 if (!prange) {
2936 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2937 return NULL;
2938 }
2939 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2940 pr_debug("failed to get gpuid from kgd\n");
2941 svm_range_free(prange, true);
2942 return NULL;
2943 }
2944
2945 if (is_heap_stack)
2946 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2947
2948 svm_range_add_to_svms(prange);
2949 svm_range_add_notifier_locked(mm, prange);
2950
2951 return prange;
2952 }
2953
2954 /* svm_range_skip_recover - decide if prange can be recovered
2955 * @prange: svm range structure
2956 *
2957 * GPU VM retry fault handling skips recovering the range in these cases:
2958 * 1. prange is on the deferred list to be removed after unmap; it is a stale
2959 * fault, and the deferred list work will drain it before freeing the prange.
2960 * 2. prange is on the deferred list to add an interval notifier after split, or
2961 * 3. prange is a child range split from a parent prange; recover it later,
2962 * after the interval notifier is added.
2963 *
2964 * Return: true to skip recover, false to recover
2965 */
2966 static bool svm_range_skip_recover(struct svm_range *prange)
2967 {
2968 struct svm_range_list *svms = prange->svms;
2969
2970 spin_lock(&svms->deferred_list_lock);
2971 if (list_empty(&prange->deferred_list) &&
2972 list_empty(&prange->child_list)) {
2973 spin_unlock(&svms->deferred_list_lock);
2974 return false;
2975 }
2976 spin_unlock(&svms->deferred_list_lock);
2977
2978 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2979 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2980 svms, prange, prange->start, prange->last);
2981 return true;
2982 }
2983 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2984 prange->work_item.op == SVM_OP_ADD_RANGE) {
2985 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2986 svms, prange, prange->start, prange->last);
2987 return true;
2988 }
2989 return false;
2990 }
2991
2992 static void
2993 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2994 int32_t gpuidx)
2995 {
2996 struct kfd_process_device *pdd;
2997
2998 /* fault is on different page of same range
2999 * or fault is skipped to recover later
3000 * or fault is on invalid virtual address
3001 */
3002 if (gpuidx == MAX_GPU_INSTANCE) {
3003 uint32_t gpuid;
3004 int r;
3005
3006 r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
3007 if (r < 0)
3008 return;
3009 }
3010
3011 /* fault is recovered
3012 * or fault cannot recover because GPU no access on the range
3013 */
3014 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3015 if (pdd)
3016 WRITE_ONCE(pdd->faults, pdd->faults + 1);
3017 }
3018
3019 static bool
3020 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
3021 {
3022 unsigned long requested = VM_READ;
3023
3024 if (write_fault)
3025 requested |= VM_WRITE;
3026
3027 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
3028 vma->vm_flags);
3029 return (vma->vm_flags & requested) == requested;
3030 }
3031
3032 int
3033 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
3034 uint32_t vmid, uint32_t node_id,
3035 uint64_t addr, uint64_t ts, bool write_fault)
3036 {
3037 unsigned long start, last, size;
3038 struct mm_struct *mm = NULL;
3039 struct svm_range_list *svms;
3040 struct svm_range *prange;
3041 struct kfd_process *p;
3042 ktime_t timestamp = ktime_get_boottime();
3043 struct kfd_node *node;
3044 int32_t best_loc;
3045 int32_t gpuid, gpuidx = MAX_GPU_INSTANCE;
3046 bool write_locked = false;
3047 struct vm_area_struct *vma;
3048 bool migration = false;
3049 int r = 0;
3050
3051 if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
3052 pr_debug("device does not support SVM\n");
3053 return -EFAULT;
3054 }
3055
3056 p = kfd_lookup_process_by_pasid(pasid, NULL);
3057 if (!p) {
3058 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
3059 return 0;
3060 }
3061 svms = &p->svms;
3062
3063 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
3064
3065 if (atomic_read(&svms->drain_pagefaults)) {
3066 pr_debug("page fault handling disabled, drop fault 0x%llx\n", addr);
3067 r = 0;
3068 goto out;
3069 }
3070
3071 node = kfd_node_by_irq_ids(adev, node_id, vmid);
3072 if (!node) {
3073 pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
3074 vmid);
3075 r = -EFAULT;
3076 goto out;
3077 }
3078
3079 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
3080 pr_debug("failed to get gpuid/gpuidex for node_id: %d\n", node_id);
3081 r = -EFAULT;
3082 goto out;
3083 }
3084
3085 if (!p->xnack_enabled) {
3086 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
3087 r = -EFAULT;
3088 goto out;
3089 }
3090
3091 /* p->lead_thread is available because kfd_process_wq_release flushes the work
3092 * before releasing the task ref.
3093 */
3094 mm = get_task_mm(p->lead_thread);
3095 if (!mm) {
3096 pr_debug("svms 0x%p failed to get mm\n", svms);
3097 r = 0;
3098 goto out;
3099 }
3100
3101 mmap_read_lock(mm);
3102 retry_write_locked:
3103 mutex_lock(&svms->lock);
3104
3105 /* check if this page fault time stamp is before svms->checkpoint_ts */
3106 if (svms->checkpoint_ts[gpuidx] != 0) {
3107 if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
3108 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
3109 if (write_locked)
3110 mmap_write_downgrade(mm);
3111 r = -EAGAIN;
3112 goto out_unlock_svms;
3113 } else {
3114 /* ts is now after svms->checkpoint_ts; reset svms->checkpoint_ts
3115 * to zero to avoid a later ts wrap-around producing a wrong comparison
3116 */
3117 svms->checkpoint_ts[gpuidx] = 0;
3118 }
3119 }
3120
3121 prange = svm_range_from_addr(svms, addr, NULL);
3122 if (!prange) {
3123 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
3124 svms, addr);
3125 if (!write_locked) {
3126 /* Need the write lock to create new range with MMU notifier.
3127 * Also flush pending deferred work to make sure the interval
3128 * tree is up to date before we add a new range
3129 */
3130 mutex_unlock(&svms->lock);
3131 mmap_read_unlock(mm);
3132 mmap_write_lock(mm);
3133 write_locked = true;
3134 goto retry_write_locked;
3135 }
3136 prange = svm_range_create_unregistered_range(node, p, mm, addr);
3137 if (!prange) {
3138 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
3139 svms, addr);
3140 mmap_write_downgrade(mm);
3141 r = -EFAULT;
3142 goto out_unlock_svms;
3143 }
3144 }
3145 if (write_locked)
3146 mmap_write_downgrade(mm);
3147
3148 mutex_lock(&prange->migrate_mutex);
3149
3150 if (svm_range_skip_recover(prange)) {
3151 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3152 r = 0;
3153 goto out_unlock_range;
3154 }
3155
3156 /* skip duplicate vm fault on different pages of same range */
3157 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
3158 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
3159 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
3160 svms, prange->start, prange->last);
3161 r = 0;
3162 goto out_unlock_range;
3163 }
3164
3165 /* __do_munmap removed the VMA; return success as we are handling a stale
3166 * retry fault.
3167 */
3168 vma = vma_lookup(mm, addr << PAGE_SHIFT);
3169 if (!vma) {
3170 pr_debug("address 0x%llx VMA is removed\n", addr);
3171 r = 0;
3172 goto out_unlock_range;
3173 }
3174
3175 if (!svm_fault_allowed(vma, write_fault)) {
3176 pr_debug("fault addr 0x%llx no %s permission\n", addr,
3177 write_fault ? "write" : "read");
3178 r = -EPERM;
3179 goto out_unlock_range;
3180 }
3181
3182 best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3183 if (best_loc == -1) {
3184 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3185 svms, prange->start, prange->last);
3186 r = -EACCES;
3187 goto out_unlock_range;
3188 }
3189
3190 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3191 svms, prange->start, prange->last, best_loc,
3192 prange->actual_loc);
3193
3194 kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
3195 write_fault, timestamp);
3196
3197 /* Align migration range start and size to granularity size */
3198 size = 1UL << prange->granularity;
3199 start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
3200 last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
3201 if (prange->actual_loc != 0 || best_loc != 0) {
3202 if (best_loc) {
3203 r = svm_migrate_to_vram(prange, best_loc, start, last,
3204 mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3205 if (r) {
3206 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
3207 r, addr);
3208 /* Fallback to system memory if migration to
3209 * VRAM failed
3210 */
3211 if (prange->actual_loc && prange->actual_loc != best_loc)
3212 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3213 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3214 else
3215 r = 0;
3216 }
3217 } else {
3218 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3219 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3220 }
3221 if (r) {
3222 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3223 r, svms, start, last);
3224 goto out_migrate_fail;
3225 } else {
3226 migration = true;
3227 }
3228 }
3229
3230 r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
3231 false, false);
3232 if (r)
3233 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3234 r, svms, start, last);
3235
3236 out_migrate_fail:
3237 kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3238 migration);
3239
3240 out_unlock_range:
3241 mutex_unlock(&prange->migrate_mutex);
3242 out_unlock_svms:
3243 mutex_unlock(&svms->lock);
3244 mmap_read_unlock(mm);
3245
3246 if (r != -EAGAIN)
3247 svm_range_count_fault(node, p, gpuidx);
3248
3249 mmput(mm);
3250 out:
3251 kfd_unref_process(p);
3252
3253 if (r == -EAGAIN) {
3254 pr_debug("recover vm fault later\n");
3255 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3256 r = 0;
3257 }
3258 return r;
3259 }
3260
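/* With XNACK disabled every SVM range must stay resident because the GPU
 * cannot recover from page faults, so system memory for all ranges is
 * accounted against the KFD memory limit; with XNACK enabled memory is
 * paged in on demand and the reservations are released.
 */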
3261 int
3262 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3263 {
3264 struct svm_range *prange, *pchild;
3265 uint64_t reserved_size = 0;
3266 uint64_t size;
3267 int r = 0;
3268
3269 pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3270
3271 mutex_lock(&p->svms.lock);
3272
3273 list_for_each_entry(prange, &p->svms.list, list) {
3274 svm_range_lock(prange);
3275 list_for_each_entry(pchild, &prange->child_list, child_list) {
3276 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3277 if (xnack_enabled) {
3278 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3279 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3280 } else {
3281 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3282 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3283 if (r)
3284 goto out_unlock;
3285 reserved_size += size;
3286 }
3287 }
3288
3289 size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3290 if (xnack_enabled) {
3291 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3292 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3293 } else {
3294 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3295 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3296 if (r)
3297 goto out_unlock;
3298 reserved_size += size;
3299 }
3300 out_unlock:
3301 svm_range_unlock(prange);
3302 if (r)
3303 break;
3304 }
3305
3306 if (r)
3307 amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3308 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3309 else
3310 /* Changing the xnack mode must be done while holding the svms lock, to avoid
3311 * racing with svm_range_deferred_list_work unreserving memory in parallel.
3312 */
3313 p->xnack_enabled = xnack_enabled;
3314
3315 mutex_unlock(&p->svms.lock);
3316 return r;
3317 }
3318
3319 void svm_range_list_fini(struct kfd_process *p)
3320 {
3321 struct svm_range *prange;
3322 struct svm_range *next;
3323
3324 pr_debug("process pid %d svms 0x%p\n", p->lead_thread->pid,
3325 &p->svms);
3326
3327 cancel_delayed_work_sync(&p->svms.restore_work);
3328
3329 /* Ensure list work is finished before process is destroyed */
3330 flush_work(&p->svms.deferred_list_work);
3331
3332 /*
3333 * Ensure no retry fault comes in afterwards, as the page fault handler will
3334 * not find the kfd process or take the mm lock to recover the fault.
3335 * Stop KFD page fault handling, then wait for pending page faults to be drained.
3336 */
3337 atomic_set(&p->svms.drain_pagefaults, 1);
3338 svm_range_drain_retry_fault(&p->svms);
3339
3340 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3341 svm_range_unlink(prange);
3342 svm_range_remove_notifier(prange);
3343 svm_range_free(prange, true);
3344 }
3345
3346 mutex_destroy(&p->svms.lock);
3347
3348 pr_debug("process pid %d svms 0x%p done\n",
3349 p->lead_thread->pid, &p->svms);
3350 }
3351
3352 int svm_range_list_init(struct kfd_process *p)
3353 {
3354 struct svm_range_list *svms = &p->svms;
3355 int i;
3356
3357 svms->objects = RB_ROOT_CACHED;
3358 mutex_init(&svms->lock);
3359 INIT_LIST_HEAD(&svms->list);
3360 atomic_set(&svms->evicted_ranges, 0);
3361 atomic_set(&svms->drain_pagefaults, 0);
3362 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3363 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3364 INIT_LIST_HEAD(&svms->deferred_range_list);
3365 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3366 spin_lock_init(&svms->deferred_list_lock);
3367
3368 for (i = 0; i < p->n_pdds; i++)
3369 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3370 bitmap_set(svms->bitmap_supported, i, 1);
3371
3372 /* Value of default granularity cannot exceed 0x1B, the
3373 * number of pages supported by a 4-level paging table
3374 */
3375 svms->default_granularity = min_t(u8, amdgpu_svm_default_granularity, 0x1B);
3376 pr_debug("Default SVM Granularity to use: %d\n", svms->default_granularity);
3377
3378 return 0;
3379 }
3380
3381 /**
3382 * svm_range_check_vm - check if virtual address range mapped already
3383 * @p: current kfd_process
3384 * @start: range start address, in pages
3385 * @last: range last address, in pages
3386 * @bo_s: mapping start address in pages if address range already mapped
3387 * @bo_l: mapping last address in pages if address range already mapped
3388 *
3389 * The purpose is to avoid virtual address ranges already allocated by the
3390 * kfd_ioctl_alloc_memory_of_gpu ioctl.
3391 * It checks each pdd in the kfd_process.
3392 *
3393 * Context: Process context
3394 *
3395 * Return 0 - OK, if the range is not mapped.
3396 * Otherwise error code:
3397 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3398 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3399 * a signal. Release all buffer reservations and return to user-space.
3400 */
3401 static int
3402 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3403 uint64_t *bo_s, uint64_t *bo_l)
3404 {
3405 struct amdgpu_bo_va_mapping *mapping;
3406 struct interval_tree_node *node;
3407 uint32_t i;
3408 int r;
3409
3410 for (i = 0; i < p->n_pdds; i++) {
3411 struct amdgpu_vm *vm;
3412
3413 if (!p->pdds[i]->drm_priv)
3414 continue;
3415
3416 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3417 r = amdgpu_bo_reserve(vm->root.bo, false);
3418 if (r)
3419 return r;
3420
3421 node = interval_tree_iter_first(&vm->va, start, last);
3422 if (node) {
3423 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3424 start, last);
3425 mapping = container_of((struct rb_node *)node,
3426 struct amdgpu_bo_va_mapping, rb);
3427 if (bo_s && bo_l) {
3428 *bo_s = mapping->start;
3429 *bo_l = mapping->last;
3430 }
3431 amdgpu_bo_unreserve(vm->root.bo);
3432 return -EADDRINUSE;
3433 }
3434 amdgpu_bo_unreserve(vm->root.bo);
3435 }
3436
3437 return 0;
3438 }
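/*
 * Usage sketch: svm_range_is_valid() below calls this with bo_s and bo_l set
 * to NULL because it only needs the pass/fail result; callers that pass
 * non-NULL pointers get back the start/last of the conflicting mapping.
 */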
3439
3440 /**
3441 * svm_range_is_valid - check if virtual address range is valid
3442 * @p: current kfd_process
3443 * @start: range start address, in pages
3444 * @size: range size, in pages
3445 *
3446 * A virtual address range is valid if it is fully covered by one or more VMAs that do not map device memory
3447 *
3448 * Context: Process context
3449 *
3450 * Return:
3451 * 0 - OK, otherwise error code
3452 */
3453 static int
3454 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3455 {
3456 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3457 struct vm_area_struct *vma;
3458 unsigned long end;
3459 unsigned long start_unchg = start;
3460
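/* start and size are given in page units; convert to byte addresses for the
 * VMA walk below, then back to pages for svm_range_check_vm().
 */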
3461 start <<= PAGE_SHIFT;
3462 end = start + (size << PAGE_SHIFT);
3463 do {
3464 vma = vma_lookup(p->mm, start);
3465 if (!vma || (vma->vm_flags & device_vma))
3466 return -EFAULT;
3467 start = min(end, vma->vm_end);
3468 } while (start < end);
3469
3470 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3471 NULL);
3472 }
3473
3474 /**
3475 * svm_range_best_prefetch_location - decide the best prefetch location
3476 * @prange: svm range structure
3477 *
3478 * For xnack off:
3479 * If the range maps to a single GPU, the best prefetch location is
3480 * prefetch_loc, which can be the CPU or a GPU.
3481 *
3482 * If the range is ACCESS or ACCESS_IN_PLACE by multiple GPUs, the best
3483 * prefetch location is the prefetch_loc GPU only if the GPUs share the same
3484 * XGMI hive; otherwise it is always the CPU, because a GPU cannot coherently
3485 * map another GPU's VRAM, even over a large-BAR PCIe connection.
3486 *
3487 * For xnack on:
3488 * If the range is not ACCESS_IN_PLACE by multiple GPUs, the best prefetch
3489 * location is prefetch_loc; access from other GPUs will fault and trigger migration.
3490 *
3491 * If the range is ACCESS_IN_PLACE by multiple GPUs, the best prefetch location
3492 * is the prefetch_loc GPU only if the GPUs share the same XGMI hive; otherwise
3493 * the best prefetch location is always the CPU.
3494 *
3495 * Context: Process context
3496 *
3497 * Return:
3498 * 0 for CPU, or the GPU id of the best prefetch location
3499 */
3500 static uint32_t
3501 svm_range_best_prefetch_location(struct svm_range *prange)
3502 {
3503 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3504 uint32_t best_loc = prange->prefetch_loc;
3505 struct kfd_process_device *pdd;
3506 struct kfd_node *bo_node;
3507 struct kfd_process *p;
3508 uint32_t gpuidx;
3509
3510 p = container_of(prange->svms, struct kfd_process, svms);
3511
3512 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3513 goto out;
3514
3515 bo_node = svm_range_get_node_by_id(prange, best_loc);
3516 if (!bo_node) {
3517 WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3518 best_loc = 0;
3519 goto out;
3520 }
3521
3522 if (bo_node->adev->apu_prefer_gtt) {
3523 best_loc = 0;
3524 goto out;
3525 }
3526
3527 if (p->xnack_enabled)
3528 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3529 else
3530 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3531 MAX_GPU_INSTANCE);
3532
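/* Any GPU in the selected bitmap that is not in the same XGMI hive as the
 * prefetch-target node forces best_loc back to the CPU (0).
 */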
3533 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3534 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3535 if (!pdd) {
3536 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3537 continue;
3538 }
3539
3540 if (pdd->dev->adev == bo_node->adev)
3541 continue;
3542
3543 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3544 best_loc = 0;
3545 break;
3546 }
3547 }
3548
3549 out:
3550 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3551 p->xnack_enabled, &p->svms, prange->start, prange->last,
3552 best_loc);
3553
3554 return best_loc;
3555 }
3556
3557 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3558 * @mm: current process mm_struct
3559 * @prange: svm range structure
3560 * @migrated: output, true if migration is triggered
3561 *
3562 * If the range's prefetch_loc is a GPU and its actual loc is CPU (0), migrate
3563 * the range from RAM to VRAM.
3564 * If the range's prefetch_loc is CPU (0) and its actual loc is a GPU, migrate
3565 * the range from VRAM to RAM.
3566 *
3567 * If GPU VM fault retry is not enabled, migration interacts with the MMU
3568 * notifier and restore work:
3569 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback
3570 * svm_range_evict stops all queues and schedules restore work
3571 * 2. svm_range_restore_work waits for migration to finish via
3572 * a. svm_range_validate_vram taking prange->migrate_mutex
3573 * b. svm_range_validate_ram HMM get pages waiting for the CPU fault handler
3574 * 3. restore work updates the GPU mappings and resumes all queues.
3575 *
3576 * Context: Process context
3577 *
3578 * Return:
3579 * 0 - OK, otherwise - error code of migration
3580 */
3581 static int
3582 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3583 bool *migrated)
3584 {
3585 uint32_t best_loc;
3586 int r = 0;
3587
3588 *migrated = false;
3589 best_loc = svm_range_best_prefetch_location(prange);
3590
3591 /* Even when best_loc is a GPU node and equals prange->actual_loc, we still
3592 * need to migrate, because prange->actual_loc != 0 does not mean that all
3593 * pages in prange are in VRAM. The hmm migrate code will pick up the right
3594 * pages during migration.
3595 */
3596 if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
3597 (best_loc == 0 && prange->actual_loc == 0))
3598 return 0;
3599
3600 if (!best_loc) {
3601 r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
3602 KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3603 *migrated = !r;
3604 return r;
3605 }
3606
3607 r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
3608 mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3609 *migrated = !r;
3610
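/* Note: a failed migration to VRAM is not returned as an error here;
 * *migrated stays false and the caller can still validate and map the range
 * in system memory.
 */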
3611 return 0;
3612 }
3613
3614 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3615 {
3616 /* Dereferencing fence->svm_bo is safe here because the fence hasn't
3617 * signaled yet and we're under the protection of the fence->lock.
3618 * After the fence is signaled in svm_range_bo_release, we cannot get
3619 * here any more.
3620 *
3621 * Reference is dropped in svm_range_evict_svm_bo_worker.
3622 */
3623 if (svm_bo_ref_unless_zero(fence->svm_bo)) {
3624 WRITE_ONCE(fence->svm_bo->evicting, 1);
3625 schedule_work(&fence->svm_bo->eviction_work);
3626 }
3627
3628 return 0;
3629 }
3630
3631 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3632 {
3633 struct svm_range_bo *svm_bo;
3634 struct mm_struct *mm;
3635 int r = 0;
3636
3637 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3638
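/* If the process mm is already gone there is nothing left to evict; just
 * drop the svm_bo reference taken in svm_range_schedule_evict_svm_bo.
 */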
3639 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3640 mm = svm_bo->eviction_fence->mm;
3641 } else {
3642 svm_range_bo_unref(svm_bo);
3643 return;
3644 }
3645
3646 mmap_read_lock(mm);
3647 spin_lock(&svm_bo->list_lock);
3648 while (!list_empty(&svm_bo->range_list) && !r) {
3649 struct svm_range *prange =
3650 list_first_entry(&svm_bo->range_list,
3651 struct svm_range, svm_bo_list);
3652 int retries = 3;
3653
3654 list_del_init(&prange->svm_bo_list);
3655 spin_unlock(&svm_bo->list_lock);
3656
3657 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3658 prange->start, prange->last);
3659
3660 mutex_lock(&prange->migrate_mutex);
3661 do {
3662 /* Migrate all VRAM pages in this prange to system RAM;
3663 * afterwards prange->actual_loc should be zero.
3664 */
3665 r = svm_migrate_vram_to_ram(prange, mm,
3666 prange->start, prange->last,
3667 KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3668 } while (!r && prange->actual_loc && --retries);
3669
3670 if (!r && prange->actual_loc)
3671 pr_info_once("Migration failed during eviction");
3672
3673 if (!prange->actual_loc) {
3674 mutex_lock(&prange->lock);
3675 prange->svm_bo = NULL;
3676 mutex_unlock(&prange->lock);
3677 }
3678 mutex_unlock(&prange->migrate_mutex);
3679
3680 spin_lock(&svm_bo->list_lock);
3681 }
3682 spin_unlock(&svm_bo->list_lock);
3683 mmap_read_unlock(mm);
3684 mmput(mm);
3685
3686 dma_fence_signal(&svm_bo->eviction_fence->base);
3687
3688 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3689 * has been called in svm_migrate_vram_to_ram
3690 */
3691 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3692 svm_range_bo_unref(svm_bo);
3693 }
3694
3695 static int
3696 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3697 uint64_t start, uint64_t size, uint32_t nattr,
3698 struct kfd_ioctl_svm_attribute *attrs)
3699 {
3700 struct amdkfd_process_info *process_info = p->kgd_process_info;
3701 struct list_head update_list;
3702 struct list_head insert_list;
3703 struct list_head remove_list;
3704 struct list_head remap_list;
3705 struct svm_range_list *svms;
3706 struct svm_range *prange;
3707 struct svm_range *next;
3708 bool update_mapping = false;
3709 bool flush_tlb;
3710 int r, ret = 0;
3711
3712 pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3713 p->lead_thread->pid, &p->svms, start, start + size - 1, size);
3714
3715 r = svm_range_check_attr(p, nattr, attrs);
3716 if (r)
3717 return r;
3718
3719 svms = &p->svms;
3720
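/* Lock ordering below: process_info->lock, then the mmap write lock
 * (taken inside svm_range_list_lock_and_flush_work), then svms->lock; the
 * mmap lock is later downgraded to read mode for migration and mapping.
 */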
3721 mutex_lock(&process_info->lock);
3722
3723 svm_range_list_lock_and_flush_work(svms, mm);
3724
3725 r = svm_range_is_valid(p, start, size);
3726 if (r) {
3727 pr_debug("invalid range r=%d\n", r);
3728 mmap_write_unlock(mm);
3729 goto out;
3730 }
3731
3732 mutex_lock(&svms->lock);
3733
3734 /* Add new range and split existing ranges as needed */
3735 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3736 &insert_list, &remove_list, &remap_list);
3737 if (r) {
3738 mutex_unlock(&svms->lock);
3739 mmap_write_unlock(mm);
3740 goto out;
3741 }
3742 /* Apply changes as a transaction */
3743 list_for_each_entry_safe(prange, next, &insert_list, list) {
3744 svm_range_add_to_svms(prange);
3745 svm_range_add_notifier_locked(mm, prange);
3746 }
3747 list_for_each_entry(prange, &update_list, update_list) {
3748 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3749 /* TODO: unmap ranges from GPU that lost access */
3750 }
3751 update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
3752
3753 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3754 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3755 prange->svms, prange, prange->start,
3756 prange->last);
3757 svm_range_unlink(prange);
3758 svm_range_remove_notifier(prange);
3759 svm_range_free(prange, false);
3760 }
3761
3762 mmap_write_downgrade(mm);
3763 /* Trigger migrations and revalidate and map to GPUs as needed. If
3764 * this fails we may be left with partially completed actions. There
3765 * is no clean way of rolling back to the previous state in such a
3766 * case because the rollback wouldn't be guaranteed to work either.
3767 */
3768 list_for_each_entry(prange, &update_list, update_list) {
3769 bool migrated;
3770
3771 mutex_lock(&prange->migrate_mutex);
3772
3773 r = svm_range_trigger_migration(mm, prange, &migrated);
3774 if (r)
3775 goto out_unlock_range;
3776
3777 if (migrated && (!p->xnack_enabled ||
3778 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3779 prange->mapped_to_gpu) {
3780 pr_debug("restore_work will update mappings of GPUs\n");
3781 mutex_unlock(&prange->migrate_mutex);
3782 continue;
3783 }
3784
3785 if (!migrated && !update_mapping) {
3786 mutex_unlock(&prange->migrate_mutex);
3787 continue;
3788 }
3789
3790 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3791
3792 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3793 MAX_GPU_INSTANCE, true, true, flush_tlb);
3794 if (r)
3795 pr_debug("failed %d to map svm range\n", r);
3796
3797 out_unlock_range:
3798 mutex_unlock(&prange->migrate_mutex);
3799 if (r)
3800 ret = r;
3801 }
3802
3803 list_for_each_entry(prange, &remap_list, update_list) {
3804 pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
3805 prange, prange->start, prange->last);
3806 mutex_lock(&prange->migrate_mutex);
3807 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3808 MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
3809 if (r)
3810 pr_debug("failed %d on remap svm range\n", r);
3811 mutex_unlock(&prange->migrate_mutex);
3812 if (r)
3813 ret = r;
3814 }
3815
3816 dynamic_svm_range_dump(svms);
3817
3818 mutex_unlock(&svms->lock);
3819 mmap_read_unlock(mm);
3820 out:
3821 mutex_unlock(&process_info->lock);
3822
3823 pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] done, r=%d\n",
3824 p->lead_thread->pid, &p->svms, start, start + size - 1, r);
3825
3826 return ret ? ret : r;
3827 }
3828
3829 static int
3830 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3831 uint64_t start, uint64_t size, uint32_t nattr,
3832 struct kfd_ioctl_svm_attribute *attrs)
3833 {
3834 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3835 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3836 bool get_preferred_loc = false;
3837 bool get_prefetch_loc = false;
3838 bool get_granularity = false;
3839 bool get_accessible = false;
3840 bool get_flags = false;
3841 uint64_t last = start + size - 1UL;
3842 uint8_t granularity = 0xff;
3843 struct interval_tree_node *node;
3844 struct svm_range_list *svms;
3845 struct svm_range *prange;
3846 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3847 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3848 uint32_t flags_and = 0xffffffff;
3849 uint32_t flags_or = 0;
3850 int gpuidx;
3851 uint32_t i;
3852 int r = 0;
3853
3854 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3855 start + size - 1, nattr);
3856
3857 /* Flush pending deferred work to avoid racing with deferred actions from
3858 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3859 * can still race with get_attr because we don't hold the mmap lock. But that
3860 * would be a race condition in the application anyway, and undefined
3861 * behaviour is acceptable in that case.
3862 */
3863 flush_work(&p->svms.deferred_list_work);
3864
3865 mmap_read_lock(mm);
3866 r = svm_range_is_valid(p, start, size);
3867 mmap_read_unlock(mm);
3868 if (r) {
3869 pr_debug("invalid range r=%d\n", r);
3870 return r;
3871 }
3872
3873 for (i = 0; i < nattr; i++) {
3874 switch (attrs[i].type) {
3875 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3876 get_preferred_loc = true;
3877 break;
3878 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3879 get_prefetch_loc = true;
3880 break;
3881 case KFD_IOCTL_SVM_ATTR_ACCESS:
3882 get_accessible = true;
3883 break;
3884 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3885 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3886 get_flags = true;
3887 break;
3888 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3889 get_granularity = true;
3890 break;
3891 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3892 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3893 fallthrough;
3894 default:
3895 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3896 return -EINVAL;
3897 }
3898 }
3899
3900 svms = &p->svms;
3901
3902 mutex_lock(&svms->lock);
3903
3904 node = interval_tree_iter_first(&svms->objects, start, last);
3905 if (!node) {
3906 pr_debug("range attrs not found return default values\n");
3907 svm_range_set_default_attributes(svms, &location, &prefetch_loc,
3908 &granularity, &flags_and);
3909 flags_or = flags_and;
3910 if (p->xnack_enabled)
3911 bitmap_copy(bitmap_access, svms->bitmap_supported,
3912 MAX_GPU_INSTANCE);
3913 else
3914 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3915 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3916 goto fill_values;
3917 }
3918 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3919 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3920
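/* Aggregate attributes across all ranges overlapping [start, last]:
 * preferred/prefetch locations must agree or they collapse to UNDEFINED,
 * access bitmaps are AND-ed, flags are combined with AND and OR, and the
 * reported granularity is the minimum found.
 */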
3921 while (node) {
3922 struct interval_tree_node *next;
3923
3924 prange = container_of(node, struct svm_range, it_node);
3925 next = interval_tree_iter_next(node, start, last);
3926
3927 if (get_preferred_loc) {
3928 if (prange->preferred_loc ==
3929 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3930 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3931 location != prange->preferred_loc)) {
3932 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3933 get_preferred_loc = false;
3934 } else {
3935 location = prange->preferred_loc;
3936 }
3937 }
3938 if (get_prefetch_loc) {
3939 if (prange->prefetch_loc ==
3940 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3941 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3942 prefetch_loc != prange->prefetch_loc)) {
3943 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3944 get_prefetch_loc = false;
3945 } else {
3946 prefetch_loc = prange->prefetch_loc;
3947 }
3948 }
3949 if (get_accessible) {
3950 bitmap_and(bitmap_access, bitmap_access,
3951 prange->bitmap_access, MAX_GPU_INSTANCE);
3952 bitmap_and(bitmap_aip, bitmap_aip,
3953 prange->bitmap_aip, MAX_GPU_INSTANCE);
3954 }
3955 if (get_flags) {
3956 flags_and &= prange->flags;
3957 flags_or |= prange->flags;
3958 }
3959
3960 if (get_granularity && prange->granularity < granularity)
3961 granularity = prange->granularity;
3962
3963 node = next;
3964 }
3965 fill_values:
3966 mutex_unlock(&svms->lock);
3967
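/* Fill in the results. For KFD_IOCTL_SVM_ATTR_ACCESS the caller supplies a
 * gpuid in .value and the .type field is rewritten to ACCESS, ACCESS_IN_PLACE
 * or NO_ACCESS for that GPU; .value itself is left untouched.
 */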
3968 for (i = 0; i < nattr; i++) {
3969 switch (attrs[i].type) {
3970 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3971 attrs[i].value = location;
3972 break;
3973 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3974 attrs[i].value = prefetch_loc;
3975 break;
3976 case KFD_IOCTL_SVM_ATTR_ACCESS:
3977 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3978 attrs[i].value);
3979 if (gpuidx < 0) {
3980 pr_debug("invalid gpuid %x\n", attrs[i].value);
3981 return -EINVAL;
3982 }
3983 if (test_bit(gpuidx, bitmap_access))
3984 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3985 else if (test_bit(gpuidx, bitmap_aip))
3986 attrs[i].type =
3987 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3988 else
3989 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3990 break;
3991 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3992 attrs[i].value = flags_and;
3993 break;
3994 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3995 attrs[i].value = ~flags_or;
3996 break;
3997 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3998 attrs[i].value = (uint32_t)granularity;
3999 break;
4000 }
4001 }
4002
4003 return 0;
4004 }
4005
4006 int kfd_criu_resume_svm(struct kfd_process *p)
4007 {
4008 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
4009 int nattr_common = 4, nattr_accessibility = 1;
4010 struct criu_svm_metadata *criu_svm_md = NULL;
4011 struct svm_range_list *svms = &p->svms;
4012 struct criu_svm_metadata *next = NULL;
4013 uint32_t set_flags = 0xffffffff;
4014 int i, j, num_attrs, ret = 0;
4015 uint64_t set_attr_size;
4016 struct mm_struct *mm;
4017
4018 if (list_empty(&svms->criu_svm_metadata_list)) {
4019 pr_debug("No SVM data from CRIU restore stage 2\n");
4020 return ret;
4021 }
4022
4023 mm = get_task_mm(p->lead_thread);
4024 if (!mm) {
4025 pr_err("failed to get mm for the target process\n");
4026 return -ESRCH;
4027 }
4028
4029 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
4030
4031 i = j = 0;
4032 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
4033 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
4034 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
4035
4036 for (j = 0; j < num_attrs; j++) {
4037 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
4038 i, j, criu_svm_md->data.attrs[j].type,
4039 i, j, criu_svm_md->data.attrs[j].value);
4040 switch (criu_svm_md->data.attrs[j].type) {
4041 /* During a checkpoint operation, the query for the
4042 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might return
4043 * KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was not used by
4044 * the range that was checkpointed. Care must be taken not
4045 * to restore such an invalid value; otherwise the gpuidx
4046 * value would be invalid and set_attr would eventually
4047 * fail. Replace those entries with a harmless dummy
4048 * attribute such as
4049 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
4050 */
4051 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
4052 if (criu_svm_md->data.attrs[j].value ==
4053 KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
4054 criu_svm_md->data.attrs[j].type =
4055 KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4056 criu_svm_md->data.attrs[j].value = 0;
4057 }
4058 break;
4059 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
4060 set_flags = criu_svm_md->data.attrs[j].value;
4061 break;
4062 default:
4063 break;
4064 }
4065 }
4066
4067 /* CLR_FLAGS is not available via get_attr during checkpoint, but
4068 * it needs to be inserted before restoring the ranges, so
4069 * allocate extra space for it before calling set_attr.
4070 */
4071 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4072 (num_attrs + 1);
4073 set_attr_new = krealloc(set_attr, set_attr_size,
4074 GFP_KERNEL);
4075 if (!set_attr_new) {
4076 ret = -ENOMEM;
4077 goto exit;
4078 }
4079 set_attr = set_attr_new;
4080
4081 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
4082 sizeof(struct kfd_ioctl_svm_attribute));
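/* Append the synthesized CLR_FLAGS attribute: clear every flag that was not
 * set at checkpoint time.
 */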
4083 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
4084 set_attr[num_attrs].value = ~set_flags;
4085
4086 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
4087 criu_svm_md->data.size, num_attrs + 1,
4088 set_attr);
4089 if (ret) {
4090 pr_err("CRIU: failed to set range attributes\n");
4091 goto exit;
4092 }
4093
4094 i++;
4095 }
4096 exit:
4097 kfree(set_attr);
4098 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
4099 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
4100 criu_svm_md->data.start_addr);
4101 kfree(criu_svm_md);
4102 }
4103
4104 mmput(mm);
4105 return ret;
4106
4107 }
4108
4109 int kfd_criu_restore_svm(struct kfd_process *p,
4110 uint8_t __user *user_priv_ptr,
4111 uint64_t *priv_data_offset,
4112 uint64_t max_priv_data_size)
4113 {
4114 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
4115 int nattr_common = 4, nattr_accessibility = 1;
4116 struct criu_svm_metadata *criu_svm_md = NULL;
4117 struct svm_range_list *svms = &p->svms;
4118 uint32_t num_devices;
4119 int ret = 0;
4120
4121 num_devices = p->n_pdds;
4122 /* Handle one SVM range object at a time. The number of GPUs is
4123 * assumed to be the same on the restore node; this must be checked
4124 * while evaluating the topology earlier.
4125 */
4126
4127 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
4128 (nattr_common + nattr_accessibility * num_devices);
4129 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
4130
4131 svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4132 svm_attrs_size;
4133
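/* svm_object_md_size covers the in-kernel metadata (list head plus the
 * checkpoint payload), while svm_priv_data_size is only the payload copied
 * from user space into criu_svm_md->data below.
 */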
4134 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
4135 if (!criu_svm_md) {
4136 pr_err("failed to allocate memory to store svm metadata\n");
4137 return -ENOMEM;
4138 }
4139 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
4140 ret = -EINVAL;
4141 goto exit;
4142 }
4143
4144 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
4145 svm_priv_data_size);
4146 if (ret) {
4147 ret = -EFAULT;
4148 goto exit;
4149 }
4150 *priv_data_offset += svm_priv_data_size;
4151
4152 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
4153
4154 return 0;
4155
4156
4157 exit:
4158 kfree(criu_svm_md);
4159 return ret;
4160 }
4161
4162 void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
4163 uint64_t *svm_priv_data_size)
4164 {
4165 uint64_t total_size, accessibility_size, common_attr_size;
4166 int nattr_common = 4, nattr_accessibility = 1;
4167 int num_devices = p->n_pdds;
4168 struct svm_range_list *svms;
4169 struct svm_range *prange;
4170 uint32_t count = 0;
4171
4172 *svm_priv_data_size = 0;
4173
4174 svms = &p->svms;
4175
4176 mutex_lock(&svms->lock);
4177 list_for_each_entry(prange, &svms->list, list) {
4178 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
4179 prange, prange->start, prange->npages,
4180 prange->start + prange->npages - 1);
4181 count++;
4182 }
4183 mutex_unlock(&svms->lock);
4184
4185 *num_svm_ranges = count;
4186 /* Only the accessibility attributes need to be queried for each GPU
4187 * individually; the remaining ones span the entire process regardless
4188 * of the various GPU nodes. Of the remaining attributes,
4189 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
4190 *
4191 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
4192 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
4193 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
4194 * KFD_IOCTL_SVM_ATTR_GRANULARITY
4195 *
4196 * ** ACCESSIBILITY ATTRIBUTES **
4197 * (Considered as one, type is altered during query, value is gpuid)
4198 * KFD_IOCTL_SVM_ATTR_ACCESS
4199 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
4200 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
4201 */
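/* Illustrative sizing: with two GPUs, each range would need
 * sizeof(struct kfd_criu_svm_range_priv_data) plus (4 + 1 * 2) attribute
 * entries, and *svm_priv_data_size is that per-range total multiplied by the
 * range count.
 */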
4202 if (*num_svm_ranges > 0) {
4203 common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4204 nattr_common;
4205 accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
4206 nattr_accessibility * num_devices;
4207
4208 total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4209 common_attr_size + accessibility_size;
4210
4211 *svm_priv_data_size = *num_svm_ranges * total_size;
4212 }
4213
4214 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
4215 *svm_priv_data_size);
4216 }
4217
4218 int kfd_criu_checkpoint_svm(struct kfd_process *p,
4219 uint8_t __user *user_priv_data,
4220 uint64_t *priv_data_offset)
4221 {
4222 struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
4223 struct kfd_ioctl_svm_attribute *query_attr = NULL;
4224 uint64_t svm_priv_data_size, query_attr_size = 0;
4225 int index, nattr_common = 4, ret = 0;
4226 struct svm_range_list *svms;
4227 int num_devices = p->n_pdds;
4228 struct svm_range *prange;
4229 struct mm_struct *mm;
4230
4231 svms = &p->svms;
4232
4233 mm = get_task_mm(p->lead_thread);
4234 if (!mm) {
4235 pr_err("failed to get mm for the target process\n");
4236 return -ESRCH;
4237 }
4238
4239 query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4240 (nattr_common + num_devices);
4241
4242 query_attr = kzalloc(query_attr_size, GFP_KERNEL);
4243 if (!query_attr) {
4244 ret = -ENOMEM;
4245 goto exit;
4246 }
4247
4248 query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
4249 query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
4250 query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4251 query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
4252
4253 for (index = 0; index < num_devices; index++) {
4254 struct kfd_process_device *pdd = p->pdds[index];
4255
4256 query_attr[index + nattr_common].type =
4257 KFD_IOCTL_SVM_ATTR_ACCESS;
4258 query_attr[index + nattr_common].value = pdd->user_gpu_id;
4259 }
4260
4261 svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
4262
4263 svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
4264 if (!svm_priv) {
4265 ret = -ENOMEM;
4266 goto exit_query;
4267 }
4268
4269 index = 0;
4270 list_for_each_entry(prange, &svms->list, list) {
4271
4272 svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
4273 svm_priv->start_addr = prange->start;
4274 svm_priv->size = prange->npages;
4275 memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4276 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4277 prange, prange->start, prange->npages,
4278 prange->start + prange->npages - 1,
4279 prange->npages * PAGE_SIZE);
4280
4281 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4282 svm_priv->size,
4283 (nattr_common + num_devices),
4284 svm_priv->attrs);
4285 if (ret) {
4286 pr_err("CRIU: failed to obtain range attributes\n");
4287 goto exit_priv;
4288 }
4289
4290 if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4291 svm_priv_data_size)) {
4292 pr_err("Failed to copy svm priv to user\n");
4293 ret = -EFAULT;
4294 goto exit_priv;
4295 }
4296
4297 *priv_data_offset += svm_priv_data_size;
4298
4299 }
4300
4301
4302 exit_priv:
4303 kfree(svm_priv);
4304 exit_query:
4305 kfree(query_attr);
4306 exit:
4307 mmput(mm);
4308 return ret;
4309 }
4310
4311 int
4312 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4313 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4314 {
4315 struct mm_struct *mm = current->mm;
4316 int r;
4317
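/* The ioctl supplies start and size in bytes; convert them to page units here
 * so the set/get handlers below work entirely in pages.
 */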
4318 start >>= PAGE_SHIFT;
4319 size >>= PAGE_SHIFT;
4320
4321 switch (op) {
4322 case KFD_IOCTL_SVM_OP_SET_ATTR:
4323 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4324 break;
4325 case KFD_IOCTL_SVM_OP_GET_ATTR:
4326 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
4327 break;
4328 default:
4329 r = -EINVAL;
4330 break;
4331 }
4332
4333 return r;
4334 }
4335