1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include <linux/dynamic_debug.h>
27 #include <drm/ttm/ttm_tt.h>
28 #include <drm/drm_exec.h>
29
30 #include "amdgpu_sync.h"
31 #include "amdgpu_object.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_hmm.h"
34 #include "amdgpu.h"
35 #include "amdgpu_xgmi.h"
36 #include "amdgpu_reset.h"
37 #include "kfd_priv.h"
38 #include "kfd_svm.h"
39 #include "kfd_migrate.h"
40 #include "kfd_smi_events.h"
41
42 #ifdef dev_fmt
43 #undef dev_fmt
44 #endif
45 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
46
47 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
48
49 /* Long enough to ensure no retry fault comes after svm range is restored and
50 * page table is updated.
51 */
52 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC)
53 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
54 #define dynamic_svm_range_dump(svms) \
55 _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
56 #else
57 #define dynamic_svm_range_dump(svms) \
58 do { if (0) svm_range_debug_dump(svms); } while (0)
59 #endif
60
61 /* A giant svm range is split into smaller ranges based on this limit. It is
62 * the minimum of 1/32 of each dGPU/APU VRAM size, clamped between 2MB and
63 * 1GB and aligned down to a power of two.
64 */
65 static uint64_t max_svm_range_pages;
66
67 struct criu_svm_metadata {
68 struct list_head list;
69 struct kfd_criu_svm_range_priv_data data;
70 };
71
72 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
73 static bool
74 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
75 const struct mmu_notifier_range *range,
76 unsigned long cur_seq);
77 static int
78 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
79 uint64_t *bo_s, uint64_t *bo_l);
80 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
81 .invalidate = svm_range_cpu_invalidate_pagetables,
82 };
83
84 /**
85 * svm_range_unlink - unlink svm_range from lists and interval tree
86 * @prange: svm range structure to be removed
87 *
88 * Remove the svm_range from the svms and svm_bo lists and the svms
89 * interval tree.
90 *
91 * Context: The caller must hold svms->lock
92 */
93 static void svm_range_unlink(struct svm_range *prange)
94 {
95 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
96 prange, prange->start, prange->last);
97
98 if (prange->svm_bo) {
99 spin_lock(&prange->svm_bo->list_lock);
100 list_del(&prange->svm_bo_list);
101 spin_unlock(&prange->svm_bo->list_lock);
102 }
103
104 list_del(&prange->list);
105 if (prange->it_node.start != 0 && prange->it_node.last != 0)
106 interval_tree_remove(&prange->it_node, &prange->svms->objects);
107 }
108
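/* Register an MMU interval notifier covering prange's virtual address range
 * ([start, start + npages) in pages), so CPU page table invalidations of that
 * range are reported through svm_range_cpu_invalidate_pagetables.
 */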
109 static void
110 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
111 {
112 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
113 prange, prange->start, prange->last);
114
115 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
116 prange->start << PAGE_SHIFT,
117 prange->npages << PAGE_SHIFT,
118 &svm_range_mn_ops);
119 }
120
121 /**
122 * svm_range_add_to_svms - add svm range to svms
123 * @prange: svm range structure to be added
124 *
125 * Add the svm range to svms interval tree and link list
126 *
127 * Context: The caller must hold svms->lock
128 */
129 static void svm_range_add_to_svms(struct svm_range *prange)
130 {
131 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
132 prange, prange->start, prange->last);
133
134 list_move_tail(&prange->list, &prange->svms->list);
135 prange->it_node.start = prange->start;
136 prange->it_node.last = prange->last;
137 interval_tree_insert(&prange->it_node, &prange->svms->objects);
138 }
139
140 static void svm_range_remove_notifier(struct svm_range *prange)
141 {
142 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
143 prange->svms, prange,
144 prange->notifier.interval_tree.start >> PAGE_SHIFT,
145 prange->notifier.interval_tree.last >> PAGE_SHIFT);
146
147 if (prange->notifier.interval_tree.start != 0 &&
148 prange->notifier.interval_tree.last != 0)
149 mmu_interval_notifier_remove(&prange->notifier);
150 }
151
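/* A dma_addr entry refers to a valid system memory mapping only if it is
 * non-zero, is not a DMA mapping error, and is not tagged as VRAM with
 * SVM_RANGE_VRAM_DOMAIN.
 */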
152 static bool
153 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
154 {
155 return dma_addr && !dma_mapping_error(dev, dma_addr) &&
156 !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
157 }
158
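/* DMA-map npages pages of prange, starting at offset, for the GPU identified
 * by gpuidx. System memory pages are mapped with dma_map_page; device (VRAM)
 * pages are not DMA-mapped, their VRAM address is stored instead and tagged
 * with SVM_RANGE_VRAM_DOMAIN. The per-GPU dma_addr array is allocated on
 * first use.
 */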
159 static int
160 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
161 unsigned long offset, unsigned long npages,
162 unsigned long *hmm_pfns, uint32_t gpuidx)
163 {
164 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
165 dma_addr_t *addr = prange->dma_addr[gpuidx];
166 struct device *dev = adev->dev;
167 struct page *page;
168 int i, r;
169
170 if (!addr) {
171 addr = kvzalloc_objs(*addr, prange->npages);
172 if (!addr)
173 return -ENOMEM;
174 prange->dma_addr[gpuidx] = addr;
175 }
176
177 addr += offset;
178 for (i = 0; i < npages; i++) {
179 if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
180 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
181
182 page = hmm_pfn_to_page(hmm_pfns[i]);
183 if (is_zone_device_page(page)) {
184 struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
185
186 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
187 bo_adev->vm_manager.vram_base_offset -
188 bo_adev->kfd.pgmap.range.start;
189 addr[i] |= SVM_RANGE_VRAM_DOMAIN;
190 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
191 continue;
192 }
193 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
194 r = dma_mapping_error(dev, addr[i]);
195 if (r) {
196 dev_err(dev, "failed %d dma_map_page\n", r);
197 return r;
198 }
199 pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
200 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
201 }
202
203 return 0;
204 }
205
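/* DMA-map the pages returned by hmm_range_fault on every GPU set in bitmap */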
206 static int
207 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
208 unsigned long offset, unsigned long npages,
209 unsigned long *hmm_pfns)
210 {
211 struct kfd_process *p;
212 uint32_t gpuidx;
213 int r;
214
215 p = container_of(prange->svms, struct kfd_process, svms);
216
217 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
218 struct kfd_process_device *pdd;
219
220 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
221 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
222 if (!pdd) {
223 pr_debug("failed to find device idx %d\n", gpuidx);
224 return -EINVAL;
225 }
226
227 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
228 hmm_pfns, gpuidx);
229 if (r)
230 break;
231 }
232
233 return r;
234 }
235
236 void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
237 unsigned long offset, unsigned long npages)
238 {
239 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
240 int i;
241
242 if (!dma_addr)
243 return;
244
245 for (i = offset; i < offset + npages; i++) {
246 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
247 continue;
248 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
249 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
250 dma_addr[i] = 0;
251 }
252 }
253
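/* Unmap the DMA addresses of prange on every GPU that has a dma_addr array */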
254 void svm_range_dma_unmap(struct svm_range *prange)
255 {
256 struct kfd_process_device *pdd;
257 dma_addr_t *dma_addr;
258 struct device *dev;
259 struct kfd_process *p;
260 uint32_t gpuidx;
261
262 p = container_of(prange->svms, struct kfd_process, svms);
263
264 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
265 dma_addr = prange->dma_addr[gpuidx];
266 if (!dma_addr)
267 continue;
268
269 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
270 if (!pdd) {
271 pr_debug("failed to find device idx %d\n", gpuidx);
272 continue;
273 }
274 dev = &pdd->dev->adev->pdev->dev;
275
276 svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
277 }
278 }
279
280 static void svm_range_free(struct svm_range *prange, bool do_unmap)
281 {
282 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
283 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
284 uint32_t gpuidx;
285
286 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
287 prange->start, prange->last);
288
289 svm_range_vram_node_free(prange);
290 if (do_unmap)
291 svm_range_dma_unmap(prange);
292
293 if (do_unmap && !p->xnack_enabled) {
294 pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
295 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
296 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
297 }
298
299 /* free dma_addr array for each gpu */
300 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
301 if (prange->dma_addr[gpuidx]) {
302 kvfree(prange->dma_addr[gpuidx]);
303 prange->dma_addr[gpuidx] = NULL;
304 }
305 }
306
307 mutex_destroy(&prange->lock);
308 mutex_destroy(&prange->migrate_mutex);
309 kfree(prange);
310 }
311
312 static void
313 svm_range_set_default_attributes(struct svm_range_list *svms, int32_t *location,
314 int32_t *prefetch_loc, uint8_t *granularity,
315 uint32_t *flags)
316 {
317 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
318 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
319 *granularity = svms->default_granularity;
320 *flags =
321 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
322 }
323
324 static struct
325 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
326 uint64_t last, bool update_mem_usage)
327 {
328 uint64_t size = last - start + 1;
329 struct svm_range *prange;
330 struct kfd_process *p;
331
332 prange = kzalloc_obj(*prange);
333 if (!prange)
334 return NULL;
335
336 p = container_of(svms, struct kfd_process, svms);
337 if (!p->xnack_enabled && update_mem_usage &&
338 amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
339 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
340 pr_info("SVM mapping failed, exceeds resident system memory limit\n");
341 kfree(prange);
342 return NULL;
343 }
344 prange->npages = size;
345 prange->svms = svms;
346 prange->start = start;
347 prange->last = last;
348 INIT_LIST_HEAD(&prange->list);
349 INIT_LIST_HEAD(&prange->update_list);
350 INIT_LIST_HEAD(&prange->svm_bo_list);
351 INIT_LIST_HEAD(&prange->deferred_list);
352 INIT_LIST_HEAD(&prange->child_list);
353 atomic_set(&prange->invalid, 0);
354 prange->validate_timestamp = 0;
355 prange->vram_pages = 0;
356 mutex_init(&prange->migrate_mutex);
357 mutex_init(&prange->lock);
358
359 if (p->xnack_enabled)
360 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
361 MAX_GPU_INSTANCE);
362
363 svm_range_set_default_attributes(svms, &prange->preferred_loc,
364 &prange->prefetch_loc,
365 &prange->granularity, &prange->flags);
366
367 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
368
369 return prange;
370 }
371
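/* Take a svm_bo reference unless its refcount has already dropped to zero */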
372 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
373 {
374 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
375 return false;
376
377 return true;
378 }
379
380 static void svm_range_bo_release(struct kref *kref)
381 {
382 struct svm_range_bo *svm_bo;
383
384 svm_bo = container_of(kref, struct svm_range_bo, kref);
385 pr_debug("svm_bo 0x%p\n", svm_bo);
386
387 spin_lock(&svm_bo->list_lock);
388 while (!list_empty(&svm_bo->range_list)) {
389 struct svm_range *prange =
390 list_first_entry(&svm_bo->range_list,
391 struct svm_range, svm_bo_list);
392 /* list_del_init tells a concurrent svm_range_vram_node_new when
393 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
394 */
395 list_del_init(&prange->svm_bo_list);
396 spin_unlock(&svm_bo->list_lock);
397
398 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
399 prange->start, prange->last);
400 mutex_lock(&prange->lock);
401 prange->svm_bo = NULL;
402 /* prange should not hold vram page now */
403 WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
404 mutex_unlock(&prange->lock);
405
406 spin_lock(&svm_bo->list_lock);
407 }
408 spin_unlock(&svm_bo->list_lock);
409
410 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
411 struct kfd_process_device *pdd;
412 struct kfd_process *p;
413 struct mm_struct *mm;
414
415 mm = svm_bo->eviction_fence->mm;
416 /*
417 * A forked child process takes a reference on the svm_bo device pages, so
418 * the svm_bo may be released after the parent process is gone.
419 */
420 p = kfd_lookup_process_by_mm(mm);
421 if (p) {
422 pdd = kfd_get_process_device_data(svm_bo->node, p);
423 if (pdd)
424 atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
425 kfd_unref_process(p);
426 }
427 mmput(mm);
428 }
429
430 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
431 /* We're not in the eviction worker. Signal the fence. */
432 dma_fence_signal(&svm_bo->eviction_fence->base);
433 dma_fence_put(&svm_bo->eviction_fence->base);
434 amdgpu_bo_unref(&svm_bo->bo);
435 kfree(svm_bo);
436 }
437
438 static void svm_range_bo_wq_release(struct work_struct *work)
439 {
440 struct svm_range_bo *svm_bo;
441
442 svm_bo = container_of(work, struct svm_range_bo, release_work);
443 svm_range_bo_release(&svm_bo->kref);
444 }
445
446 static void svm_range_bo_release_async(struct kref *kref)
447 {
448 struct svm_range_bo *svm_bo;
449
450 svm_bo = container_of(kref, struct svm_range_bo, kref);
451 pr_debug("svm_bo 0x%p\n", svm_bo);
452 INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
453 schedule_work(&svm_bo->release_work);
454 }
455
456 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
457 {
458 kref_put(&svm_bo->kref, svm_range_bo_release_async);
459 }
460
461 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
462 {
463 if (svm_bo)
464 kref_put(&svm_bo->kref, svm_range_bo_release);
465 }
466
467 static bool
468 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
469 {
470 mutex_lock(&prange->lock);
471 if (!prange->svm_bo) {
472 mutex_unlock(&prange->lock);
473 return false;
474 }
475 if (prange->ttm_res) {
476 /* We still have a reference, all is well */
477 mutex_unlock(&prange->lock);
478 return true;
479 }
480 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
481 /*
482 * Migrate from GPU to GPU, remove range from source svm_bo->node
483 * range list, and return false to allocate svm_bo from destination
484 * node.
485 */
486 if (prange->svm_bo->node != node) {
487 mutex_unlock(&prange->lock);
488
489 spin_lock(&prange->svm_bo->list_lock);
490 list_del_init(&prange->svm_bo_list);
491 spin_unlock(&prange->svm_bo->list_lock);
492
493 svm_range_bo_unref(prange->svm_bo);
494 return false;
495 }
496 if (READ_ONCE(prange->svm_bo->evicting)) {
497 struct dma_fence *f;
498 struct svm_range_bo *svm_bo;
499 /* The BO is getting evicted,
500 * we need to get a new one
501 */
502 mutex_unlock(&prange->lock);
503 svm_bo = prange->svm_bo;
504 f = dma_fence_get(&svm_bo->eviction_fence->base);
505 svm_range_bo_unref(prange->svm_bo);
506 /* wait for the fence to avoid long spin-loop
507 * at list_empty_careful
508 */
509 dma_fence_wait(f, false);
510 dma_fence_put(f);
511 } else {
512 /* The BO was still around and we got
513 * a new reference to it
514 */
515 mutex_unlock(&prange->lock);
516 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
517 prange->svms, prange->start, prange->last);
518
519 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
520 return true;
521 }
522
523 } else {
524 mutex_unlock(&prange->lock);
525 }
526
527 /* We need a new svm_bo. Spin-loop to wait for concurrent
528 * svm_range_bo_release to finish removing this range from
529 * its range list and set prange->svm_bo to null. After this,
530 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
531 */
532 while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
533 cond_resched();
534
535 return false;
536 }
537
538 static struct svm_range_bo *svm_range_bo_new(void)
539 {
540 struct svm_range_bo *svm_bo;
541
542 svm_bo = kzalloc_obj(*svm_bo);
543 if (!svm_bo)
544 return NULL;
545
546 kref_init(&svm_bo->kref);
547 INIT_LIST_HEAD(&svm_bo->range_list);
548 spin_lock_init(&svm_bo->list_lock);
549
550 return svm_bo;
551 }
552
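/**
 * svm_range_vram_node_new - allocate or reuse VRAM backing for a range
 * @node: KFD node whose VRAM will back the range
 * @prange: svm range that needs VRAM backing
 * @clear: clear the newly allocated VRAM
 *
 * Reuse the existing svm_bo if it is still valid on the same node, otherwise
 * create a new VRAM BO, attach the eviction fence and add the range to the
 * svm_bo range list.
 *
 * Return: 0 on success, negative errno on failure
 */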
553 int
554 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
555 bool clear)
556 {
557 struct kfd_process_device *pdd;
558 struct amdgpu_bo_param bp;
559 struct svm_range_bo *svm_bo;
560 struct amdgpu_bo_user *ubo;
561 struct amdgpu_bo *bo;
562 struct kfd_process *p;
563 struct mm_struct *mm;
564 int r;
565
566 p = container_of(prange->svms, struct kfd_process, svms);
567 pr_debug("process pid: %d svms 0x%p [0x%lx 0x%lx]\n",
568 p->lead_thread->pid, prange->svms,
569 prange->start, prange->last);
570
571 if (svm_range_validate_svm_bo(node, prange))
572 return 0;
573
574 svm_bo = svm_range_bo_new();
575 if (!svm_bo) {
576 pr_debug("failed to alloc svm bo\n");
577 return -ENOMEM;
578 }
579 mm = get_task_mm(p->lead_thread);
580 if (!mm) {
581 pr_debug("failed to get mm\n");
582 kfree(svm_bo);
583 return -ESRCH;
584 }
585 svm_bo->node = node;
586 svm_bo->eviction_fence =
587 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
588 mm,
589 svm_bo, p->context_id);
590 mmput(mm);
591 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
592 svm_bo->evicting = 0;
593 memset(&bp, 0, sizeof(bp));
594 bp.size = prange->npages * PAGE_SIZE;
595 bp.byte_align = PAGE_SIZE;
596 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
597 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
598 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
599 bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
600 bp.type = ttm_bo_type_device;
601 bp.resv = NULL;
602 if (node->xcp)
603 bp.xcp_id_plus1 = node->xcp->id + 1;
604
605 r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
606 if (r) {
607 pr_debug("failed %d to create bo\n", r);
608 goto create_bo_failed;
609 }
610 bo = &ubo->bo;
611
612 pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
613 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
614 bp.xcp_id_plus1 - 1);
615
616 r = amdgpu_bo_reserve(bo, true);
617 if (r) {
618 pr_debug("failed %d to reserve bo\n", r);
619 goto reserve_bo_failed;
620 }
621
622 if (clear) {
623 r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
624 if (r) {
625 pr_debug("failed %d to sync bo\n", r);
626 amdgpu_bo_unreserve(bo);
627 goto reserve_bo_failed;
628 }
629 }
630
631 r = dma_resv_reserve_fences(bo->tbo.base.resv, TTM_NUM_MOVE_FENCES);
632 if (r) {
633 amdgpu_bo_unreserve(bo);
634 goto reserve_bo_failed;
635 }
636 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
637
638 amdgpu_bo_unreserve(bo);
639
640 svm_bo->bo = bo;
641 prange->svm_bo = svm_bo;
642 prange->ttm_res = bo->tbo.resource;
643 prange->offset = 0;
644
645 spin_lock(&svm_bo->list_lock);
646 list_add(&prange->svm_bo_list, &svm_bo->range_list);
647 spin_unlock(&svm_bo->list_lock);
648
649 pdd = svm_range_get_pdd_by_node(prange, node);
650 if (pdd)
651 atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
652
653 return 0;
654
655 reserve_bo_failed:
656 amdgpu_bo_unref(&bo);
657 create_bo_failed:
658 dma_fence_put(&svm_bo->eviction_fence->base);
659 kfree(svm_bo);
660 prange->ttm_res = NULL;
661
662 return r;
663 }
664
665 void svm_range_vram_node_free(struct svm_range *prange)
666 {
667 /* serialize prange->svm_bo unref */
668 mutex_lock(&prange->lock);
669 /* prange->svm_bo has not been unreferenced yet */
670 if (prange->ttm_res) {
671 prange->ttm_res = NULL;
672 mutex_unlock(&prange->lock);
673 svm_range_bo_unref(prange->svm_bo);
674 } else
675 mutex_unlock(&prange->lock);
676 }
677
678 struct kfd_node *
679 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
680 {
681 struct kfd_process *p;
682 struct kfd_process_device *pdd;
683
684 p = container_of(prange->svms, struct kfd_process, svms);
685 pdd = kfd_process_device_data_by_id(p, gpu_id);
686 if (!pdd) {
687 pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
688 return NULL;
689 }
690
691 return pdd->dev;
692 }
693
694 struct kfd_process_device *
695 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
696 {
697 struct kfd_process *p;
698
699 p = container_of(prange->svms, struct kfd_process, svms);
700
701 return kfd_get_process_device_data(node, p);
702 }
703
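/* Validation callback: place the svm_bo in VRAM and let TTM validate it */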
704 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
705 {
706 struct ttm_operation_ctx ctx = { false, false };
707
708 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
709
710 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
711 }
712
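/* Validate the attribute list: reject unknown attribute types and location or
 * access attributes that refer to a GPU that does not exist or does not
 * support SVM.
 */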
713 static int
714 svm_range_check_attr(struct kfd_process *p,
715 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
716 {
717 uint32_t i;
718
719 for (i = 0; i < nattr; i++) {
720 uint32_t val = attrs[i].value;
721 int gpuidx = MAX_GPU_INSTANCE;
722
723 switch (attrs[i].type) {
724 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
725 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
726 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
727 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
728 break;
729 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
730 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
731 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
732 break;
733 case KFD_IOCTL_SVM_ATTR_ACCESS:
734 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
735 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
736 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
737 break;
738 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
739 break;
740 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
741 break;
742 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
743 break;
744 default:
745 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
746 return -EINVAL;
747 }
748
749 if (gpuidx < 0) {
750 pr_debug("no GPU 0x%x found\n", val);
751 return -EINVAL;
752 } else if (gpuidx < MAX_GPU_INSTANCE &&
753 !test_bit(gpuidx, p->svms.bitmap_supported)) {
754 pr_debug("GPU 0x%x not supported\n", val);
755 return -EINVAL;
756 }
757 }
758
759 return 0;
760 }
761
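/* Apply the attribute list to prange; set *update_mapping when a change
 * requires the GPU mappings to be updated.
 */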
762 static void
763 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
764 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
765 bool *update_mapping)
766 {
767 uint32_t i;
768 int gpuidx;
769
770 for (i = 0; i < nattr; i++) {
771 switch (attrs[i].type) {
772 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
773 prange->preferred_loc = attrs[i].value;
774 break;
775 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
776 prange->prefetch_loc = attrs[i].value;
777 break;
778 case KFD_IOCTL_SVM_ATTR_ACCESS:
779 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
780 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
781 if (!p->xnack_enabled)
782 *update_mapping = true;
783
784 gpuidx = kfd_process_gpuidx_from_gpuid(p,
785 attrs[i].value);
786 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
787 bitmap_clear(prange->bitmap_access, gpuidx, 1);
788 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
789 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
790 bitmap_set(prange->bitmap_access, gpuidx, 1);
791 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
792 } else {
793 bitmap_clear(prange->bitmap_access, gpuidx, 1);
794 bitmap_set(prange->bitmap_aip, gpuidx, 1);
795 }
796 break;
797 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
798 *update_mapping = true;
799 prange->flags |= attrs[i].value;
800 break;
801 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
802 *update_mapping = true;
803 prange->flags &= ~attrs[i].value;
804 break;
805 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
806 prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
807 break;
808 default:
809 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
810 }
811 }
812 }
813
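/* Return true if applying attrs would not change any attribute of prange.
 * A prefetch location always counts as changed because prefetch must always
 * trigger a migration.
 */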
814 static bool
815 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
816 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
817 {
818 uint32_t i;
819 int gpuidx;
820
821 for (i = 0; i < nattr; i++) {
822 switch (attrs[i].type) {
823 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
824 if (prange->preferred_loc != attrs[i].value)
825 return false;
826 break;
827 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
828 /* Prefetch should always trigger a migration even
829 * if the value of the attribute didn't change.
830 */
831 return false;
832 case KFD_IOCTL_SVM_ATTR_ACCESS:
833 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
834 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
835 gpuidx = kfd_process_gpuidx_from_gpuid(p,
836 attrs[i].value);
837 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
838 if (test_bit(gpuidx, prange->bitmap_access) ||
839 test_bit(gpuidx, prange->bitmap_aip))
840 return false;
841 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
842 if (!test_bit(gpuidx, prange->bitmap_access))
843 return false;
844 } else {
845 if (!test_bit(gpuidx, prange->bitmap_aip))
846 return false;
847 }
848 break;
849 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
850 if ((prange->flags & attrs[i].value) != attrs[i].value)
851 return false;
852 break;
853 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
854 if ((prange->flags & attrs[i].value) != 0)
855 return false;
856 break;
857 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
858 if (prange->granularity != attrs[i].value)
859 return false;
860 break;
861 default:
862 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
863 }
864 }
865
866 return true;
867 }
868
869 /**
870 * svm_range_debug_dump - print all range information from svms
871 * @svms: svm range list header
872 *
873 * debug output svm range start, end, prefetch location from svms
874 * interval tree and link list
875 *
876 * Context: The caller must hold svms->lock
877 */
878 static void svm_range_debug_dump(struct svm_range_list *svms)
879 {
880 struct interval_tree_node *node;
881 struct svm_range *prange;
882
883 pr_debug("dump svms 0x%p list\n", svms);
884 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
885
886 list_for_each_entry(prange, &svms->list, list) {
887 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
888 prange, prange->start, prange->npages,
889 prange->start + prange->npages - 1,
890 prange->actual_loc);
891 }
892
893 pr_debug("dump svms 0x%p interval tree\n", svms);
894 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
895 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
896 while (node) {
897 prange = container_of(node, struct svm_range, it_node);
898 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
899 prange, prange->start, prange->npages,
900 prange->start + prange->npages - 1,
901 prange->actual_loc);
902 node = interval_tree_iter_next(node, 0, ~0ULL);
903 }
904 }
905
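/* Copy num_elements elements of the given size, starting at byte offset in
 * psrc, into a newly allocated array. If vram_pages is provided, also count
 * the copied dma_addr entries that are tagged with SVM_RANGE_VRAM_DOMAIN.
 */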
906 static void *
907 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
908 uint64_t offset, uint64_t *vram_pages)
909 {
910 unsigned char *src = (unsigned char *)psrc + offset;
911 unsigned char *dst;
912 uint64_t i;
913
914 dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
915 if (!dst)
916 return NULL;
917
918 if (!vram_pages) {
919 memcpy(dst, src, num_elements * size);
920 return (void *)dst;
921 }
922
923 *vram_pages = 0;
924 for (i = 0; i < num_elements; i++) {
925 dma_addr_t *temp;
926 temp = (dma_addr_t *)dst + i;
927 *temp = *((dma_addr_t *)src + i);
928 if (*temp & SVM_RANGE_VRAM_DOMAIN)
929 (*vram_pages)++;
930 }
931
932 return (void *)dst;
933 }
934
935 static int
936 svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
937 {
938 int i;
939
940 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
941 if (!src->dma_addr[i])
942 continue;
943 dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
944 sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
945 if (!dst->dma_addr[i])
946 return -ENOMEM;
947 }
948
949 return 0;
950 }
951
952 static int
953 svm_range_split_array(void *ppnew, void *ppold, size_t size,
954 uint64_t old_start, uint64_t old_n,
955 uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
956 {
957 unsigned char *new, *old, *pold;
958 uint64_t d;
959
960 if (!ppold)
961 return 0;
962 pold = *(unsigned char **)ppold;
963 if (!pold)
964 return 0;
965
966 d = (new_start - old_start) * size;
967 /* get dma addr array for new range and calculate its vram page number */
968 new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
969 if (!new)
970 return -ENOMEM;
971 d = (new_start == old_start) ? new_n * size : 0;
972 old = svm_range_copy_array(pold, size, old_n, d, NULL);
973 if (!old) {
974 kvfree(new);
975 return -ENOMEM;
976 }
977 kvfree(pold);
978 *(void **)ppold = old;
979 *(void **)ppnew = new;
980
981 return 0;
982 }
983
984 static int
985 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
986 uint64_t start, uint64_t last)
987 {
988 uint64_t npages = last - start + 1;
989 int i, r;
990
991 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
992 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
993 sizeof(*old->dma_addr[i]), old->start,
994 npages, new->start, new->npages,
995 old->actual_loc ? &new->vram_pages : NULL);
996 if (r)
997 return r;
998 }
999 if (old->actual_loc)
1000 old->vram_pages -= new->vram_pages;
1001
1002 return 0;
1003 }
1004
1005 static int
1006 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
1007 uint64_t start, uint64_t last)
1008 {
1009 uint64_t npages = last - start + 1;
1010
1011 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
1012 new->svms, new, new->start, start, last);
1013
1014 if (new->start == old->start) {
1015 new->offset = old->offset;
1016 old->offset += new->npages;
1017 } else {
1018 new->offset = old->offset + npages;
1019 }
1020
1021 new->svm_bo = svm_range_bo_ref(old->svm_bo);
1022 new->ttm_res = old->ttm_res;
1023
1024 spin_lock(&new->svm_bo->list_lock);
1025 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1026 spin_unlock(&new->svm_bo->list_lock);
1027
1028 return 0;
1029 }
1030
1031 /**
1032 * svm_range_split_adjust - split range and adjust
1033 *
1034 * @new: new range
1035 * @old: the old range
1036 * @start: the old range adjust to start address in pages
1037 * @last: the old range adjust to last address in pages
1038 *
1039 * Copy the system memory dma_addr or VRAM ttm_res of the old range to the
1040 * new range, starting at new->start for new->npages pages; the remaining old
1041 * range covers start to last.
1042 *
1043 * Return:
1044 * 0 - OK, -ENOMEM - out of memory
1045 */
1046 static int
1047 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
1048 uint64_t start, uint64_t last)
1049 {
1050 int r;
1051
1052 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1053 new->svms, new->start, old->start, old->last, start, last);
1054
1055 if (new->start < old->start ||
1056 new->last > old->last) {
1057 WARN_ONCE(1, "invalid new range start or last\n");
1058 return -EINVAL;
1059 }
1060
1061 r = svm_range_split_pages(new, old, start, last);
1062 if (r)
1063 return r;
1064
1065 if (old->actual_loc && old->ttm_res) {
1066 r = svm_range_split_nodes(new, old, start, last);
1067 if (r)
1068 return r;
1069 }
1070
1071 old->npages = last - start + 1;
1072 old->start = start;
1073 old->last = last;
1074 new->flags = old->flags;
1075 new->preferred_loc = old->preferred_loc;
1076 new->prefetch_loc = old->prefetch_loc;
1077 new->actual_loc = old->actual_loc;
1078 new->granularity = old->granularity;
1079 new->mapped_to_gpu = old->mapped_to_gpu;
1080 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1081 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1082 atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
1083
1084 return 0;
1085 }
1086
1087 /**
1088 * svm_range_split - split a range in 2 ranges
1089 *
1090 * @prange: the svm range to split
1091 * @start: the remaining range start address in pages
1092 * @last: the remaining range last address in pages
1093 * @new: the result new range generated
1094 *
1095 * Two cases only:
1096 * case 1: if start == prange->start
1097 * prange ==> prange[start, last]
1098 * new range [last + 1, prange->last]
1099 *
1100 * case 2: if last == prange->last
1101 * prange ==> prange[start, last]
1102 * new range [prange->start, start - 1]
1103 *
1104 * Return:
1105 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1106 */
1107 static int
1108 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1109 struct svm_range **new)
1110 {
1111 uint64_t old_start = prange->start;
1112 uint64_t old_last = prange->last;
1113 struct svm_range_list *svms;
1114 int r = 0;
1115
1116 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1117 old_start, old_last, start, last);
1118
1119 if (old_start != start && old_last != last)
1120 return -EINVAL;
1121 if (start < old_start || last > old_last)
1122 return -EINVAL;
1123
1124 svms = prange->svms;
1125 if (old_start == start)
1126 *new = svm_range_new(svms, last + 1, old_last, false);
1127 else
1128 *new = svm_range_new(svms, old_start, start - 1, false);
1129 if (!*new)
1130 return -ENOMEM;
1131
1132 r = svm_range_split_adjust(*new, prange, start, last);
1133 if (r) {
1134 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1135 r, old_start, old_last, start, last);
1136 svm_range_free(*new, false);
1137 *new = NULL;
1138 }
1139
1140 return r;
1141 }
1142
1143 static int
1144 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
1145 struct list_head *insert_list, struct list_head *remap_list)
1146 {
1147 unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
1148 unsigned long start_align = ALIGN(prange->start, 512);
1149 bool huge_page_mapping = last_align_down > start_align;
1150 struct svm_range *tail = NULL;
1151 int r;
1152
1153 r = svm_range_split(prange, prange->start, new_last, &tail);
1154
1155 if (r)
1156 return r;
1157
1158 list_add(&tail->list, insert_list);
1159
1160 if (huge_page_mapping && tail->start > start_align &&
1161 tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512)))
1162 list_add(&tail->update_list, remap_list);
1163
1164 return 0;
1165 }
1166
1167 static int
1168 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
1169 struct list_head *insert_list, struct list_head *remap_list)
1170 {
1171 unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
1172 unsigned long start_align = ALIGN(prange->start, 512);
1173 bool huge_page_mapping = last_align_down > start_align;
1174 struct svm_range *head = NULL;
1175 int r;
1176
1177 r = svm_range_split(prange, new_start, prange->last, &head);
1178
1179 if (r)
1180 return r;
1181
1182 list_add(&head->list, insert_list);
1183
1184 if (huge_page_mapping && head->last + 1 > start_align &&
1185 head->last + 1 < last_align_down && (!IS_ALIGNED(head->last, 512)))
1186 list_add(&head->update_list, remap_list);
1187
1188 return 0;
1189 }
1190
1191 static void
1192 svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op)
1193 {
1194 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1195 pchild, pchild->start, pchild->last, prange, op);
1196
1197 pchild->work_item.mm = NULL;
1198 pchild->work_item.op = op;
1199 list_add_tail(&pchild->child_list, &prange->child_list);
1200 }
1201
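/* Two nodes are in the same hive if they share the same adev or are connected
 * through XGMI.
 */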
1202 static bool
1203 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1204 {
1205 return (node_a->adev == node_b->adev ||
1206 amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
1207 }
1208
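/* Compute the GPU PTE flags (MTYPE, snoop, system, executable, writable) for
 * mapping prange on node, based on whether the backing memory is local VRAM,
 * remote VRAM or system memory, the coherence flags and the GC IP version.
 */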
1209 static uint64_t
1210 svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
1211 struct svm_range *prange, int domain)
1212 {
1213 struct kfd_node *bo_node;
1214 uint32_t flags = prange->flags;
1215 uint32_t mapping_flags = 0;
1216 uint32_t gc_ip_version = KFD_GC_VERSION(node);
1217 uint64_t pte_flags;
1218 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1219 bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
1220 bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
1221 unsigned int mtype_local, mtype_remote;
1222 bool is_aid_a1, is_local;
1223
1224 if (domain == SVM_RANGE_VRAM_DOMAIN)
1225 bo_node = prange->svm_bo->node;
1226
1227 switch (gc_ip_version) {
1228 case IP_VERSION(9, 4, 1):
1229 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1230 if (bo_node == node) {
1231 mapping_flags |= coherent ?
1232 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1233 } else {
1234 mapping_flags |= coherent ?
1235 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1236 if (svm_nodes_in_same_hive(node, bo_node))
1237 snoop = true;
1238 }
1239 } else {
1240 mapping_flags |= coherent ?
1241 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1242 }
1243 break;
1244 case IP_VERSION(9, 4, 2):
1245 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1246 if (bo_node == node) {
1247 mapping_flags |= coherent ?
1248 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1249 if (node->adev->gmc.xgmi.connected_to_cpu)
1250 snoop = true;
1251 } else {
1252 mapping_flags |= coherent ?
1253 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1254 if (svm_nodes_in_same_hive(node, bo_node))
1255 snoop = true;
1256 }
1257 } else {
1258 mapping_flags |= coherent ?
1259 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1260 }
1261 break;
1262 case IP_VERSION(9, 4, 3):
1263 case IP_VERSION(9, 4, 4):
1264 case IP_VERSION(9, 5, 0):
1265 if (ext_coherent)
1266 mtype_local = AMDGPU_VM_MTYPE_CC;
1267 else
1268 mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1269 amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1270 snoop = true;
1271 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1272 /* local HBM region close to partition */
1273 if (bo_node->adev == node->adev &&
1274 (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
1275 mapping_flags |= mtype_local;
1276 /* local HBM region far from partition or remote XGMI GPU
1277 * with regular system scope coherence
1278 */
1279 else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
1280 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1281 /* PCIe P2P on GPUs pre-9.5.0 */
1282 else if (gc_ip_version < IP_VERSION(9, 5, 0) &&
1283 !svm_nodes_in_same_hive(bo_node, node))
1284 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1285 /* Other remote memory */
1286 else
1287 mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1288 /* system memory accessed by the APU */
1289 } else if (node->adev->flags & AMD_IS_APU) {
1290 /* On NUMA systems, locality is determined per-page
1291 * in amdgpu_gmc_override_vm_pte_flags
1292 */
1293 if (num_possible_nodes() <= 1)
1294 mapping_flags |= mtype_local;
1295 else
1296 mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1297 /* system memory accessed by the dGPU */
1298 } else {
1299 if (gc_ip_version < IP_VERSION(9, 5, 0) || ext_coherent)
1300 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1301 else
1302 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1303 }
1304 break;
1305 case IP_VERSION(12, 0, 0):
1306 case IP_VERSION(12, 0, 1):
1307 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1308 break;
1309 case IP_VERSION(12, 1, 0):
1310 is_aid_a1 = (node->adev->rev_id & 0x10);
1311 is_local = (domain == SVM_RANGE_VRAM_DOMAIN) &&
1312 (bo_node->adev == node->adev);
1313
1314 mtype_local = amdgpu_mtype_local == 0 ? AMDGPU_VM_MTYPE_RW :
1315 amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1316 is_aid_a1 ? AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_NC;
1317 mtype_remote = is_aid_a1 ? AMDGPU_VM_MTYPE_NC : AMDGPU_VM_MTYPE_UC;
1318 snoop = true;
1319
1320 if (is_local) /* local HBM */ {
1321 mapping_flags |= mtype_local;
1322 } else if (ext_coherent) {
1323 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1324 } else {
1325 /* system memory or remote VRAM */
1326 mapping_flags |= mtype_remote;
1327 }
1328 break;
1329 default:
1330 mapping_flags |= coherent ?
1331 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1332 }
1333
1334 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1335 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1336
1337 pte_flags = AMDGPU_PTE_VALID;
1338 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1339 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1340 if (gc_ip_version >= IP_VERSION(12, 0, 0))
1341 pte_flags |= AMDGPU_PTE_IS_PTE;
1342
1343 amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
1344 pte_flags |= AMDGPU_PTE_READABLE;
1345 if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
1346 pte_flags |= AMDGPU_PTE_WRITEABLE;
1347
1348 if ((gc_ip_version == IP_VERSION(12, 1, 0)) &&
1349 node->adev->have_atomics_support)
1350 pte_flags |= AMDGPU_PTE_BUS_ATOMICS;
1351
1352 return pte_flags;
1353 }
1354
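/* Clear the GPU page table entries for the CPU page range [start, last] by
 * writing the init PTE value; the page table update fence is returned in
 * *fence.
 */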
1355 static int
1356 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1357 uint64_t start, uint64_t last,
1358 struct dma_fence **fence)
1359 {
1360 uint64_t init_pte_value = adev->gmc.init_pte_flags;
1361 uint64_t gpu_start, gpu_end;
1362
1363 /* Convert CPU page range to GPU page range */
1364 gpu_start = start * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1365 gpu_end = (last + 1) * AMDGPU_GPU_PAGES_IN_CPU_PAGE - 1;
1366
1367 pr_debug("CPU[0x%llx 0x%llx] -> GPU[0x%llx 0x%llx]\n", start, last,
1368 gpu_start, gpu_end);
1369
1370 if (!amdgpu_vm_ready(vm)) {
1371 pr_debug("VM not ready, canceling unmap\n");
1372 return -EINVAL;
1373 }
1374
1375 return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, gpu_start,
1376 gpu_end, init_pte_value, 0, 0, NULL, NULL,
1377 fence);
1378 }
1379
1380 static int
1381 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1382 unsigned long last, uint32_t trigger)
1383 {
1384 struct kfd_process_device *pdd;
1385 struct dma_fence *fence = NULL;
1386 struct kfd_process *p;
1387 uint32_t gpuidx;
1388 int r = 0;
1389
1390 if (!prange->mapped_to_gpu) {
1391 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1392 prange, prange->start, prange->last);
1393 return 0;
1394 }
1395
1396 if (prange->start == start && prange->last == last) {
1397 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1398 prange->mapped_to_gpu = false;
1399 }
1400
1401 p = container_of(prange->svms, struct kfd_process, svms);
1402
1403 for_each_or_bit(gpuidx, prange->bitmap_access, prange->bitmap_aip, MAX_GPU_INSTANCE) {
1404 pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1405 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1406 if (!pdd) {
1407 pr_debug("failed to find device idx %d\n", gpuidx);
1408 return -EINVAL;
1409 }
1410
1411 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1412 start, last, trigger);
1413
1414 r = svm_range_unmap_from_gpu(pdd->dev->adev,
1415 drm_priv_to_vm(pdd->drm_priv),
1416 start, last, &fence);
1417 if (r)
1418 break;
1419
1420 if (fence) {
1421 r = dma_fence_wait(fence, false);
1422 dma_fence_put(fence);
1423 fence = NULL;
1424 if (r)
1425 break;
1426 }
1427 kfd_flush_tlb(pdd);
1428 }
1429
1430 return r;
1431 }
1432
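/* Map npages pages of prange, starting at offset, into the GPU VM of pdd.
 * Consecutive pages in the same memory domain are batched into a single
 * amdgpu_vm_update_range call; VRAM pages are mapped relative to bo_adev's
 * vram_base_offset.
 */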
1433 static int
1434 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1435 unsigned long offset, unsigned long npages, bool readonly,
1436 dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1437 struct dma_fence **fence, bool flush_tlb)
1438 {
1439 struct amdgpu_device *adev = pdd->dev->adev;
1440 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1441 uint64_t pte_flags;
1442 unsigned long last_start;
1443 int last_domain;
1444 int r = 0;
1445 int64_t i, j;
1446
1447 last_start = prange->start + offset;
1448
1449 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1450 last_start, last_start + npages - 1, readonly);
1451
1452 if (!amdgpu_vm_ready(vm)) {
1453 pr_debug("VM not ready, canceling map\n");
1454 return -EINVAL;
1455 }
1456
1457 for (i = offset; i < offset + npages; i++) {
1458 uint64_t gpu_start;
1459 uint64_t gpu_end;
1460
1461 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1462 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1463
1464 /* Collect all pages in the same address range and memory domain
1465 * that can be mapped with a single call to update mapping.
1466 */
1467 if (i < offset + npages - 1 &&
1468 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1469 continue;
1470
1471 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1472 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1473
1474 pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
1475 if (readonly)
1476 pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1477
1478
1479 /* For dGPU mode, we use the same vm_manager to allocate VRAM for
1480 * different memory partitions based on fpfn/lpfn, so we should use the
1481 * same vm_manager.vram_base_offset regardless of the memory partition.
1482 */
1483 gpu_start = last_start * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1484 gpu_end = (prange->start + i + 1) * AMDGPU_GPU_PAGES_IN_CPU_PAGE - 1;
1485
1486 pr_debug("svms 0x%p map CPU[0x%lx 0x%llx] GPU[0x%llx 0x%llx] vram %d PTE 0x%llx\n",
1487 prange->svms, last_start, prange->start + i,
1488 gpu_start, gpu_end,
1489 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1490 pte_flags);
1491
1492 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
1493 NULL, gpu_start, gpu_end,
1494 pte_flags,
1495 (last_start - prange->start) << PAGE_SHIFT,
1496 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1497 NULL, dma_addr, &vm->last_update);
1498
1499 for (j = last_start - prange->start; j <= i; j++)
1500 dma_addr[j] |= last_domain;
1501
1502 if (r) {
1503 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1504 goto out;
1505 }
1506 last_start = prange->start + i + 1;
1507 }
1508
1509 r = amdgpu_vm_update_pdes(adev, vm, false);
1510 if (r) {
1511 pr_debug("failed %d to update directories 0x%lx\n", r,
1512 prange->start);
1513 goto out;
1514 }
1515
1516 if (fence)
1517 *fence = dma_fence_get(vm->last_update);
1518
1519 out:
1520 return r;
1521 }
1522
1523 static int
1524 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1525 unsigned long npages, bool readonly,
1526 unsigned long *bitmap, bool wait, bool flush_tlb)
1527 {
1528 struct kfd_process_device *pdd;
1529 struct amdgpu_device *bo_adev = NULL;
1530 struct kfd_process *p;
1531 struct dma_fence *fence = NULL;
1532 uint32_t gpuidx;
1533 int r = 0;
1534
1535 if (prange->svm_bo && prange->ttm_res)
1536 bo_adev = prange->svm_bo->node->adev;
1537
1538 p = container_of(prange->svms, struct kfd_process, svms);
1539 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1540 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1541 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1542 if (!pdd) {
1543 pr_debug("failed to find device idx %d\n", gpuidx);
1544 return -EINVAL;
1545 }
1546
1547 pdd = kfd_bind_process_to_device(pdd->dev, p);
1548 if (IS_ERR(pdd))
1549 return -EINVAL;
1550
1551 if (bo_adev && pdd->dev->adev != bo_adev &&
1552 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1553 pr_debug("cannot map to device idx %d\n", gpuidx);
1554 continue;
1555 }
1556
1557 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1558 prange->dma_addr[gpuidx],
1559 bo_adev, wait ? &fence : NULL,
1560 flush_tlb);
1561 if (r)
1562 break;
1563
1564 if (fence) {
1565 r = dma_fence_wait(fence, false);
1566 dma_fence_put(fence);
1567 fence = NULL;
1568 if (r) {
1569 pr_debug("failed %d to dma fence wait\n", r);
1570 break;
1571 }
1572 }
1573
1574 kfd_flush_tlb(pdd);
1575 }
1576
1577 return r;
1578 }
1579
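/* Context shared by the validate-and-map steps: the process, the range being
 * validated, the set of GPUs to map on, and the drm_exec state used for the
 * page table (and svm_bo) reservations.
 */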
1580 struct svm_validate_context {
1581 struct kfd_process *process;
1582 struct svm_range *prange;
1583 bool intr;
1584 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1585 struct drm_exec exec;
1586 };
1587
1588 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
1589 {
1590 struct kfd_process_device *pdd;
1591 struct amdgpu_vm *vm;
1592 uint32_t gpuidx;
1593 int r;
1594
1595 drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0, 0);
1596 drm_exec_until_all_locked(&ctx->exec) {
1597 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1598 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1599 if (!pdd) {
1600 pr_debug("failed to find device idx %d\n", gpuidx);
1601 r = -EINVAL;
1602 goto unreserve_out;
1603 }
1604 vm = drm_priv_to_vm(pdd->drm_priv);
1605
1606 r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1607 drm_exec_retry_on_contention(&ctx->exec);
1608 if (unlikely(r)) {
1609 pr_debug("failed %d to reserve bo\n", r);
1610 goto unreserve_out;
1611 }
1612 }
1613 }
1614
1615 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1616 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1617 if (!pdd) {
1618 pr_debug("failed to find device idx %d\n", gpuidx);
1619 r = -EINVAL;
1620 goto unreserve_out;
1621 }
1622
1623 r = amdgpu_vm_validate(pdd->dev->adev,
1624 drm_priv_to_vm(pdd->drm_priv), NULL,
1625 svm_range_bo_validate, NULL);
1626 if (r) {
1627 pr_debug("failed %d validate pt bos\n", r);
1628 goto unreserve_out;
1629 }
1630 }
1631
1632 return 0;
1633
1634 unreserve_out:
1635 drm_exec_fini(&ctx->exec);
1636 return r;
1637 }
1638
1639 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1640 {
1641 drm_exec_fini(&ctx->exec);
1642 }
1643
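/* Return the pgmap owner of the GPU at gpuidx. It is passed to HMM as the
 * device-private owner so that pages already in this device's VRAM are not
 * migrated back to system memory while faulting the range.
 */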
1644 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1645 {
1646 struct kfd_process_device *pdd;
1647
1648 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1649 if (!pdd)
1650 return NULL;
1651
1652 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1653 }
1654
1655 /*
1656 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1657 *
1658 * To prevent concurrent destruction or change of range attributes, the
1659 * svm_read_lock must be held. The caller must not hold the svm_write_lock
1660 * because that would block concurrent evictions and lead to deadlocks. To
1661 * serialize concurrent migrations or validations of the same range, the
1662 * prange->migrate_mutex must be held.
1663 *
1664 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1665 * eviction fence).
1666 *
1667 * The following sequence ensures race-free validation and GPU mapping:
1668 *
1669 * 1. Reserve page table (and SVM BO if range is in VRAM)
1670 * 2. hmm_range_fault to get page addresses (if system memory)
1671 * 3. DMA-map pages (if system memory)
1672 * 4-a. Take notifier lock
1673 * 4-b. Check that pages still valid (mmu_interval_read_retry)
1674 * 4-c. Check that the range was not split or otherwise invalidated
1675 * 4-d. Update GPU page table
1676 * 4-e. Release notifier lock
1677 * 5. Release page table (and SVM BO) reservation
1678 */
1679 static int svm_range_validate_and_map(struct mm_struct *mm,
1680 unsigned long map_start, unsigned long map_last,
1681 struct svm_range *prange, int32_t gpuidx,
1682 bool intr, bool wait, bool flush_tlb)
1683 {
1684 struct svm_validate_context *ctx;
1685 unsigned long start, end, addr;
1686 struct kfd_process *p;
1687 void *owner;
1688 int32_t idx;
1689 int r = 0;
1690
1691 ctx = kzalloc_obj(struct svm_validate_context);
1692 if (!ctx)
1693 return -ENOMEM;
1694 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1695 ctx->prange = prange;
1696 ctx->intr = intr;
1697
1698 if (gpuidx < MAX_GPU_INSTANCE) {
1699 bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1700 bitmap_set(ctx->bitmap, gpuidx, 1);
1701 } else if (ctx->process->xnack_enabled) {
1702 bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1703
1704 /* If prefetch range to GPU, or GPU retry fault migrate range to
1705 * GPU, which has ACCESS attribute to the range, create mapping
1706 * on that GPU.
1707 */
1708 if (prange->actual_loc) {
1709 gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1710 prange->actual_loc);
1711 if (gpuidx < 0) {
1712 WARN_ONCE(1, "failed get device by id 0x%x\n",
1713 prange->actual_loc);
1714 r = -EINVAL;
1715 goto free_ctx;
1716 }
1717 if (test_bit(gpuidx, prange->bitmap_access))
1718 bitmap_set(ctx->bitmap, gpuidx, 1);
1719 }
1720
1721 /*
1722 * If prange is already mapped or with always mapped flag,
1723 * update mapping on GPUs with ACCESS attribute
1724 */
1725 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1726 if (prange->mapped_to_gpu ||
1727 prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1728 bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1729 }
1730 } else {
1731 bitmap_or(ctx->bitmap, prange->bitmap_access,
1732 prange->bitmap_aip, MAX_GPU_INSTANCE);
1733 }
1734
1735 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1736 r = 0;
1737 goto free_ctx;
1738 }
1739
1740 if (prange->actual_loc && !prange->ttm_res) {
1741 /* This should never happen. actual_loc gets set by
1742 * svm_migrate_ram_to_vram after allocating a BO.
1743 */
1744 WARN_ONCE(1, "VRAM BO missing during validation\n");
1745 r = -EINVAL;
1746 goto free_ctx;
1747 }
1748
1749 r = svm_range_reserve_bos(ctx, intr);
1750 if (r)
1751 goto free_ctx;
1752
1753 p = container_of(prange->svms, struct kfd_process, svms);
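/* Use one common page owner for hmm_range_fault only if every GPU selected
 * in ctx->bitmap reports the same owner; otherwise fall back to NULL so
 * device-private (VRAM) pages are faulted back to system memory rather than
 * handed to a GPU that cannot access them (assumption based on HMM's
 * dev_private_owner semantics).
 */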
1754 owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1755 MAX_GPU_INSTANCE));
1756 for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1757 if (kfd_svm_page_owner(p, idx) != owner) {
1758 owner = NULL;
1759 break;
1760 }
1761 }
1762
1763 start = map_start << PAGE_SHIFT;
1764 end = (map_last + 1) << PAGE_SHIFT;
1765 for (addr = start; !r && addr < end; ) {
1766 struct amdgpu_hmm_range *range = NULL;
1767 unsigned long map_start_vma;
1768 unsigned long map_last_vma;
1769 struct vm_area_struct *vma;
1770 unsigned long next = 0;
1771 unsigned long offset;
1772 unsigned long npages;
1773 bool readonly;
1774
1775 vma = vma_lookup(mm, addr);
1776 if (vma) {
1777 readonly = !(vma->vm_flags & VM_WRITE);
1778
1779 next = min(vma->vm_end, end);
1780 npages = (next - addr) >> PAGE_SHIFT;
1781 /* HMM requires at least READ permission. If provided with PROT_NONE,
1782 * unmap the memory. If it's not already mapped, this is a no-op.
1783 * If PROT_WRITE is provided without READ, warn first, then unmap.
1784 */
1785 if (!(vma->vm_flags & VM_READ)) {
1786 unsigned long e, s;
1787
1788 svm_range_lock(prange);
1789 if (vma->vm_flags & VM_WRITE)
1790 pr_debug("VM_WRITE without VM_READ is not supported");
1791 s = max(start, prange->start);
1792 e = min(end, prange->last);
1793 if (e >= s)
1794 r = svm_range_unmap_from_gpus(prange, s, e,
1795 KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
1796 svm_range_unlock(prange);
1797 /* If unmap returns non-zero, we'll bail on the next for loop
1798 * iteration, so just leave r and continue
1799 */
1800 addr = next;
1801 continue;
1802 }
1803
1804 WRITE_ONCE(p->svms.faulting_task, current);
1805 range = amdgpu_hmm_range_alloc(NULL);
1806 if (likely(range))
1807 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1808 readonly, owner, range);
1809 else
1810 r = -ENOMEM;
1811 WRITE_ONCE(p->svms.faulting_task, NULL);
1812 if (r)
1813 pr_debug("failed %d to get svm range pages\n", r);
1814 } else {
1815 r = -EFAULT;
1816 }
1817
1818 if (!r) {
1819 offset = (addr >> PAGE_SHIFT) - prange->start;
1820 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1821 range->hmm_range.hmm_pfns);
1822 if (r)
1823 pr_debug("failed %d to dma map range\n", r);
1824 }
1825
1826 svm_range_lock(prange);
1827
1828 /* Free the backing memory of the hmm_range if it was initialized.
1829 * Override the return value with -EAGAIN only if the prior steps
1830 * were successful.
1831 */
1832 if (range && !amdgpu_hmm_range_valid(range) && !r) {
1833 pr_debug("hmm update the range, need validate again\n");
1834 r = -EAGAIN;
1835 }
1836
1837 /* Free the hmm range */
1838 amdgpu_hmm_range_free(range);
1839
1840 if (!r && !list_empty(&prange->child_list)) {
1841 pr_debug("range split by unmap in parallel, validate again\n");
1842 r = -EAGAIN;
1843 }
1844
1845 if (!r) {
1846 map_start_vma = max(map_start, prange->start + offset);
1847 map_last_vma = min(map_last, prange->start + offset + npages - 1);
1848 if (map_start_vma <= map_last_vma) {
1849 offset = map_start_vma - prange->start;
1850 npages = map_last_vma - map_start_vma + 1;
1851 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1852 ctx->bitmap, wait, flush_tlb);
1853 }
1854 }
1855
1856 if (!r && next == end)
1857 prange->mapped_to_gpu = true;
1858
1859 svm_range_unlock(prange);
1860
1861 addr = next;
1862 }
1863
1864 svm_range_unreserve_bos(ctx);
1865 if (!r)
1866 prange->validate_timestamp = ktime_get_boottime();
1867
1868 free_ctx:
1869 kfree(ctx);
1870
1871 return r;
1872 }
1873
1874 /**
1875 * svm_range_list_lock_and_flush_work - flush pending deferred work
1876 *
1877 * @svms: the svm range list
1878 * @mm: the mm structure
1879 *
1880 * Context: Returns with mmap write lock held, pending deferred work flushed
1881 *
1882 */
1883 void
1884 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1885 struct mm_struct *mm)
1886 {
1887 retry_flush_work:
1888 flush_work(&svms->deferred_list_work);
1889 mmap_write_lock(mm);
1890
1891 if (list_empty(&svms->deferred_range_list))
1892 return;
1893 mmap_write_unlock(mm);
1894 pr_debug("retry flush\n");
1895 goto retry_flush_work;
1896 }
1897
1898 static void svm_range_restore_work(struct work_struct *work)
1899 {
1900 struct delayed_work *dwork = to_delayed_work(work);
1901 struct amdkfd_process_info *process_info;
1902 struct svm_range_list *svms;
1903 struct svm_range *prange;
1904 struct kfd_process *p;
1905 struct mm_struct *mm;
1906 int evicted_ranges;
1907 int invalid;
1908 int r;
1909
1910 svms = container_of(dwork, struct svm_range_list, restore_work);
1911 evicted_ranges = atomic_read(&svms->evicted_ranges);
1912 if (!evicted_ranges)
1913 return;
1914
1915 pr_debug("restore svm ranges\n");
1916
1917 p = container_of(svms, struct kfd_process, svms);
1918 process_info = p->kgd_process_info;
1919
1920 /* Hold an mm reference while svm_range_validate_and_map maps the ranges */
1921 mm = get_task_mm(p->lead_thread);
1922 if (!mm) {
1923 pr_debug("svms 0x%p process mm gone\n", svms);
1924 return;
1925 }
1926
1927 mutex_lock(&process_info->lock);
1928 svm_range_list_lock_and_flush_work(svms, mm);
1929 mutex_lock(&svms->lock);
1930
1931 evicted_ranges = atomic_read(&svms->evicted_ranges);
1932
1933 list_for_each_entry(prange, &svms->list, list) {
1934 invalid = atomic_read(&prange->invalid);
1935 if (!invalid)
1936 continue;
1937
1938 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1939 prange->svms, prange, prange->start, prange->last,
1940 invalid);
1941
1942 /*
1943 * If the range is migrating, wait for the migration to finish.
1944 */
1945 mutex_lock(&prange->migrate_mutex);
1946
1947 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
1948 MAX_GPU_INSTANCE, false, true, false);
1949 if (r)
1950 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1951 prange->start);
1952
1953 mutex_unlock(&prange->migrate_mutex);
1954 if (r)
1955 goto out_reschedule;
1956
1957 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1958 goto out_reschedule;
1959 }
1960
1961 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1962 evicted_ranges)
1963 goto out_reschedule;
1964
1965 evicted_ranges = 0;
1966
1967 r = kgd2kfd_resume_mm(mm);
1968 if (r) {
1969 /* No recovery from this failure. Probably the CP is
1970 * hanging. No point trying again.
1971 */
1972 pr_debug("failed %d to resume KFD\n", r);
1973 }
1974
1975 pr_debug("restore svm ranges successfully\n");
1976
1977 out_reschedule:
1978 mutex_unlock(&svms->lock);
1979 mmap_write_unlock(mm);
1980 mutex_unlock(&process_info->lock);
1981
1982 /* If validation failed, reschedule another attempt */
1983 if (evicted_ranges) {
1984 pr_debug("reschedule to restore svm range\n");
1985 queue_delayed_work(system_freezable_wq, &svms->restore_work,
1986 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1987
1988 kfd_smi_event_queue_restore_rescheduled(mm);
1989 }
1990 mmput(mm);
1991 }
1992
1993 /**
1994 * svm_range_evict - evict svm range
1995 * @prange: svm range structure
1996 * @mm: current process mm_struct
1997 * @start: first page of the address range being evicted, in pages
1998 * @last: last page of the address range being evicted, in pages
1999 * @event: mmu notifier event when range is evicted or migrated
2000 *
2001 * Stop all queues of the process to ensure the GPU doesn't access the memory,
2002 * then return to let the CPU evict the buffer and proceed with the CPU page table update.
2003 *
2004 * No lock is needed to sync CPU page table invalidation with GPU execution.
2005 * If an invalidation happens while the restore work is running, the restore work
2006 * will restart to pick up the latest CPU page mapping for the GPU, then start
2007 * the queues.
2008 */
2009 static int
2010 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
2011 unsigned long start, unsigned long last,
2012 enum mmu_notifier_event event)
2013 {
2014 struct svm_range_list *svms = prange->svms;
2015 struct svm_range *pchild;
2016 struct kfd_process *p;
2017 int r = 0;
2018
2019 p = container_of(svms, struct kfd_process, svms);
2020
2021 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2022 svms, prange->start, prange->last, start, last);
2023
2024 if (!p->xnack_enabled ||
2025 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
2026 int evicted_ranges;
2027 bool mapped = prange->mapped_to_gpu;
2028
2029 list_for_each_entry(pchild, &prange->child_list, child_list) {
2030 if (!pchild->mapped_to_gpu)
2031 continue;
2032 mapped = true;
2033 mutex_lock_nested(&pchild->lock, 1);
2034 if (pchild->start <= last && pchild->last >= start) {
2035 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
2036 pchild->start, pchild->last);
2037 atomic_inc(&pchild->invalid);
2038 }
2039 mutex_unlock(&pchild->lock);
2040 }
2041
2042 if (!mapped)
2043 return r;
2044
2045 if (prange->start <= last && prange->last >= start)
2046 atomic_inc(&prange->invalid);
2047
2048 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
2049 if (evicted_ranges != 1)
2050 return r;
2051
2052 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
2053 prange->svms, prange->start, prange->last);
2054
2055 /* First eviction, stop the queues */
2056 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2057 if (r)
2058 pr_debug("failed to quiesce KFD\n");
2059
2060 pr_debug("schedule to restore svm %p ranges\n", svms);
2061 queue_delayed_work(system_freezable_wq, &svms->restore_work,
2062 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
2063 } else {
2064 unsigned long s, l;
2065 uint32_t trigger;
2066
2067 if (event == MMU_NOTIFY_MIGRATE)
2068 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
2069 else
2070 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
2071
2072 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
2073 prange->svms, start, last);
2074 list_for_each_entry(pchild, &prange->child_list, child_list) {
2075 mutex_lock_nested(&pchild->lock, 1);
2076 s = max(start, pchild->start);
2077 l = min(last, pchild->last);
2078 if (l >= s)
2079 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2080 mutex_unlock(&pchild->lock);
2081 }
2082 s = max(start, prange->start);
2083 l = min(last, prange->last);
2084 if (l >= s)
2085 svm_range_unmap_from_gpus(prange, s, l, trigger);
2086 }
2087
2088 return r;
2089 }
2090
2091 static struct svm_range *svm_range_clone(struct svm_range *old)
2092 {
2093 struct svm_range *new;
2094
2095 new = svm_range_new(old->svms, old->start, old->last, false);
2096 if (!new)
2097 return NULL;
2098 if (svm_range_copy_dma_addrs(new, old)) {
2099 svm_range_free(new, false);
2100 return NULL;
2101 }
2102 if (old->svm_bo) {
2103 new->ttm_res = old->ttm_res;
2104 new->offset = old->offset;
2105 new->svm_bo = svm_range_bo_ref(old->svm_bo);
2106 spin_lock(&new->svm_bo->list_lock);
2107 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
2108 spin_unlock(&new->svm_bo->list_lock);
2109 }
2110 new->flags = old->flags;
2111 new->preferred_loc = old->preferred_loc;
2112 new->prefetch_loc = old->prefetch_loc;
2113 new->actual_loc = old->actual_loc;
2114 new->granularity = old->granularity;
2115 new->mapped_to_gpu = old->mapped_to_gpu;
2116 new->vram_pages = old->vram_pages;
2117 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
2118 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
2119 atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
2120
2121 return new;
2122 }
2123
2124 void svm_range_set_max_pages(struct amdgpu_device *adev)
2125 {
2126 uint64_t max_pages;
2127 uint64_t pages, _pages;
2128 uint64_t min_pages = 0;
2129 int i, id;
2130
2131 for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
2132 if (adev->kfd.dev->nodes[i]->xcp)
2133 id = adev->kfd.dev->nodes[i]->xcp->id;
2134 else
2135 id = -1;
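/* 1/32 of the partition's memory size, in 4KiB pages: >> 5 for the 1/32
 * fraction plus >> 12 for bytes-to-pages gives >> 17. The clamp below keeps
 * the result between 512 pages (2MB) and 256K pages (1GB).
 */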
2136 pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
2137 pages = clamp(pages, 1ULL << 9, 1ULL << 18);
2138 pages = rounddown_pow_of_two(pages);
2139 min_pages = min_not_zero(min_pages, pages);
2140 }
2141
2142 do {
2143 max_pages = READ_ONCE(max_svm_range_pages);
2144 _pages = min_not_zero(max_pages, min_pages);
2145 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2146 }
2147
2148 static int
2149 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2150 uint64_t max_pages, struct list_head *insert_list,
2151 struct list_head *update_list)
2152 {
2153 struct svm_range *prange;
2154 uint64_t l;
2155
2156 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
2157 max_pages, start, last);
2158
2159 while (last >= start) {
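/* End each new range at the next max_pages-aligned boundary (or at last),
 * so an oversized request is carved into aligned chunks of at most
 * max_pages pages each.
 */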
2160 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2161
2162 prange = svm_range_new(svms, start, l, true);
2163 if (!prange)
2164 return -ENOMEM;
2165 list_add(&prange->list, insert_list);
2166 list_add(&prange->update_list, update_list);
2167
2168 start = l + 1;
2169 }
2170 return 0;
2171 }
2172
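/*
 * Worked example for svm_range_add() below (hypothetical page numbers,
 * assuming max_svm_range_pages is large enough not to split the tail):
 * adding [0x200 0x5ff] when an existing range [0x000 0x3ff] has different
 * attributes:
 * - the existing range is cloned and the original goes on remove_list
 * - the clone is split at 0x200: [0x000 0x1ff] keeps the old attributes,
 *   [0x200 0x3ff] goes on update_list, and both go on insert_list
 * - a new range [0x400 0x5ff] is created for the uncovered tail and goes on
 *   both update_list and insert_list
 */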
2173 /**
2174 * svm_range_add - add svm range and handle overlap
2175 * @p: the process whose svms the range is added to
2176 * @start: range start address, page aligned, in pages
2177 * @size: range size, page aligned, in pages
2178 * @nattr: number of attributes
2179 * @attrs: array of attributes
2180 * @update_list: output, the ranges need validate and update GPU mapping
2181 * @insert_list: output, the ranges need insert to svms
2182 * @remove_list: output, the ranges are replaced and need remove from svms
2183 * @remap_list: output, remap unaligned svm ranges
2184 *
2185 * Check if the virtual address range has overlap with any existing ranges,
2186 * split partly overlapping ranges and add new ranges in the gaps. All changes
2187 * should be applied to the range_list and interval tree transactionally. If
2188 * any range split or allocation fails, the entire update fails. Therefore any
2189 * existing overlapping svm_ranges are cloned and the original svm_ranges left
2190 * unchanged.
2191 *
2192 * If the transaction succeeds, the caller can update and insert clones and
2193 * new ranges, then free the originals.
2194 *
2195 * Otherwise the caller can free the clones and new ranges, while the old
2196 * svm_ranges remain unchanged.
2197 *
2198 * Context: Process context, caller must hold svms->lock
2199 *
2200 * Return:
2201 * 0 - OK, otherwise error code
2202 */
2203 static int
2204 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2205 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2206 struct list_head *update_list, struct list_head *insert_list,
2207 struct list_head *remove_list, struct list_head *remap_list)
2208 {
2209 unsigned long last = start + size - 1UL;
2210 struct svm_range_list *svms = &p->svms;
2211 struct interval_tree_node *node;
2212 struct svm_range *prange;
2213 struct svm_range *tmp;
2214 struct list_head new_list;
2215 int r = 0;
2216
2217 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2218
2219 INIT_LIST_HEAD(update_list);
2220 INIT_LIST_HEAD(insert_list);
2221 INIT_LIST_HEAD(remove_list);
2222 INIT_LIST_HEAD(&new_list);
2223 INIT_LIST_HEAD(remap_list);
2224
2225 node = interval_tree_iter_first(&svms->objects, start, last);
2226 while (node) {
2227 struct interval_tree_node *next;
2228 unsigned long next_start;
2229
2230 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2231 node->last);
2232
2233 prange = container_of(node, struct svm_range, it_node);
2234 next = interval_tree_iter_next(node, start, last);
2235 next_start = min(node->last, last) + 1;
2236
2237 if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2238 prange->mapped_to_gpu) {
2239 /* nothing to do */
2240 } else if (node->start < start || node->last > last) {
2241 /* node intersects the update range and its attributes
2242 * will change. Clone and split it, apply updates only
2243 * to the overlapping part
2244 */
2245 struct svm_range *old = prange;
2246
2247 prange = svm_range_clone(old);
2248 if (!prange) {
2249 r = -ENOMEM;
2250 goto out;
2251 }
2252
2253 list_add(&old->update_list, remove_list);
2254 list_add(&prange->list, insert_list);
2255 list_add(&prange->update_list, update_list);
2256
2257 if (node->start < start) {
2258 pr_debug("change old range start\n");
2259 r = svm_range_split_head(prange, start,
2260 insert_list, remap_list);
2261 if (r)
2262 goto out;
2263 }
2264 if (node->last > last) {
2265 pr_debug("change old range last\n");
2266 r = svm_range_split_tail(prange, last,
2267 insert_list, remap_list);
2268 if (r)
2269 goto out;
2270 }
2271 } else {
2272 /* The node is contained within start..last,
2273 * just update it
2274 */
2275 list_add(&prange->update_list, update_list);
2276 }
2277
2278 /* insert a new node if needed */
2279 if (node->start > start) {
2280 r = svm_range_split_new(svms, start, node->start - 1,
2281 READ_ONCE(max_svm_range_pages),
2282 &new_list, update_list);
2283 if (r)
2284 goto out;
2285 }
2286
2287 node = next;
2288 start = next_start;
2289 }
2290
2291 /* add a final range at the end if needed */
2292 if (start <= last)
2293 r = svm_range_split_new(svms, start, last,
2294 READ_ONCE(max_svm_range_pages),
2295 &new_list, update_list);
2296
2297 out:
2298 if (r) {
2299 list_for_each_entry_safe(prange, tmp, insert_list, list)
2300 svm_range_free(prange, false);
2301 list_for_each_entry_safe(prange, tmp, &new_list, list)
2302 svm_range_free(prange, true);
2303 } else {
2304 list_splice(&new_list, insert_list);
2305 }
2306
2307 return r;
2308 }
2309
2310 static void
2311 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2312 struct svm_range *prange)
2313 {
2314 unsigned long start;
2315 unsigned long last;
2316
2317 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2318 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2319
2320 if (prange->start == start && prange->last == last)
2321 return;
2322
2323 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2324 prange->svms, prange, start, last, prange->start,
2325 prange->last);
2326
2327 if (start != 0 && last != 0) {
2328 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2329 svm_range_remove_notifier(prange);
2330 }
2331 prange->it_node.start = prange->start;
2332 prange->it_node.last = prange->last;
2333
2334 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2335 svm_range_add_notifier_locked(mm, prange);
2336 }
2337
2338 static void
2339 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2340 struct mm_struct *mm)
2341 {
2342 switch (prange->work_item.op) {
2343 case SVM_OP_NULL:
2344 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2345 svms, prange, prange->start, prange->last);
2346 break;
2347 case SVM_OP_UNMAP_RANGE:
2348 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2349 svms, prange, prange->start, prange->last);
2350 svm_range_unlink(prange);
2351 svm_range_remove_notifier(prange);
2352 svm_range_free(prange, true);
2353 break;
2354 case SVM_OP_UPDATE_RANGE_NOTIFIER:
2355 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2356 svms, prange, prange->start, prange->last);
2357 svm_range_update_notifier_and_interval_tree(mm, prange);
2358 break;
2359 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2360 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2361 svms, prange, prange->start, prange->last);
2362 svm_range_update_notifier_and_interval_tree(mm, prange);
2363 /* TODO: implement deferred validation and mapping */
2364 break;
2365 case SVM_OP_ADD_RANGE:
2366 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2367 prange->start, prange->last);
2368 svm_range_add_to_svms(prange);
2369 svm_range_add_notifier_locked(mm, prange);
2370 break;
2371 case SVM_OP_ADD_RANGE_AND_MAP:
2372 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2373 prange, prange->start, prange->last);
2374 svm_range_add_to_svms(prange);
2375 svm_range_add_notifier_locked(mm, prange);
2376 /* TODO: implement deferred validation and mapping */
2377 break;
2378 default:
2379 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2380 prange->work_item.op);
2381 }
2382 }
2383
2384 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2385 {
2386 struct kfd_process_device *pdd;
2387 struct kfd_process *p;
2388 uint32_t i;
2389
2390 p = container_of(svms, struct kfd_process, svms);
2391
2392 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2393 pdd = p->pdds[i];
2394 if (!pdd)
2395 continue;
2396
2397 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2398
2399 if (!down_read_trylock(&pdd->dev->adev->reset_domain->sem))
2400 continue;
2401
2402 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2403 pdd->dev->adev->irq.retry_cam_enabled ?
2404 &pdd->dev->adev->irq.ih :
2405 &pdd->dev->adev->irq.ih1);
2406
2407 if (pdd->dev->adev->irq.retry_cam_enabled)
2408 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2409 &pdd->dev->adev->irq.ih_soft);
2410
2411 up_read(&pdd->dev->adev->reset_domain->sem);
2412
2413 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2414 }
2415 }
2416
2417 static void svm_range_deferred_list_work(struct work_struct *work)
2418 {
2419 struct svm_range_list *svms;
2420 struct svm_range *prange;
2421 struct mm_struct *mm;
2422
2423 svms = container_of(work, struct svm_range_list, deferred_list_work);
2424 pr_debug("enter svms 0x%p\n", svms);
2425
2426 spin_lock(&svms->deferred_list_lock);
2427 while (!list_empty(&svms->deferred_range_list)) {
2428 prange = list_first_entry(&svms->deferred_range_list,
2429 struct svm_range, deferred_list);
2430 spin_unlock(&svms->deferred_list_lock);
2431
2432 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2433 prange->start, prange->last, prange->work_item.op);
2434
2435 mm = prange->work_item.mm;
2436
2437 mmap_write_lock(mm);
2438
2439 /* Removal from deferred_list must happen inside the mmap write lock,
2440 * to avoid two races:
2441 * 1. unmap_from_cpu may change work_item.op and add the range
2442 * to deferred_list again, causing a use-after-free bug.
2443 * 2. svm_range_list_lock_and_flush_work may take the mmap write
2444 * lock and continue because deferred_list is empty, while the
2445 * deferred_list work is actually still waiting for the mmap lock.
2446 */
2447 spin_lock(&svms->deferred_list_lock);
2448 list_del_init(&prange->deferred_list);
2449 spin_unlock(&svms->deferred_list_lock);
2450
2451 mutex_lock(&svms->lock);
2452 mutex_lock(&prange->migrate_mutex);
2453 while (!list_empty(&prange->child_list)) {
2454 struct svm_range *pchild;
2455
2456 pchild = list_first_entry(&prange->child_list,
2457 struct svm_range, child_list);
2458 pr_debug("child prange 0x%p op %d\n", pchild,
2459 pchild->work_item.op);
2460 list_del_init(&pchild->child_list);
2461 svm_range_handle_list_op(svms, pchild, mm);
2462 }
2463 mutex_unlock(&prange->migrate_mutex);
2464
2465 svm_range_handle_list_op(svms, prange, mm);
2466 mutex_unlock(&svms->lock);
2467 mmap_write_unlock(mm);
2468
2469 /* Pairs with mmget in svm_range_add_list_work. If this drops the
2470 * last mm refcount, release it asynchronously to avoid circular locking
2471 */
2472 mmput_async(mm);
2473
2474 spin_lock(&svms->deferred_list_lock);
2475 }
2476 spin_unlock(&svms->deferred_list_lock);
2477 pr_debug("exit svms 0x%p\n", svms);
2478 }
2479
2480 void
2481 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2482 struct mm_struct *mm, enum svm_work_list_ops op)
2483 {
2484 spin_lock(&svms->deferred_list_lock);
2485 /* if prange is on the deferred list */
2486 if (!list_empty(&prange->deferred_list)) {
2487 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2488 WARN_ONCE(prange->work_item.mm != mm, "mismatched mm\n");
2489 if (op != SVM_OP_NULL &&
2490 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2491 prange->work_item.op = op;
2492 } else {
2493 /* Pairs with mmput in deferred_list_work.
2494 * If process is exiting and mm is gone, don't update mmu notifier.
2495 */
2496 if (mmget_not_zero(mm)) {
2497 prange->work_item.mm = mm;
2498 prange->work_item.op = op;
2499 list_add_tail(&prange->deferred_list,
2500 &prange->svms->deferred_range_list);
2501 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2502 prange, prange->start, prange->last, op);
2503 }
2504 }
2505 spin_unlock(&svms->deferred_list_lock);
2506 }
2507
2508 void schedule_deferred_list_work(struct svm_range_list *svms)
2509 {
2510 spin_lock(&svms->deferred_list_lock);
2511 if (!list_empty(&svms->deferred_range_list))
2512 schedule_work(&svms->deferred_list_work);
2513 spin_unlock(&svms->deferred_list_lock);
2514 }
2515
2516 static void
2517 svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start,
2518 unsigned long last)
2519 {
2520 struct svm_range *head;
2521 struct svm_range *tail;
2522
2523 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2524 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2525 prange->start, prange->last);
2526 return;
2527 }
2528 if (start > prange->last || last < prange->start)
2529 return;
2530
2531 head = tail = prange;
2532 if (start > prange->start)
2533 svm_range_split(prange, prange->start, start - 1, &tail);
2534 if (last < tail->last)
2535 svm_range_split(tail, last + 1, tail->last, &head);
2536
2537 if (head != prange && tail != prange) {
2538 svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
2539 svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE);
2540 } else if (tail != prange) {
2541 svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE);
2542 } else if (head != prange) {
2543 svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
2544 } else if (parent != prange) {
2545 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2546 }
2547 }
2548
2549 static void
2550 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2551 unsigned long start, unsigned long last)
2552 {
2553 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2554 struct svm_range_list *svms;
2555 struct svm_range *pchild;
2556 struct kfd_process *p;
2557 unsigned long s, l;
2558 bool unmap_parent;
2559 uint32_t i;
2560
2561 if (atomic_read(&prange->queue_refcount)) {
2562 int r;
2563
2564 pr_warn("Freeing queue vital buffer 0x%lx, queue evicted\n",
2565 prange->start << PAGE_SHIFT);
2566 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2567 if (r)
2568 pr_debug("failed %d to quiesce KFD queues\n", r);
2569 }
2570
2571 p = kfd_lookup_process_by_mm(mm);
2572 if (!p)
2573 return;
2574 svms = &p->svms;
2575
2576 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2577 prange, prange->start, prange->last, start, last);
2578
2579 /* Calculate timestamps used to decide which page faults need to be
2580 * dropped or handled before unmapping pages from the GPU VM.
2581 */
2582 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2583 struct kfd_process_device *pdd;
2584 struct amdgpu_device *adev;
2585 struct amdgpu_ih_ring *ih;
2586 uint32_t checkpoint_wptr;
2587
2588 pdd = p->pdds[i];
2589 if (!pdd)
2590 continue;
2591
2592 adev = pdd->dev->adev;
2593
2594 /* Check and drain ih1 ring if cam not available */
2595 if (!adev->irq.retry_cam_enabled && adev->irq.ih1.ring_size) {
2596 ih = &adev->irq.ih1;
2597 checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2598 if (ih->rptr != checkpoint_wptr) {
2599 svms->checkpoint_ts[i] =
2600 amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2601 continue;
2602 }
2603 }
2604
2605 /* check if dev->irq.ih_soft is not empty */
2606 ih = &adev->irq.ih_soft;
2607 checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
2608 if (ih->rptr != checkpoint_wptr)
2609 svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
2610 }
2611
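/* unmap_parent: the CPU unmap covers the entire parent range, so the whole
 * prange is queued for removal below instead of just a notifier update.
 */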
2612 unmap_parent = start <= prange->start && last >= prange->last;
2613
2614 list_for_each_entry(pchild, &prange->child_list, child_list) {
2615 mutex_lock_nested(&pchild->lock, 1);
2616 s = max(start, pchild->start);
2617 l = min(last, pchild->last);
2618 if (l >= s)
2619 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2620 svm_range_unmap_split(prange, pchild, start, last);
2621 mutex_unlock(&pchild->lock);
2622 }
2623 s = max(start, prange->start);
2624 l = min(last, prange->last);
2625 if (l >= s)
2626 svm_range_unmap_from_gpus(prange, s, l, trigger);
2627 svm_range_unmap_split(prange, prange, start, last);
2628
2629 if (unmap_parent)
2630 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2631 else
2632 svm_range_add_list_work(svms, prange, mm,
2633 SVM_OP_UPDATE_RANGE_NOTIFIER);
2634 schedule_deferred_list_work(svms);
2635
2636 kfd_unref_process(p);
2637 }
2638
2639 /**
2640 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2641 * @mni: mmu_interval_notifier struct
2642 * @range: mmu_notifier_range struct
2643 * @cur_seq: value to pass to mmu_interval_set_seq()
2644 *
2645 * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
2646 * otherwise it comes from migration or a CPU page invalidation callback.
2647 *
2648 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2649 * work thread, and split prange if only part of prange is unmapped.
2650 *
2651 * For invalidation event, if GPU retry fault is not enabled, evict the queues,
2652 * then schedule svm_range_restore_work to update GPU mapping and resume queues.
2653 * If GPU retry fault is enabled, unmap the svm range from the GPU; the retry
2654 * fault handler will update the GPU mapping to recover.
2655 *
2656 * Context: mmap lock, notifier_invalidate_start lock are held
2657 * for invalidate event, prange lock is held if this is from migration
2658 */
2659 static bool
2660 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2661 const struct mmu_notifier_range *range,
2662 unsigned long cur_seq)
2663 {
2664 struct svm_range *prange;
2665 unsigned long start;
2666 unsigned long last;
2667
2668 if (range->event == MMU_NOTIFY_RELEASE)
2669 return true;
2670
2671 start = mni->interval_tree.start;
2672 last = mni->interval_tree.last;
2673 start = max(start, range->start) >> PAGE_SHIFT;
2674 last = min(last, range->end - 1) >> PAGE_SHIFT;
2675 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2676 start, last, range->start >> PAGE_SHIFT,
2677 (range->end - 1) >> PAGE_SHIFT,
2678 mni->interval_tree.start >> PAGE_SHIFT,
2679 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2680
2681 prange = container_of(mni, struct svm_range, notifier);
2682
2683 svm_range_lock(prange);
2684 mmu_interval_set_seq(mni, cur_seq);
2685
2686 switch (range->event) {
2687 case MMU_NOTIFY_UNMAP:
2688 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2689 break;
2690 default:
2691 svm_range_evict(prange, mni->mm, start, last, range->event);
2692 break;
2693 }
2694
2695 svm_range_unlock(prange);
2696
2697 return true;
2698 }
2699
2700 /**
2701 * svm_range_from_addr - find svm range from fault address
2702 * @svms: svm range list header
2703 * @addr: address to search range interval tree, in pages
2704 * @parent: parent range if range is on child list
2705 *
2706 * Context: The caller must hold svms->lock
2707 *
2708 * Return: the svm_range found or NULL
2709 */
2710 struct svm_range *
2711 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2712 struct svm_range **parent)
2713 {
2714 struct interval_tree_node *node;
2715 struct svm_range *prange;
2716 struct svm_range *pchild;
2717
2718 node = interval_tree_iter_first(&svms->objects, addr, addr);
2719 if (!node)
2720 return NULL;
2721
2722 prange = container_of(node, struct svm_range, it_node);
2723 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2724 addr, prange->start, prange->last, node->start, node->last);
2725
2726 if (addr >= prange->start && addr <= prange->last) {
2727 if (parent)
2728 *parent = prange;
2729 return prange;
2730 }
2731 list_for_each_entry(pchild, &prange->child_list, child_list)
2732 if (addr >= pchild->start && addr <= pchild->last) {
2733 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2734 addr, pchild->start, pchild->last);
2735 if (parent)
2736 *parent = prange;
2737 return pchild;
2738 }
2739
2740 return NULL;
2741 }
2742
2743 /* svm_range_best_restore_location - decide the best fault restore location
2744 * @prange: svm range structure
2745 * @adev: the GPU on which vm fault happened
2746 *
2747 * This is only called when xnack is on, to decide the best location to restore
2748 * the range mapping after GPU vm fault. Caller uses the best location to do
2749 * migration if actual loc is not best location, then update GPU page table
2750 * mapping to the best location.
2751 *
2752 * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
2753 * If the faulting GPU is in the range's ACCESSIBLE bitmap, best_loc is that GPU.
2754 * If the faulting GPU is in the range's ACCESSIBLE_IN_PLACE bitmap, then:
2755 * if the range's actual loc is CPU, best_loc is CPU;
2756 * if the faulting GPU is in the same XGMI hive as the actual loc GPU,
2757 * best_loc is the range's actual loc.
2758 * Otherwise the GPU has no access and best_loc is -1.
2759 *
2760 * Return:
2761 * -1 if the faulting GPU has no access
2762 * 0 for CPU, or the GPU id otherwise
2763 */
2764 static int32_t
2765 svm_range_best_restore_location(struct svm_range *prange,
2766 struct kfd_node *node,
2767 int32_t *gpuidx)
2768 {
2769 struct kfd_node *bo_node, *preferred_node;
2770 struct kfd_process *p;
2771 uint32_t gpuid;
2772 int r;
2773
2774 p = container_of(prange->svms, struct kfd_process, svms);
2775
2776 r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2777 if (r < 0) {
2778 pr_debug("failed to get gpuid from kgd\n");
2779 return -1;
2780 }
2781
2782 if (node->adev->apu_prefer_gtt)
2783 return 0;
2784
2785 if (prange->preferred_loc == gpuid ||
2786 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2787 return prange->preferred_loc;
2788 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2789 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2790 if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2791 return prange->preferred_loc;
2792 /* fall through */
2793 }
2794
2795 if (test_bit(*gpuidx, prange->bitmap_access))
2796 return gpuid;
2797
2798 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2799 if (!prange->actual_loc)
2800 return 0;
2801
2802 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2803 if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2804 return prange->actual_loc;
2805 else
2806 return 0;
2807 }
2808
2809 return -1;
2810 }
2811
2812 static int
2813 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2814 unsigned long *start, unsigned long *last,
2815 bool *is_heap_stack)
2816 {
2817 struct vm_area_struct *vma;
2818 struct interval_tree_node *node;
2819 struct rb_node *rb_node;
2820 unsigned long start_limit, end_limit;
2821
2822 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2823 if (!vma) {
2824 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2825 return -EFAULT;
2826 }
2827
2828 *is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2829
2830 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2831 (unsigned long)ALIGN_DOWN(addr, 1UL << p->svms.default_granularity));
2832 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2833 (unsigned long)ALIGN(addr + 1, 1UL << p->svms.default_granularity));
2834
2835 /* First range that starts after the fault address */
2836 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2837 if (node) {
2838 end_limit = min(end_limit, node->start);
2839 /* Last range that ends before the fault address */
2840 rb_node = rb_prev(&node->rb);
2841 } else {
2842 /* Last range must end before addr because
2843 * there was no range after addr
2844 */
2845 rb_node = rb_last(&p->svms.objects.rb_root);
2846 }
2847 if (rb_node) {
2848 node = container_of(rb_node, struct interval_tree_node, rb);
2849 if (node->last >= addr) {
2850 WARN(1, "Overlap with prev node and page fault addr\n");
2851 return -EFAULT;
2852 }
2853 start_limit = max(start_limit, node->last + 1);
2854 }
2855
2856 *start = start_limit;
2857 *last = end_limit - 1;
2858
2859 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2860 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2861 *start, *last, *is_heap_stack);
2862
2863 return 0;
2864 }
2865
2866 static int
2867 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2868 uint64_t *bo_s, uint64_t *bo_l)
2869 {
2870 struct amdgpu_bo_va_mapping *mapping;
2871 struct interval_tree_node *node;
2872 struct amdgpu_bo *bo = NULL;
2873 unsigned long userptr;
2874 uint32_t i;
2875 int r;
2876
2877 for (i = 0; i < p->n_pdds; i++) {
2878 struct amdgpu_vm *vm;
2879
2880 if (!p->pdds[i]->drm_priv)
2881 continue;
2882
2883 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2884 r = amdgpu_bo_reserve(vm->root.bo, false);
2885 if (r)
2886 return r;
2887
2888 /* Check userptr by searching entire vm->va interval tree */
2889 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2890 while (node) {
2891 mapping = container_of((struct rb_node *)node,
2892 struct amdgpu_bo_va_mapping, rb);
2893 bo = mapping->bo_va->base.bo;
2894
2895 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2896 start << PAGE_SHIFT,
2897 last << PAGE_SHIFT,
2898 &userptr)) {
2899 node = interval_tree_iter_next(node, 0, ~0ULL);
2900 continue;
2901 }
2902
2903 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2904 start, last);
2905 if (bo_s && bo_l) {
2906 *bo_s = userptr >> PAGE_SHIFT;
2907 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2908 }
2909 amdgpu_bo_unreserve(vm->root.bo);
2910 return -EADDRINUSE;
2911 }
2912 amdgpu_bo_unreserve(vm->root.bo);
2913 }
2914 return 0;
2915 }
2916
2917 static struct
2918 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2919 struct kfd_process *p,
2920 struct mm_struct *mm,
2921 int64_t addr)
2922 {
2923 struct svm_range *prange = NULL;
2924 unsigned long start, last;
2925 uint32_t gpuid, gpuidx;
2926 bool is_heap_stack;
2927 uint64_t bo_s = 0;
2928 uint64_t bo_l = 0;
2929 int r;
2930
2931 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2932 &is_heap_stack))
2933 return NULL;
2934
2935 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2936 if (r != -EADDRINUSE)
2937 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2938
2939 if (r == -EADDRINUSE) {
2940 if (addr >= bo_s && addr <= bo_l)
2941 return NULL;
2942
2943 /* Create a one-page svm range if the granularity-aligned range overlaps an existing mapping */
2944 start = addr;
2945 last = addr;
2946 }
2947
2948 prange = svm_range_new(&p->svms, start, last, true);
2949 if (!prange) {
2950 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2951 return NULL;
2952 }
2953 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2954 pr_debug("failed to get gpuid from kgd\n");
2955 svm_range_free(prange, true);
2956 return NULL;
2957 }
2958
2959 if (is_heap_stack)
2960 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2961
2962 svm_range_add_to_svms(prange);
2963 svm_range_add_notifier_locked(mm, prange);
2964
2965 return prange;
2966 }
2967
2968 /* svm_range_skip_recover - decide if prange can be recovered
2969 * @prange: svm range structure
2970 *
2971 * The GPU VM retry fault handler skips recovering the range in these cases:
2972 * 1. prange is on the deferred list to be removed after unmap; this is a stale
2973 * fault, and the deferred list work will drain it before freeing the prange.
2974 * 2. prange is on the deferred list to add the interval notifier after a split, or
2975 * 3. prange is a child range split from a parent prange; recover it later,
2976 * after the interval notifier is added.
2977 *
2978 * Return: true to skip recover, false to recover
2979 */
2980 static bool svm_range_skip_recover(struct svm_range *prange)
2981 {
2982 struct svm_range_list *svms = prange->svms;
2983
2984 spin_lock(&svms->deferred_list_lock);
2985 if (list_empty(&prange->deferred_list) &&
2986 list_empty(&prange->child_list)) {
2987 spin_unlock(&svms->deferred_list_lock);
2988 return false;
2989 }
2990 spin_unlock(&svms->deferred_list_lock);
2991
2992 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2993 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2994 svms, prange, prange->start, prange->last);
2995 return true;
2996 }
2997 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2998 prange->work_item.op == SVM_OP_ADD_RANGE) {
2999 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
3000 svms, prange, prange->start, prange->last);
3001 return true;
3002 }
3003 return false;
3004 }
3005
3006 static void
3007 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
3008 int32_t gpuidx)
3009 {
3010 struct kfd_process_device *pdd;
3011
3012 /* fault is on different page of same range
3013 * or fault is skipped to recover later
3014 * or fault is on invalid virtual address
3015 */
3016 if (gpuidx == MAX_GPU_INSTANCE) {
3017 uint32_t gpuid;
3018 int r;
3019
3020 r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
3021 if (r < 0)
3022 return;
3023 }
3024
3025 /* fault is recovered
3026 * or fault cannot be recovered because the GPU has no access to the range
3027 */
3028 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3029 if (pdd)
3030 WRITE_ONCE(pdd->faults, pdd->faults + 1);
3031 }
3032
3033 static bool
3034 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
3035 {
3036 unsigned long requested = VM_READ;
3037
3038 if (write_fault)
3039 requested |= VM_WRITE;
3040
3041 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
3042 vma->vm_flags);
3043 return (vma->vm_flags & requested) == requested;
3044 }
3045
3046 int
3047 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
3048 uint32_t vmid, uint32_t node_id,
3049 uint64_t addr, uint64_t ts, bool write_fault)
3050 {
3051 unsigned long start, last, size;
3052 struct mm_struct *mm = NULL;
3053 struct svm_range_list *svms;
3054 struct svm_range *prange;
3055 struct kfd_process *p;
3056 ktime_t timestamp = ktime_get_boottime();
3057 struct kfd_node *node;
3058 int32_t best_loc;
3059 int32_t gpuid, gpuidx = MAX_GPU_INSTANCE;
3060 bool write_locked = false;
3061 struct vm_area_struct *vma;
3062 bool migration = false;
3063 int r = 0;
3064
3065 if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
3066 pr_debug("device does not support SVM\n");
3067 return -EFAULT;
3068 }
3069
3070 p = kfd_lookup_process_by_pasid(pasid, NULL);
3071 if (!p) {
3072 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
3073 return 0;
3074 }
3075 svms = &p->svms;
3076
3077 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
3078
3079 if (atomic_read(&svms->drain_pagefaults)) {
3080 pr_debug("page fault handling disabled, drop fault 0x%llx\n", addr);
3081 r = 0;
3082 goto out;
3083 }
3084
3085 node = kfd_node_by_irq_ids(adev, node_id, vmid);
3086 if (!node) {
3087 pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
3088 vmid);
3089 r = -EFAULT;
3090 goto out;
3091 }
3092
3093 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
3094 pr_debug("failed to get gpuid/gpuidex for node_id: %d\n", node_id);
3095 r = -EFAULT;
3096 goto out;
3097 }
3098
3099 if (!p->xnack_enabled) {
3100 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
3101 r = -EFAULT;
3102 goto out;
3103 }
3104
3105 /* p->lead_thread is available as kfd_process_wq_release flush the work
3106 * before releasing task ref.
3107 */
3108 mm = get_task_mm(p->lead_thread);
3109 if (!mm) {
3110 pr_debug("svms 0x%p failed to get mm\n", svms);
3111 r = 0;
3112 goto out;
3113 }
3114
3115 mmap_read_lock(mm);
3116 retry_write_locked:
3117 mutex_lock(&svms->lock);
3118
3119 /* check if this page fault time stamp is before svms->checkpoint_ts */
3120 if (svms->checkpoint_ts[gpuidx] != 0) {
3121 if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
3122 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
3123 if (write_locked)
3124 mmap_write_downgrade(mm);
3125 r = -EAGAIN;
3126 goto out_unlock_svms;
3127 } else {
3128 /* ts is now after svms->checkpoint_ts; reset svms->checkpoint_ts to zero
3129 * so a later ts wrap-around doesn't produce a wrong comparison
3130 */
3131 svms->checkpoint_ts[gpuidx] = 0;
3132 }
3133 }
3134
3135 prange = svm_range_from_addr(svms, addr, NULL);
3136 if (!prange) {
3137 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
3138 svms, addr);
3139 if (!write_locked) {
3140 /* Need the write lock to create new range with MMU notifier.
3141 * Also flush pending deferred work to make sure the interval
3142 * tree is up to date before we add a new range
3143 */
3144 mutex_unlock(&svms->lock);
3145 mmap_read_unlock(mm);
3146 mmap_write_lock(mm);
3147 write_locked = true;
3148 goto retry_write_locked;
3149 }
3150 prange = svm_range_create_unregistered_range(node, p, mm, addr);
3151 if (!prange) {
3152 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
3153 svms, addr);
3154 mmap_write_downgrade(mm);
3155 r = -EFAULT;
3156 goto out_unlock_svms;
3157 }
3158 }
3159 if (write_locked)
3160 mmap_write_downgrade(mm);
3161
3162 mutex_lock(&prange->migrate_mutex);
3163
3164 if (svm_range_skip_recover(prange)) {
3165 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3166 r = 0;
3167 goto out_unlock_range;
3168 }
3169
3170 /* skip duplicate vm fault on different pages of same range */
3171 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
3172 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
3173 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
3174 svms, prange->start, prange->last);
3175 r = 0;
3176 goto out_unlock_range;
3177 }
3178
3179 /* __do_munmap removed VMA, return success as we are handling stale
3180 * retry fault.
3181 */
3182 vma = vma_lookup(mm, addr << PAGE_SHIFT);
3183 if (!vma) {
3184 pr_debug("address 0x%llx VMA is removed\n", addr);
3185 r = 0;
3186 goto out_unlock_range;
3187 }
3188
3189 if (!svm_fault_allowed(vma, write_fault)) {
3190 pr_debug("fault addr 0x%llx no %s permission\n", addr,
3191 write_fault ? "write" : "read");
3192 r = -EPERM;
3193 goto out_unlock_range;
3194 }
3195
3196 best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3197 if (best_loc == -1) {
3198 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3199 svms, prange->start, prange->last);
3200 r = -EACCES;
3201 goto out_unlock_range;
3202 }
3203
3204 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3205 svms, prange->start, prange->last, best_loc,
3206 prange->actual_loc);
3207
3208 kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
3209 write_fault, timestamp);
3210
3211 /* Align migration range start and size to granularity size */
3212 size = 1UL << prange->granularity;
3213 start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
3214 last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
3215 if (prange->actual_loc != 0 || best_loc != 0) {
3216 if (best_loc) {
3217 r = svm_migrate_to_vram(prange, best_loc, start, last,
3218 mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3219 if (r) {
3220 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
3221 r, addr);
3222 /* Fallback to system memory if migration to
3223 * VRAM failed
3224 */
3225 if (prange->actual_loc && prange->actual_loc != best_loc)
3226 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3227 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3228 else
3229 r = 0;
3230 }
3231 } else {
3232 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3233 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3234 }
3235 if (r) {
3236 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3237 r, svms, start, last);
3238 goto out_migrate_fail;
3239 } else {
3240 migration = true;
3241 }
3242 }
3243
3244 r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
3245 false, false);
3246 if (r)
3247 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3248 r, svms, start, last);
3249
3250 out_migrate_fail:
3251 kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3252 migration);
3253
3254 out_unlock_range:
3255 mutex_unlock(&prange->migrate_mutex);
3256 out_unlock_svms:
3257 mutex_unlock(&svms->lock);
3258 mmap_read_unlock(mm);
3259
3260 if (r != -EAGAIN)
3261 svm_range_count_fault(node, p, gpuidx);
3262
3263 mmput(mm);
3264 out:
3265 kfd_unref_process(p);
3266
3267 if (r == -EAGAIN) {
3268 pr_debug("recover vm fault later\n");
3269 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3270 r = 0;
3271 }
3272 return r;
3273 }
3274
3275 int
3276 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3277 {
3278 struct svm_range *prange, *pchild;
3279 uint64_t reserved_size = 0;
3280 uint64_t size;
3281 int r = 0;
3282
3283 pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3284
3285 mutex_lock(&p->svms.lock);
3286
3287 list_for_each_entry(prange, &p->svms.list, list) {
3288 svm_range_lock(prange);
3289 list_for_each_entry(pchild, &prange->child_list, child_list) {
3290 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3291 if (xnack_enabled) {
3292 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3293 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3294 } else {
3295 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3296 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3297 if (r)
3298 goto out_unlock;
3299 reserved_size += size;
3300 }
3301 }
3302
3303 size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3304 if (xnack_enabled) {
3305 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3306 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3307 } else {
3308 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3309 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3310 if (r)
3311 goto out_unlock;
3312 reserved_size += size;
3313 }
3314 out_unlock:
3315 svm_range_unlock(prange);
3316 if (r)
3317 break;
3318 }
3319
3320 if (r)
3321 amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3322 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3323 else
3324 /* Changing the xnack mode must happen inside the svms lock, to avoid racing
3325 * with svm_range_deferred_list_work unreserving memory in parallel.
3326 */
3327 p->xnack_enabled = xnack_enabled;
3328
3329 mutex_unlock(&p->svms.lock);
3330 return r;
3331 }
3332
3333 void svm_range_list_fini(struct kfd_process *p)
3334 {
3335 struct svm_range *prange;
3336 struct svm_range *next;
3337
3338 pr_debug("process pid %d svms 0x%p\n", p->lead_thread->pid,
3339 &p->svms);
3340
3341 cancel_delayed_work_sync(&p->svms.restore_work);
3342
3343 /* Ensure list work is finished before process is destroyed */
3344 flush_work(&p->svms.deferred_list_work);
3345
3346 /*
3347 * Ensure no retry fault comes in afterwards, as the page fault handler will
3348 * not find the kfd process and take the mm lock to recover the fault.
3349 * Stop kfd page fault handling, then wait until pending page faults are drained.
3350 */
3351 atomic_set(&p->svms.drain_pagefaults, 1);
3352 svm_range_drain_retry_fault(&p->svms);
3353
3354 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3355 svm_range_unlink(prange);
3356 svm_range_remove_notifier(prange);
3357 svm_range_free(prange, true);
3358 }
3359
3360 mutex_destroy(&p->svms.lock);
3361
3362 pr_debug("process pid %d svms 0x%p done\n",
3363 p->lead_thread->pid, &p->svms);
3364 }
3365
3366 int svm_range_list_init(struct kfd_process *p)
3367 {
3368 struct svm_range_list *svms = &p->svms;
3369 int i;
3370
3371 svms->objects = RB_ROOT_CACHED;
3372 mutex_init(&svms->lock);
3373 INIT_LIST_HEAD(&svms->list);
3374 atomic_set(&svms->evicted_ranges, 0);
3375 atomic_set(&svms->drain_pagefaults, 0);
3376 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3377 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3378 INIT_LIST_HEAD(&svms->deferred_range_list);
3379 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3380 spin_lock_init(&svms->deferred_list_lock);
3381
3382 for (i = 0; i < p->n_pdds; i++)
3383 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3384 bitmap_set(svms->bitmap_supported, i, 1);
3385
3386 /* Value of default granularity cannot exceed 0x1B, the
3387 * number of pages supported by a 4-level paging table
3388 */
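/* A granularity of g means ranges are aligned and split in units of 2^g
 * pages; e.g. g = 9 corresponds to 512 pages, i.e. 2MB with 4KiB pages.
 */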
3389 svms->default_granularity = min_t(u8, amdgpu_svm_default_granularity, 0x1B);
3390 pr_debug("Default SVM Granularity to use: %d\n", svms->default_granularity);
3391
3392 return 0;
3393 }
3394
3395 /**
3396 * svm_range_check_vm - check if virtual address range mapped already
3397 * @p: current kfd_process
3398 * @start: range start address, in pages
3399 * @last: range last address, in pages
3400 * @bo_s: mapping start address in pages if address range already mapped
3401 * @bo_l: mapping last address in pages if address range already mapped
3402 *
3403 * The purpose is to avoid clashing with virtual address ranges already
3404 * allocated by the kfd_ioctl_alloc_memory_of_gpu ioctl.
3405 * It checks each pdd in the kfd_process.
3406 *
3407 * Context: Process context
3408 *
3409 * Return 0 - OK, if the range is not mapped.
3410 * Otherwise error code:
3411 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3412 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3413 * a signal. Release all buffer reservations and return to user-space.
3414 */
3415 static int
3416 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3417 uint64_t *bo_s, uint64_t *bo_l)
3418 {
3419 struct amdgpu_bo_va_mapping *mapping;
3420 struct interval_tree_node *node;
3421 uint32_t i;
3422 int r;
3423
3424 for (i = 0; i < p->n_pdds; i++) {
3425 struct amdgpu_vm *vm;
3426
3427 if (!p->pdds[i]->drm_priv)
3428 continue;
3429
3430 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3431 r = amdgpu_bo_reserve(vm->root.bo, false);
3432 if (r)
3433 return r;
3434
3435 node = interval_tree_iter_first(&vm->va, start, last);
3436 if (node) {
3437 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3438 start, last);
3439 mapping = container_of((struct rb_node *)node,
3440 struct amdgpu_bo_va_mapping, rb);
3441 if (bo_s && bo_l) {
3442 *bo_s = mapping->start;
3443 *bo_l = mapping->last;
3444 }
3445 amdgpu_bo_unreserve(vm->root.bo);
3446 return -EADDRINUSE;
3447 }
3448 amdgpu_bo_unreserve(vm->root.bo);
3449 }
3450
3451 return 0;
3452 }
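/*
 * Usage sketch (editor's note, illustrative only): a caller that wants to
 * report the conflicting window when a candidate range collides with an
 * existing kfd_ioctl_alloc_memory_of_gpu mapping might do something like:
 *
 *	uint64_t bo_s = 0, bo_l = 0;
 *	int r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
 *	if (r == -EADDRINUSE)
 *		pr_debug("range [0x%llx 0x%llx] overlaps BO mapping [0x%llx 0x%llx]\n",
 *			 start, last, bo_s, bo_l);
 *
 * start/last and the returned bo_s/bo_l are all page numbers, not byte
 * addresses.
 */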
3453
3454 /**
3455 * svm_range_is_valid - check if virtual address range is valid
3456 * @p: current kfd_process
3457 * @start: range start address, in pages
3458 * @size: range size, in pages
3459 *
3460 * A virtual address range is valid if it is entirely covered by one or more VMAs that are not device mappings
3461 *
3462 * Context: Process context
3463 *
3464 * Return:
3465 * 0 - OK, otherwise error code
3466 */
3467 static int
3468 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3469 {
3470 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3471 struct vm_area_struct *vma;
3472 unsigned long end;
3473 unsigned long start_unchg = start;
3474
3475 start <<= PAGE_SHIFT;
3476 end = start + (size << PAGE_SHIFT);
3477 do {
3478 vma = vma_lookup(p->mm, start);
3479 if (!vma || (vma->vm_flags & device_vma))
3480 return -EFAULT;
3481 start = min(end, vma->vm_end);
3482 } while (start < end);
3483
3484 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3485 NULL);
3486 }
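/*
 * Calling-convention sketch (editor's note, illustrative only): start and
 * size are in pages, so a caller working with byte addresses converts first,
 * and must hold the mmap lock because vma_lookup() is used internally:
 *
 *	mmap_read_lock(mm);
 *	r = svm_range_is_valid(p, addr >> PAGE_SHIFT, bytes >> PAGE_SHIFT);
 *	mmap_read_unlock(mm);
 */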
3487
3488 /**
3489 * svm_range_best_prefetch_location - decide the best prefetch location
3490 * @prange: svm range structure
3491 *
3492 * For xnack off:
3493 * If the range maps to a single GPU, the best prefetch location is prefetch_loc,
3494 * which can be CPU or GPU.
3495 *
3496 * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch location
3497 * is the prefetch_loc GPU only if the mGPUs are connected on the same XGMI hive;
3498 * otherwise the best prefetch location is always CPU, because a GPU cannot
3499 * coherently map the VRAM of other GPUs even with a large-BAR PCIe connection.
3500 *
3501 * For xnack on:
3502 * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
3503 * prefetch_loc; access from other GPUs will generate a vm fault and trigger migration.
3504 *
3505 * If the range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is the
3506 * prefetch_loc GPU only if the mGPUs are connected on the same XGMI hive;
3507 * otherwise the best prefetch location is always CPU.
3508 *
3509 * Context: Process context
3510 *
3511 * Return:
3512 * 0 for CPU, or the GPU id of the best prefetch location
3513 */
3514 static uint32_t
3515 svm_range_best_prefetch_location(struct svm_range *prange)
3516 {
3517 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3518 uint32_t best_loc = prange->prefetch_loc;
3519 struct kfd_process_device *pdd;
3520 struct kfd_node *bo_node;
3521 struct kfd_process *p;
3522 uint32_t gpuidx;
3523
3524 p = container_of(prange->svms, struct kfd_process, svms);
3525
3526 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3527 goto out;
3528
3529 bo_node = svm_range_get_node_by_id(prange, best_loc);
3530 if (!bo_node) {
3531 WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3532 best_loc = 0;
3533 goto out;
3534 }
3535
3536 if (bo_node->adev->apu_prefer_gtt) {
3537 best_loc = 0;
3538 goto out;
3539 }
3540
3541 if (p->xnack_enabled)
3542 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3543 else
3544 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3545 MAX_GPU_INSTANCE);
3546
3547 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3548 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3549 if (!pdd) {
3550 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3551 continue;
3552 }
3553
3554 if (pdd->dev->adev == bo_node->adev)
3555 continue;
3556
3557 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3558 best_loc = 0;
3559 break;
3560 }
3561 }
3562
3563 out:
3564 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3565 p->xnack_enabled, &p->svms, prange->start, prange->last,
3566 best_loc);
3567
3568 return best_loc;
3569 }
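/*
 * Worked example (editor's note, illustrative only), for the xnack-off case:
 * suppose prange->prefetch_loc is GPU A and the range is marked ACCESS on
 * GPUs A and B.
 * - If A and B are on the same XGMI hive, the loop above finds no peer that
 *   fails svm_nodes_in_same_hive(), so best_loc stays A and the prefetch
 *   target is A's VRAM.
 * - If B is only reachable over PCIe, best_loc is reset to 0 and the prefetch
 *   target falls back to system memory (CPU).
 */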
3570
3571 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3572 * @mm: current process mm_struct
3573 * @prange: svm range structure
3574 * @migrated: output, true if migration is triggered
3575 *
3576 * If the range prefetch_loc is a GPU and the actual loc is cpu 0, migrate the
3577 * range from ram to vram.
3578 * If the range prefetch_loc is cpu 0 and the actual loc is a GPU, migrate the
3579 * range from vram to ram.
3580 *
3581 * If GPU vm fault retry is not enabled, migration interacts with the MMU
3582 * notifier and the restore work:
3583 * 1. migrate_vma_setup invalidates pages, the MMU notifier callback
3584 * svm_range_evict stops all queues and schedules the restore work
3585 * 2. svm_range_restore_work waits until migration is done by
3586 * a. svm_range_validate_vram taking prange->migrate_mutex
3587 * b. svm_range_validate_ram HMM get pages waiting until the CPU fault handler returns
3588 * 3. the restore work updates the GPU mappings and resumes all queues.
3589 *
3590 * Context: Process context
3591 *
3592 * Return:
3593 * 0 - OK, otherwise - error code of migration
3594 */
3595 static int
3596 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3597 bool *migrated)
3598 {
3599 uint32_t best_loc;
3600 int r = 0;
3601
3602 *migrated = false;
3603 best_loc = svm_range_best_prefetch_location(prange);
3604
3605 /* when best_loc is a gpu node and the same as prange->actual_loc
3606 * we still need to do the migration, as prange->actual_loc != 0 does
3607 * not mean all pages in prange are in vram. hmm migrate will pick
3608 * up the right pages during migration.
3609 */
3610 if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
3611 (best_loc == 0 && prange->actual_loc == 0))
3612 return 0;
3613
3614 if (!best_loc) {
3615 r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
3616 KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3617 *migrated = !r;
3618 return r;
3619 }
3620
3621 r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
3622 mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3623 *migrated = !r;
3624
3625 return 0;
3626 }
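/*
 * Decision summary (editor's note, illustrative only), matching the checks
 * above:
 *
 *	best_loc          actual_loc   action
 *	UNDEFINED         any          nothing (return 0)
 *	0 (CPU)           0            nothing (return 0)
 *	0 (CPU)           GPU          svm_migrate_vram_to_ram()
 *	GPU id            any          svm_migrate_to_vram()
 *
 * Note that a GPU best_loc triggers migration even when it equals actual_loc,
 * because some pages of the range may still be in system memory.
 */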
3627
3628 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3629 {
3630 /* Dereferencing fence->svm_bo is safe here because the fence hasn't
3631 * signaled yet and we're under the protection of the fence->lock.
3632 * After the fence is signaled in svm_range_bo_release, we cannot get
3633 * here any more.
3634 *
3635 * Reference is dropped in svm_range_evict_svm_bo_worker.
3636 */
3637 if (svm_bo_ref_unless_zero(fence->svm_bo)) {
3638 WRITE_ONCE(fence->svm_bo->evicting, 1);
3639 schedule_work(&fence->svm_bo->eviction_work);
3640 }
3641
3642 return 0;
3643 }
3644
3645 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3646 {
3647 struct svm_range_bo *svm_bo;
3648 struct mm_struct *mm;
3649 int r = 0;
3650
3651 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3652
3653 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3654 mm = svm_bo->eviction_fence->mm;
3655 } else {
3656 svm_range_bo_unref(svm_bo);
3657 return;
3658 }
3659
3660 mmap_read_lock(mm);
3661 spin_lock(&svm_bo->list_lock);
3662 while (!list_empty(&svm_bo->range_list) && !r) {
3663 struct svm_range *prange =
3664 list_first_entry(&svm_bo->range_list,
3665 struct svm_range, svm_bo_list);
3666 int retries = 3;
3667
3668 list_del_init(&prange->svm_bo_list);
3669 spin_unlock(&svm_bo->list_lock);
3670
3671 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3672 prange->start, prange->last);
3673
3674 mutex_lock(&prange->migrate_mutex);
3675 do {
3676 /* migrate all vram pages in this prange to system ram;
3677 * after that, prange->actual_loc should be zero
3678 */
3679 r = svm_migrate_vram_to_ram(prange, mm,
3680 prange->start, prange->last,
3681 KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3682 } while (!r && prange->actual_loc && --retries);
3683
3684 if (!r && prange->actual_loc)
3685 pr_info_once("Migration failed during eviction");
3686
3687 if (!prange->actual_loc) {
3688 mutex_lock(&prange->lock);
3689 prange->svm_bo = NULL;
3690 mutex_unlock(&prange->lock);
3691 }
3692 mutex_unlock(&prange->migrate_mutex);
3693
3694 spin_lock(&svm_bo->list_lock);
3695 }
3696 spin_unlock(&svm_bo->list_lock);
3697 mmap_read_unlock(mm);
3698 mmput(mm);
3699
3700 dma_fence_signal(&svm_bo->eviction_fence->base);
3701
3702 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3703 * has been called in svm_migrate_vram_to_ram
3704 */
3705 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3706 svm_range_bo_unref(svm_bo);
3707 }
3708
3709 static int
3710 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3711 uint64_t start, uint64_t size, uint32_t nattr,
3712 struct kfd_ioctl_svm_attribute *attrs)
3713 {
3714 struct amdkfd_process_info *process_info = p->kgd_process_info;
3715 struct list_head update_list;
3716 struct list_head insert_list;
3717 struct list_head remove_list;
3718 struct list_head remap_list;
3719 struct svm_range_list *svms;
3720 struct svm_range *prange;
3721 struct svm_range *next;
3722 bool update_mapping = false;
3723 bool flush_tlb;
3724 int r, ret = 0;
3725
3726 pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3727 p->lead_thread->pid, &p->svms, start, start + size - 1, size);
3728
3729 r = svm_range_check_attr(p, nattr, attrs);
3730 if (r)
3731 return r;
3732
3733 svms = &p->svms;
3734
3735 mutex_lock(&process_info->lock);
3736
3737 svm_range_list_lock_and_flush_work(svms, mm);
3738
3739 r = svm_range_is_valid(p, start, size);
3740 if (r) {
3741 pr_debug("invalid range r=%d\n", r);
3742 mmap_write_unlock(mm);
3743 goto out;
3744 }
3745
3746 mutex_lock(&svms->lock);
3747
3748 /* Add new range and split existing ranges as needed */
3749 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3750 &insert_list, &remove_list, &remap_list);
3751 if (r) {
3752 mutex_unlock(&svms->lock);
3753 mmap_write_unlock(mm);
3754 goto out;
3755 }
3756 /* Apply changes as a transaction */
3757 list_for_each_entry_safe(prange, next, &insert_list, list) {
3758 svm_range_add_to_svms(prange);
3759 svm_range_add_notifier_locked(mm, prange);
3760 }
3761 list_for_each_entry(prange, &update_list, update_list) {
3762 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3763 /* TODO: unmap ranges from GPU that lost access */
3764 }
3765 update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
3766
3767 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3768 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3769 prange->svms, prange, prange->start,
3770 prange->last);
3771 svm_range_unlink(prange);
3772 svm_range_remove_notifier(prange);
3773 svm_range_free(prange, false);
3774 }
3775
3776 mmap_write_downgrade(mm);
3777 /* Trigger migrations and revalidate and map to GPUs as needed. If
3778 * this fails we may be left with partially completed actions. There
3779 * is no clean way of rolling back to the previous state in such a
3780 * case because the rollback wouldn't be guaranteed to work either.
3781 */
3782 list_for_each_entry(prange, &update_list, update_list) {
3783 bool migrated;
3784
3785 mutex_lock(&prange->migrate_mutex);
3786
3787 r = svm_range_trigger_migration(mm, prange, &migrated);
3788 if (r)
3789 goto out_unlock_range;
3790
3791 if (migrated && (!p->xnack_enabled ||
3792 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3793 prange->mapped_to_gpu) {
3794 pr_debug("restore_work will update mappings of GPUs\n");
3795 mutex_unlock(&prange->migrate_mutex);
3796 continue;
3797 }
3798
3799 if (!migrated && !update_mapping) {
3800 mutex_unlock(&prange->migrate_mutex);
3801 continue;
3802 }
3803
3804 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3805
3806 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3807 MAX_GPU_INSTANCE, true, true, flush_tlb);
3808 if (r)
3809 pr_debug("failed %d to map svm range\n", r);
3810
3811 out_unlock_range:
3812 mutex_unlock(&prange->migrate_mutex);
3813 if (r)
3814 ret = r;
3815 }
3816
3817 list_for_each_entry(prange, &remap_list, update_list) {
3818 pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
3819 prange, prange->start, prange->last);
3820 mutex_lock(&prange->migrate_mutex);
3821 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3822 MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
3823 if (r)
3824 pr_debug("failed %d on remap svm range\n", r);
3825 mutex_unlock(&prange->migrate_mutex);
3826 if (r)
3827 ret = r;
3828 }
3829
3830 dynamic_svm_range_dump(svms);
3831
3832 mutex_unlock(&svms->lock);
3833 mmap_read_unlock(mm);
3834 out:
3835 mutex_unlock(&process_info->lock);
3836
3837 pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] done, r=%d\n",
3838 p->lead_thread->pid, &p->svms, start, start + size - 1, r);
3839
3840 return ret ? ret : r;
3841 }
3842
3843 static int
3844 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3845 uint64_t start, uint64_t size, uint32_t nattr,
3846 struct kfd_ioctl_svm_attribute *attrs)
3847 {
3848 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3849 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3850 bool get_preferred_loc = false;
3851 bool get_prefetch_loc = false;
3852 bool get_granularity = false;
3853 bool get_accessible = false;
3854 bool get_flags = false;
3855 uint64_t last = start + size - 1UL;
3856 uint8_t granularity = 0xff;
3857 struct interval_tree_node *node;
3858 struct svm_range_list *svms;
3859 struct svm_range *prange;
3860 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3861 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3862 uint32_t flags_and = 0xffffffff;
3863 uint32_t flags_or = 0;
3864 int gpuidx;
3865 uint32_t i;
3866 int r = 0;
3867
3868 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3869 start + size - 1, nattr);
3870
3871 /* Flush pending deferred work to avoid racing with deferred actions from
3872 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3873 * can still race with get_attr because we don't hold the mmap lock. But that
3874 * would be a race condition in the application anyway, and undefined
3875 * behaviour is acceptable in that case.
3876 */
3877 flush_work(&p->svms.deferred_list_work);
3878
3879 mmap_read_lock(mm);
3880 r = svm_range_is_valid(p, start, size);
3881 mmap_read_unlock(mm);
3882 if (r) {
3883 pr_debug("invalid range r=%d\n", r);
3884 return r;
3885 }
3886
3887 for (i = 0; i < nattr; i++) {
3888 switch (attrs[i].type) {
3889 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3890 get_preferred_loc = true;
3891 break;
3892 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3893 get_prefetch_loc = true;
3894 break;
3895 case KFD_IOCTL_SVM_ATTR_ACCESS:
3896 get_accessible = true;
3897 break;
3898 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3899 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3900 get_flags = true;
3901 break;
3902 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3903 get_granularity = true;
3904 break;
3905 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3906 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3907 fallthrough;
3908 default:
3909 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3910 return -EINVAL;
3911 }
3912 }
3913
3914 svms = &p->svms;
3915
3916 mutex_lock(&svms->lock);
3917
3918 node = interval_tree_iter_first(&svms->objects, start, last);
3919 if (!node) {
3920 pr_debug("range attrs not found return default values\n");
3921 svm_range_set_default_attributes(svms, &location, &prefetch_loc,
3922 &granularity, &flags_and);
3923 flags_or = flags_and;
3924 if (p->xnack_enabled)
3925 bitmap_copy(bitmap_access, svms->bitmap_supported,
3926 MAX_GPU_INSTANCE);
3927 else
3928 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3929 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3930 goto fill_values;
3931 }
3932 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3933 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3934
3935 while (node) {
3936 struct interval_tree_node *next;
3937
3938 prange = container_of(node, struct svm_range, it_node);
3939 next = interval_tree_iter_next(node, start, last);
3940
3941 if (get_preferred_loc) {
3942 if (prange->preferred_loc ==
3943 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3944 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3945 location != prange->preferred_loc)) {
3946 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3947 get_preferred_loc = false;
3948 } else {
3949 location = prange->preferred_loc;
3950 }
3951 }
3952 if (get_prefetch_loc) {
3953 if (prange->prefetch_loc ==
3954 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3955 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3956 prefetch_loc != prange->prefetch_loc)) {
3957 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3958 get_prefetch_loc = false;
3959 } else {
3960 prefetch_loc = prange->prefetch_loc;
3961 }
3962 }
3963 if (get_accessible) {
3964 bitmap_and(bitmap_access, bitmap_access,
3965 prange->bitmap_access, MAX_GPU_INSTANCE);
3966 bitmap_and(bitmap_aip, bitmap_aip,
3967 prange->bitmap_aip, MAX_GPU_INSTANCE);
3968 }
3969 if (get_flags) {
3970 flags_and &= prange->flags;
3971 flags_or |= prange->flags;
3972 }
3973
3974 if (get_granularity && prange->granularity < granularity)
3975 granularity = prange->granularity;
3976
3977 node = next;
3978 }
3979 fill_values:
3980 mutex_unlock(&svms->lock);
3981
3982 for (i = 0; i < nattr; i++) {
3983 switch (attrs[i].type) {
3984 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3985 attrs[i].value = location;
3986 break;
3987 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3988 attrs[i].value = prefetch_loc;
3989 break;
3990 case KFD_IOCTL_SVM_ATTR_ACCESS:
3991 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3992 attrs[i].value);
3993 if (gpuidx < 0) {
3994 pr_debug("invalid gpuid %x\n", attrs[i].value);
3995 return -EINVAL;
3996 }
3997 if (test_bit(gpuidx, bitmap_access))
3998 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3999 else if (test_bit(gpuidx, bitmap_aip))
4000 attrs[i].type =
4001 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
4002 else
4003 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
4004 break;
4005 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
4006 attrs[i].value = flags_and;
4007 break;
4008 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
4009 attrs[i].value = ~flags_or;
4010 break;
4011 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
4012 attrs[i].value = (uint32_t)granularity;
4013 break;
4014 }
4015 }
4016
4017 return 0;
4018 }
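/*
 * Aggregation example (editor's note, illustrative only): if the queried
 * interval overlaps two ranges with flags 0x3 and 0x1, the loop above yields
 * flags_and = 0x1 and flags_or = 0x3. SET_FLAGS then reports 0x1 (flags set
 * on every overlapping range) and CLR_FLAGS reports ~0x3 (flags clear on
 * every overlapping range).
 */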
4019
4020 int kfd_criu_resume_svm(struct kfd_process *p)
4021 {
4022 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
4023 int nattr_common = 4, nattr_accessibility = 1;
4024 struct criu_svm_metadata *criu_svm_md = NULL;
4025 struct svm_range_list *svms = &p->svms;
4026 struct criu_svm_metadata *next = NULL;
4027 uint32_t set_flags = 0xffffffff;
4028 int i, j, num_attrs, ret = 0;
4029 uint64_t set_attr_size;
4030 struct mm_struct *mm;
4031
4032 if (list_empty(&svms->criu_svm_metadata_list)) {
4033 pr_debug("No SVM data from CRIU restore stage 2\n");
4034 return ret;
4035 }
4036
4037 mm = get_task_mm(p->lead_thread);
4038 if (!mm) {
4039 pr_err("failed to get mm for the target process\n");
4040 return -ESRCH;
4041 }
4042
4043 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
4044
4045 i = j = 0;
4046 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
4047 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
4048 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
4049
4050 for (j = 0; j < num_attrs; j++) {
4051 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
4052 i, j, criu_svm_md->data.attrs[j].type,
4053 i, j, criu_svm_md->data.attrs[j].value);
4054 switch (criu_svm_md->data.attrs[j].type) {
4055 /* During the checkpoint operation, the query for the
4056 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
4057 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
4058 * not used by the range that was checkpointed. Care
4059 * must be taken not to restore with an invalid value;
4060 * otherwise the gpuidx value will be invalid and
4061 * set_attr would eventually fail, so just replace those
4062 * with a harmless dummy attribute such as
4063 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
4064 */
4065 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
4066 if (criu_svm_md->data.attrs[j].value ==
4067 KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
4068 criu_svm_md->data.attrs[j].type =
4069 KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4070 criu_svm_md->data.attrs[j].value = 0;
4071 }
4072 break;
4073 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
4074 set_flags = criu_svm_md->data.attrs[j].value;
4075 break;
4076 default:
4077 break;
4078 }
4079 }
4080
4081 /* CLR_FLAGS is not available via get_attr during checkpoint, but
4082 * it needs to be inserted before restoring the ranges, so
4083 * allocate extra space for it before calling set_attr
4084 */
4085 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4086 (num_attrs + 1);
4087 set_attr_new = krealloc(set_attr, set_attr_size,
4088 GFP_KERNEL);
4089 if (!set_attr_new) {
4090 ret = -ENOMEM;
4091 goto exit;
4092 }
4093 set_attr = set_attr_new;
4094
4095 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
4096 sizeof(struct kfd_ioctl_svm_attribute));
4097 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
4098 set_attr[num_attrs].value = ~set_flags;
4099
4100 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
4101 criu_svm_md->data.size, num_attrs + 1,
4102 set_attr);
4103 if (ret) {
4104 pr_err("CRIU: failed to set range attributes\n");
4105 goto exit;
4106 }
4107
4108 i++;
4109 }
4110 exit:
4111 kfree(set_attr);
4112 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
4113 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
4114 criu_svm_md->data.start_addr);
4115 kfree(criu_svm_md);
4116 }
4117
4118 mmput(mm);
4119 return ret;
4120
4121 }
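/*
 * Layout sketch (editor's note, illustrative only) of the set_attr array
 * handed to svm_range_set_attr() above, for a process with n_pdds GPUs:
 *
 *	[0 .. 3]               the four common attributes saved at checkpoint
 *	                       time (preferred loc, prefetch loc, set-flags,
 *	                       granularity)
 *	[4 .. 4 + n_pdds - 1]  one accessibility attribute per GPU
 *	[num_attrs]            KFD_IOCTL_SVM_ATTR_CLR_FLAGS = ~set_flags,
 *	                       appended here because get_attr cannot report it
 *	                       at checkpoint time
 */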
4122
4123 int kfd_criu_restore_svm(struct kfd_process *p,
4124 uint8_t __user *user_priv_ptr,
4125 uint64_t *priv_data_offset,
4126 uint64_t max_priv_data_size)
4127 {
4128 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
4129 int nattr_common = 4, nattr_accessibility = 1;
4130 struct criu_svm_metadata *criu_svm_md = NULL;
4131 struct svm_range_list *svms = &p->svms;
4132 uint32_t num_devices;
4133 int ret = 0;
4134
4135 num_devices = p->n_pdds;
4136 /* Handle one SVM range object at a time. The number of GPUs is
4137 * assumed to be the same on the restore node; this must be checked
4138 * while evaluating the topology earlier.
4139 */
4140
4141 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
4142 (nattr_common + nattr_accessibility * num_devices);
4143 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
4144
4145 svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4146 svm_attrs_size;
4147
4148 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
4149 if (!criu_svm_md) {
4150 pr_err("failed to allocate memory to store svm metadata\n");
4151 return -ENOMEM;
4152 }
4153 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
4154 ret = -EINVAL;
4155 goto exit;
4156 }
4157
4158 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
4159 svm_priv_data_size);
4160 if (ret) {
4161 ret = -EFAULT;
4162 goto exit;
4163 }
4164 *priv_data_offset += svm_priv_data_size;
4165
4166 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
4167
4168 return 0;
4169
4170
4171 exit:
4172 kfree(criu_svm_md);
4173 return ret;
4174 }
4175
4176 void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
4177 uint64_t *svm_priv_data_size)
4178 {
4179 uint64_t total_size, accessibility_size, common_attr_size;
4180 int nattr_common = 4, nattr_accessibility = 1;
4181 int num_devices = p->n_pdds;
4182 struct svm_range_list *svms;
4183 struct svm_range *prange;
4184 uint32_t count = 0;
4185
4186 *svm_priv_data_size = 0;
4187
4188 svms = &p->svms;
4189
4190 mutex_lock(&svms->lock);
4191 list_for_each_entry(prange, &svms->list, list) {
4192 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
4193 prange, prange->start, prange->npages,
4194 prange->start + prange->npages - 1);
4195 count++;
4196 }
4197 mutex_unlock(&svms->lock);
4198
4199 *num_svm_ranges = count;
4200 /* Only the accessibility attributes need to be queried for all the gpus
4201 * individually; the remaining ones apply to the entire process
4202 * regardless of the individual gpu nodes. Of the remaining attributes,
4203 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
4204 *
4205 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
4206 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
4207 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
4208 * KFD_IOCTL_SVM_ATTR_GRANULARITY
4209 *
4210 * ** ACCESSIBILITY ATTRIBUTES **
4211 * (Considered as one, type is altered during query, value is gpuid)
4212 * KFD_IOCTL_SVM_ATTR_ACCESS
4213 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
4214 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
4215 */
4216 if (*num_svm_ranges > 0) {
4217 common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4218 nattr_common;
4219 accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
4220 nattr_accessibility * num_devices;
4221
4222 total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4223 common_attr_size + accessibility_size;
4224
4225 *svm_priv_data_size = *num_svm_ranges * total_size;
4226 }
4227
4228 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
4229 *svm_priv_data_size);
4230 }
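/*
 * Size example (editor's note, illustrative only): with 2 SVM ranges and
 * 4 GPUs the computation above gives, per range,
 *
 *	total_size = sizeof(struct kfd_criu_svm_range_priv_data)
 *		   + (4 + 1 * 4) * sizeof(struct kfd_ioctl_svm_attribute)
 *
 * and *svm_priv_data_size = 2 * total_size, which matches what
 * kfd_criu_checkpoint_svm() writes per range below.
 */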
4231
4232 int kfd_criu_checkpoint_svm(struct kfd_process *p,
4233 uint8_t __user *user_priv_data,
4234 uint64_t *priv_data_offset)
4235 {
4236 struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
4237 struct kfd_ioctl_svm_attribute *query_attr = NULL;
4238 uint64_t svm_priv_data_size, query_attr_size = 0;
4239 int index, nattr_common = 4, ret = 0;
4240 struct svm_range_list *svms;
4241 int num_devices = p->n_pdds;
4242 struct svm_range *prange;
4243 struct mm_struct *mm;
4244
4245 svms = &p->svms;
4246
4247 mm = get_task_mm(p->lead_thread);
4248 if (!mm) {
4249 pr_err("failed to get mm for the target process\n");
4250 return -ESRCH;
4251 }
4252
4253 query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4254 (nattr_common + num_devices);
4255
4256 query_attr = kzalloc(query_attr_size, GFP_KERNEL);
4257 if (!query_attr) {
4258 ret = -ENOMEM;
4259 goto exit;
4260 }
4261
4262 query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
4263 query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
4264 query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4265 query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
4266
4267 for (index = 0; index < num_devices; index++) {
4268 struct kfd_process_device *pdd = p->pdds[index];
4269
4270 query_attr[index + nattr_common].type =
4271 KFD_IOCTL_SVM_ATTR_ACCESS;
4272 query_attr[index + nattr_common].value = pdd->user_gpu_id;
4273 }
4274
4275 svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
4276
4277 svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
4278 if (!svm_priv) {
4279 ret = -ENOMEM;
4280 goto exit_query;
4281 }
4282
4283 index = 0;
4284 list_for_each_entry(prange, &svms->list, list) {
4285
4286 svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
4287 svm_priv->start_addr = prange->start;
4288 svm_priv->size = prange->npages;
4289 memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4290 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4291 prange, prange->start, prange->npages,
4292 prange->start + prange->npages - 1,
4293 prange->npages * PAGE_SIZE);
4294
4295 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4296 svm_priv->size,
4297 (nattr_common + num_devices),
4298 svm_priv->attrs);
4299 if (ret) {
4300 pr_err("CRIU: failed to obtain range attributes\n");
4301 goto exit_priv;
4302 }
4303
4304 if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4305 svm_priv_data_size)) {
4306 pr_err("Failed to copy svm priv to user\n");
4307 ret = -EFAULT;
4308 goto exit_priv;
4309 }
4310
4311 *priv_data_offset += svm_priv_data_size;
4312
4313 }
4314
4315
4316 exit_priv:
4317 kfree(svm_priv);
4318 exit_query:
4319 kfree(query_attr);
4320 exit:
4321 mmput(mm);
4322 return ret;
4323 }
4324
4325 int
4326 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4327 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4328 {
4329 struct mm_struct *mm = current->mm;
4330 int r;
4331
4332 start >>= PAGE_SHIFT;
4333 size >>= PAGE_SHIFT;
4334
4335 switch (op) {
4336 case KFD_IOCTL_SVM_OP_SET_ATTR:
4337 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4338 break;
4339 case KFD_IOCTL_SVM_OP_GET_ATTR:
4340 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
4341 break;
4342 default:
4343 r = -EINVAL;
4344 break;
4345 }
4346
4347 return r;
4348 }
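/*
 * User-space usage sketch (editor's note, illustrative only, assuming the
 * uapi layout of struct kfd_ioctl_svm_args in include/uapi/linux/kfd_ioctl.h):
 * prefetch a buffer to one GPU via AMDKFD_IOC_SVM. start_addr and size are
 * byte values here; svm_ioctl() shifts them down to pages itself.
 *
 *	size_t sz = sizeof(struct kfd_ioctl_svm_args) +
 *		    sizeof(struct kfd_ioctl_svm_attribute);
 *	struct kfd_ioctl_svm_args *args = calloc(1, sz);
 *
 *	args->start_addr = (uint64_t)buf;	// page-aligned byte address
 *	args->size = buf_size;			// bytes
 *	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *	args->nattr = 1;
 *	args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
 *	args->attrs[0].value = gpu_id;		// user GPU id from topology
 *	ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 */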
4349