// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/hmm-dma.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/leafops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/pci-p2pdma.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

enum {
	/* These flags are carried from input to output */
	HMM_PFN_INOUT_FLAGS = HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA |
			      HMM_PFN_P2PDMA_BUS,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
		range->hmm_pfns[i] |= cpu_flags;
	}
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * Consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * in two ways: in the first, the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for those faults;
	 * in the second, the HMM user wants to pre-fault a range with
	 * specific flags. For the latter it would be a waste to have the
	 * user pre-fill the pfn array with a default flags value.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If the CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}
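
/*
 * Illustrative sketch (not part of the kernel code): the two ways a caller
 * can request faulting, as described in the comment above. The field names
 * match struct hmm_range; the index variable is hypothetical.
 *
 *	// 1) Pre-fault the whole range with one set of default flags:
 *	range.default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *	range.pfn_flags_mask = 0;		// ignore per-pfn input flags
 *
 *	// 2) Coalesce individual faults with flags chosen per pfn:
 *	range.default_flags = 0;
 *	range.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *	range.hmm_pfns[i] = HMM_PFN_REQ_FAULT;	// fault only this page
 */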

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}
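
/*
 * Illustrative sketch (not part of the kernel code): a driver consuming the
 * output array can recover the mapping size encoded above with
 * hmm_pfn_to_map_order() from include/linux/hmm.h, e.g. for a PMD-sized leaf
 * with 4KB base pages:
 *
 *	unsigned int order = hmm_pfn_to_map_order(range.hmm_pfns[i]);
 *	unsigned long size = PAGE_SIZE << order;	// 2MB for a PMD leaf
 */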

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
		hmm_pfns[i] |= pfn | cpu_flags;
	}
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		       unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = ptep_get(ptep);
	uint64_t pfn_req_flags = *hmm_pfn;
	uint64_t new_pfn_flags = 0;

	/*
	 * Any other marker than a UFFD WP marker will result in a fault error
	 * that will be correctly handled, so we need only check for UFFD WP
	 * here.
	 */
	if (pte_none(pte) || pte_is_uffd_wp_marker(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		goto out;
	}

	if (!pte_present(pte)) {
		const softleaf_t entry = softleaf_from_pte(pte);

		/*
		 * Don't fault in device private pages owned by the caller,
		 * just report the PFN.
		 */
		if (softleaf_is_device_private(entry) &&
		    page_pgmap(softleaf_to_page(entry))->owner ==
		    range->dev_private_owner) {
			cpu_flags = HMM_PFN_VALID;
			if (softleaf_is_device_private_write(entry))
				cpu_flags |= HMM_PFN_WRITE;
			new_pfn_flags = softleaf_to_pfn(entry) | cpu_flags;
			goto out;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault)
			goto out;

		if (softleaf_is_swap(entry))
			goto fault;

		if (softleaf_is_device_private(entry))
			goto fault;

		if (softleaf_is_device_exclusive(entry))
			goto fault;

		if (softleaf_is_migration(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		new_pfn_flags = HMM_PFN_ERROR;
		goto out;
	}

	new_pfn_flags = pte_pfn(pte) | cpu_flags;
out:
	*hmm_pfn = (*hmm_pfn & HMM_PFN_INOUT_FLAGS) | new_pfn_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}
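
/*
 * Illustrative sketch (not part of the kernel code): a driver that wants its
 * own device-private pages reported by PFN, rather than faulted back to
 * system memory, sets dev_private_owner to the same owner pointer it stored
 * in its struct dev_pagemap, so the ownership test above matches. The name
 * "my_driver_pagemap_owner" is hypothetical.
 *
 *	range.dev_private_owner = &my_driver_pagemap_owner; // == pgmap->owner
 */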

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static int hmm_vma_handle_absent_pmd(struct mm_walk *walk, unsigned long start,
				     unsigned long end, unsigned long *hmm_pfns,
				     pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	const softleaf_t entry = softleaf_from_pmd(pmd);
	unsigned long addr = start;
	unsigned int required_fault;

	if (softleaf_is_device_private(entry) &&
	    softleaf_to_folio(entry)->pgmap->owner ==
	    range->dev_private_owner) {
		unsigned long cpu_flags = HMM_PFN_VALID |
			hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
		unsigned long pfn = softleaf_to_pfn(entry);
		unsigned long i;

		if (softleaf_is_device_private_write(entry))
			cpu_flags |= HMM_PFN_WRITE;

		/*
		 * Fully populate the PFN list though subsequent PFNs could be
		 * inferred, because drivers which are not yet aware of large
		 * folios probably do not support sparsely populated PFN lists.
		 */
		for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
			hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
			hmm_pfns[i] |= pfn | cpu_flags;
		}

		return 0;
	}

	required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
					      npages, 0);
	if (required_fault) {
		if (softleaf_is_device_private(entry))
			return hmm_vma_fault(addr, end, required_fault, walk);
		else
			return -EFAULT;
	}

	return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
}
#else
static int hmm_vma_handle_absent_pmd(struct mm_walk *walk, unsigned long start,
				     unsigned long end, unsigned long *hmm_pfns,
				     pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long npages = (end - start) >> PAGE_SHIFT;

	if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
		return -EFAULT;
	return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = pmdp_get_lockless(pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && pmd_is_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd))
		return hmm_vma_handle_absent_pmd(walk, start, end, hmm_pfns,
						 pmd);

	if (pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here; even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value again and check that it is still
		 * a transparent huge or device mapping, then compute the
		 * corresponding pfn values.
		 */
		pmd = pmdp_get_lockless(pmdp);
		if (!pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	if (!ptep)
		goto again;
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = pudp_get(pudp);
	if (!pud_present(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_leaf(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
			hmm_pfns[i] |= pfn | cpu_flags;
		}
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_pud NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(walk->mm, addr, pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		int ret;

		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
		/*
		 * Avoid deadlock: drop the vma lock before calling
		 * hmm_vma_fault(), which will itself potentially take and
		 * drop the vma lock. This is also correct from a
		 * protection point of view, because there is no further
		 * use here of either pte or ptl after dropping the vma
		 * lock.
		 */
		ret = hmm_vma_fault(addr, end, required_fault, walk);
		hugetlb_vma_lock_read(vma);
		return ret;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
		range->hmm_pfns[i] |= pfn | cpu_flags;
	}

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
	.walk_lock	= PGWALK_RDLOCK,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid
 *		vma (e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access it.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
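
/*
 * Illustrative sketch (not part of the kernel code) of the expected calling
 * pattern, following Documentation/mm/hmm.rst; "driver_lock" is a
 * hypothetical driver-side lock that serializes against the driver's mmu
 * interval notifier invalidation callback:
 *
 *	again:
 *	range.notifier_seq = mmu_interval_read_begin(range.notifier);
 *	mmap_read_lock(mm);
 *	ret = hmm_range_fault(&range);
 *	mmap_read_unlock(mm);
 *	if (ret == -EBUSY)
 *		goto again;
 *	else if (ret)
 *		return ret;
 *
 *	mutex_lock(&driver_lock);
 *	if (mmu_interval_read_retry(range.notifier, range.notifier_seq)) {
 *		mutex_unlock(&driver_lock);
 *		goto again;
 *	}
 *	// program the device page tables from range.hmm_pfns[] here
 *	mutex_unlock(&driver_lock);
 */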

/**
 * hmm_dma_map_alloc - Allocate HMM map structure
 * @dev: device to allocate structure for
 * @map: HMM map to allocate
 * @nr_entries: number of entries in the map
 * @dma_entry_size: size of the DMA entry in the map
 *
 * Allocate the HMM map structure and all the lists it contains.
 * Return 0 on success, -ENOMEM on failure.
 */
int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
		      size_t nr_entries, size_t dma_entry_size)
{
	bool dma_need_sync = false;
	bool use_iova;

	WARN_ON_ONCE(!(nr_entries * PAGE_SIZE / dma_entry_size));

	/*
	 * The HMM API violates our normal DMA buffer ownership rules and can't
	 * transfer buffer ownership. The dma_addressing_limited() check is a
	 * best approximation to ensure no swiotlb buffering happens.
	 */
#ifdef CONFIG_DMA_NEED_SYNC
	dma_need_sync = !dev->dma_skip_sync;
#endif /* CONFIG_DMA_NEED_SYNC */
	if (dma_need_sync || dma_addressing_limited(dev))
		return -EOPNOTSUPP;

	map->dma_entry_size = dma_entry_size;
	map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
				 GFP_KERNEL | __GFP_NOWARN);
	if (!map->pfn_list)
		return -ENOMEM;

	use_iova = dma_iova_try_alloc(dev, &map->state, 0,
				      nr_entries * PAGE_SIZE);
	if (!use_iova && dma_need_unmap(dev)) {
		map->dma_list = kvcalloc(nr_entries, sizeof(*map->dma_list),
					 GFP_KERNEL | __GFP_NOWARN);
		if (!map->dma_list)
			goto err_dma;
	}
	return 0;

err_dma:
	kvfree(map->pfn_list);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hmm_dma_map_alloc);
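
/*
 * Illustrative sketch (not part of the kernel code): a driver would typically
 * size the map to the number of pages it intends to fault, use PAGE_SIZE
 * entries, and point the range's pfn array at the allocated pfn_list:
 *
 *	struct hmm_dma_map map;
 *	int ret;
 *
 *	ret = hmm_dma_map_alloc(dev, &map, npages, PAGE_SIZE);
 *	if (ret)
 *		return ret;
 *	range.hmm_pfns = map.pfn_list;
 */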

/**
 * hmm_dma_map_free - Free HMM map structure
 * @dev: device to free structure from
 * @map: HMM map containing the various lists and state
 *
 * Free the HMM map structure and all the lists it contains.
 */
void hmm_dma_map_free(struct device *dev, struct hmm_dma_map *map)
{
	if (dma_use_iova(&map->state))
		dma_iova_free(dev, &map->state);
	kvfree(map->pfn_list);
	kvfree(map->dma_list);
}
EXPORT_SYMBOL_GPL(hmm_dma_map_free);

/**
 * hmm_dma_map_pfn - Map a physical HMM page to DMA address
 * @dev: Device to map the page for
 * @map: HMM map
 * @idx: Index into the PFN and dma address arrays
 * @p2pdma_state: PCI P2P state.
 *
 * dma_alloc_iova() allocates IOVA based on the size specified by their use in
 * iova->size. Call this function after IOVA allocation to link the whole @page
 * to get the DMA address. Note that the very first call to this function
 * will have @offset set to 0 in the IOVA space allocated from
 * dma_alloc_iova(). For subsequent calls to this function on the same @iova,
 * @offset needs to be advanced by the caller with the size of the previous
 * page that was linked + DMA address returned for the previous page that was
 * linked by this function.
 */
dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
			   size_t idx,
			   struct pci_p2pdma_map_state *p2pdma_state)
{
	struct dma_iova_state *state = &map->state;
	dma_addr_t *dma_addrs = map->dma_list;
	unsigned long *pfns = map->pfn_list;
	struct page *page = hmm_pfn_to_page(pfns[idx]);
	phys_addr_t paddr = hmm_pfn_to_phys(pfns[idx]);
	size_t offset = idx * map->dma_entry_size;
	unsigned long attrs = 0;
	dma_addr_t dma_addr;
	int ret;

	if ((pfns[idx] & HMM_PFN_DMA_MAPPED) &&
	    !(pfns[idx] & HMM_PFN_P2PDMA_BUS)) {
		/*
		 * We are in this flow when there is a need to resync flags,
		 * for example when the page was already linked in a prefetch
		 * call with the READ flag and now we need to add the WRITE
		 * flag.
		 *
		 * This page was already programmed to HW and we don't want/need
		 * to unlink and link it again just to resync flags.
		 */
		if (dma_use_iova(state))
			return state->addr + offset;

		/*
		 * Without dma_need_unmap, the dma_addrs array is NULL, thus we
		 * need to regenerate the address below even if there already
		 * was a mapping. But !dma_need_unmap implies that the
		 * mapping is stateless, so this is fine.
		 */
		if (dma_need_unmap(dev))
			return dma_addrs[idx];

		/* Continue to remapping */
	}

	switch (pci_p2pdma_state(p2pdma_state, dev, page)) {
	case PCI_P2PDMA_MAP_NONE:
		break;
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		attrs |= DMA_ATTR_MMIO;
		pfns[idx] |= HMM_PFN_P2PDMA;
		break;
	case PCI_P2PDMA_MAP_BUS_ADDR:
		pfns[idx] |= HMM_PFN_P2PDMA_BUS | HMM_PFN_DMA_MAPPED;
		return pci_p2pdma_bus_addr_map(p2pdma_state->mem, paddr);
	default:
		return DMA_MAPPING_ERROR;
	}

	if (dma_use_iova(state)) {
		ret = dma_iova_link(dev, state, paddr, offset,
				    map->dma_entry_size, DMA_BIDIRECTIONAL,
				    attrs);
		if (ret)
			goto error;

		ret = dma_iova_sync(dev, state, offset, map->dma_entry_size);
		if (ret) {
			dma_iova_unlink(dev, state, offset, map->dma_entry_size,
					DMA_BIDIRECTIONAL, attrs);
			goto error;
		}

		dma_addr = state->addr + offset;
	} else {
		if (WARN_ON_ONCE(dma_need_unmap(dev) && !dma_addrs))
			goto error;

		dma_addr = dma_map_phys(dev, paddr, map->dma_entry_size,
					DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma_addr))
			goto error;

		if (dma_need_unmap(dev))
			dma_addrs[idx] = dma_addr;
	}
	pfns[idx] |= HMM_PFN_DMA_MAPPED;
	return dma_addr;
error:
	pfns[idx] &= ~HMM_PFN_P2PDMA;
	return DMA_MAPPING_ERROR;
}
EXPORT_SYMBOL_GPL(hmm_dma_map_pfn);
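
/*
 * Illustrative sketch (not part of the kernel code): after a successful
 * hmm_range_fault() the driver maps each populated entry in turn; "nr_pages"
 * and the error handling are hypothetical:
 *
 *	struct pci_p2pdma_map_state p2pdma_state = {};
 *	dma_addr_t dma;
 *	size_t i;
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		dma = hmm_dma_map_pfn(dev, &map, i, &p2pdma_state);
 *		if (dma_mapping_error(dev, dma))
 *			return -EFAULT;
 *		// hand "dma" to the device for entry i
 *	}
 */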

/**
 * hmm_dma_unmap_pfn - Unmap a physical HMM page from DMA address
 * @dev: Device to unmap the page from
 * @map: HMM map
 * @idx: Index of the PFN to unmap
 *
 * Returns true if the PFN was mapped and has been unmapped, false otherwise.
 */
bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
{
	const unsigned long valid_dma = HMM_PFN_VALID | HMM_PFN_DMA_MAPPED;
	struct dma_iova_state *state = &map->state;
	dma_addr_t *dma_addrs = map->dma_list;
	unsigned long *pfns = map->pfn_list;
	unsigned long attrs = 0;

	if ((pfns[idx] & valid_dma) != valid_dma)
		return false;

	if (pfns[idx] & HMM_PFN_P2PDMA)
		attrs |= DMA_ATTR_MMIO;

	if (pfns[idx] & HMM_PFN_P2PDMA_BUS)
		; /* no need to unmap bus address P2P mappings */
	else if (dma_use_iova(state))
		dma_iova_unlink(dev, state, idx * map->dma_entry_size,
				map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
	else if (dma_need_unmap(dev))
		dma_unmap_phys(dev, dma_addrs[idx], map->dma_entry_size,
			       DMA_BIDIRECTIONAL, attrs);

	pfns[idx] &=
		~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | HMM_PFN_P2PDMA_BUS);
	return true;
}
EXPORT_SYMBOL_GPL(hmm_dma_unmap_pfn);
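
/*
 * Illustrative sketch (not part of the kernel code): teardown mirrors the
 * mapping loop above, then releases the map itself:
 *
 *	for (i = 0; i < nr_pages; i++)
 *		hmm_dma_unmap_pfn(dev, &map, i);
 *	hmm_dma_map_free(dev, &map);
 */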