/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 *
 * See Documentation/mm/hmm.rst for reasons and overview of what HMM is.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/mm.h>

struct mmu_interval_notifier;

/*
 * On output:
 * 0             - The page is faultable and a future call with
 *                 HMM_PFN_REQ_FAULT could succeed.
 * HMM_PFN_VALID - the pfn field points to a valid PFN. This PFN is at
 *                 least readable. If dev_private_owner is !NULL then this
 *                 could point at a DEVICE_PRIVATE page.
 * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
 * HMM_PFN_ERROR - accessing the pfn is impossible and the device should
 *                 fail, e.g. poisoned memory, special pages, no vma, etc.
 * HMM_PFN_P2PDMA - P2P page
 * HMM_PFN_P2PDMA_BUS - Bus mapped P2P transfer
 * HMM_PFN_DMA_MAPPED - Flag preserved on input-to-output transformation
 *                      to mark that the page is already DMA mapped
 *
 * On input:
 * 0                 - Return the current state of the page, do not fault it.
 * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or hmm_range_fault()
 *                     will fail.
 * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or hmm_range_fault()
 *                     will fail. Must be combined with HMM_PFN_REQ_FAULT.
 */
enum hmm_pfn_flags {
	/* Output fields and flags */
	HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
	HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
	HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
	/*
	 * Sticky flags, carried from input to output,
	 * don't forget to update HMM_PFN_INOUT_FLAGS
	 */
	HMM_PFN_DMA_MAPPED = 1UL << (BITS_PER_LONG - 4),
	HMM_PFN_P2PDMA     = 1UL << (BITS_PER_LONG - 5),
	HMM_PFN_P2PDMA_BUS = 1UL << (BITS_PER_LONG - 6),

	HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 11),

	/* Input flags */
	HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
	HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,

	HMM_PFN_FLAGS = ~((1UL << HMM_PFN_ORDER_SHIFT) - 1),
};
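
/*
 * Example (an illustrative sketch, condensed from Documentation/mm/hmm.rst;
 * 'range' and 'index_of_write' are placeholders): the effective input flags
 * for each entry are (hmm_pfns[i] & pfn_flags_mask) | default_flags, so to
 * fault in a whole range as writable a caller can clear the mask and rely
 * on default_flags alone:
 *
 *	range.default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *	range.pfn_flags_mask = 0;
 *	ret = hmm_range_fault(&range);
 *
 * To fault the whole range read-only but require write access for one
 * page, let the mask pass the per-entry bit through:
 *
 *	range.default_flags = HMM_PFN_REQ_FAULT;
 *	range.pfn_flags_mask = HMM_PFN_REQ_WRITE;
 *	range.hmm_pfns[index_of_write] = HMM_PFN_REQ_WRITE;
 */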

/*
 * hmm_pfn_to_page() - return struct page pointed to by a device entry
 *
 * This must be called under the caller's 'user_lock' after a successful
 * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
 * already.
 */
static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
{
	return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
}

/*
 * hmm_pfn_to_phys() - return physical address pointed to by a device entry
 */
static inline phys_addr_t hmm_pfn_to_phys(unsigned long hmm_pfn)
{
	return __pfn_to_phys(hmm_pfn & ~HMM_PFN_FLAGS);
}
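
/*
 * Example (an illustrative sketch, not part of this API): after a
 * successful hmm_range_fault(), and while still holding the driver lock
 * that is validated against the mmu_interval_notifier, output entries can
 * be decoded with the helpers above:
 *
 *	unsigned long entry = range.hmm_pfns[i];
 *
 *	if (entry & HMM_PFN_ERROR)
 *		return -EFAULT;
 *	if (entry & HMM_PFN_VALID) {
 *		struct page *page = hmm_pfn_to_page(entry);
 *		phys_addr_t paddr = hmm_pfn_to_phys(entry);
 *		bool writable = entry & HMM_PFN_WRITE;
 *		... program one device PTE from page/paddr/writable ...
 *	}
 */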

/*
 * hmm_pfn_to_map_order() - return the CPU mapping size order
 *
 * This is optionally useful to optimize processing of the pfn result
 * array. It indicates that the page starts at the order aligned VA and is
 * 1<<order bytes long. Every pfn within a high order page will have the
 * same pfn flags, both access protections and the map_order. The caller must
 * be careful with edge cases as the start and end VA of the given page may
 * extend past the range used with hmm_range_fault().
 *
 * This must be called under the caller's 'user_lock' after a successful
 * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
 * already.
 */
static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
{
	return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
}
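
/*
 * Example (an illustrative sketch, not part of this API): the map order
 * lets a result walk step over whole high order pages. The order-aligned
 * page may begin before range.start, hence the offset computation below:
 *
 *	unsigned long first_pfn = range.start >> PAGE_SHIFT;
 *	unsigned long npages = (range.end - range.start) >> PAGE_SHIFT;
 *	unsigned long i = 0;
 *
 *	while (i < npages) {
 *		unsigned int order = hmm_pfn_to_map_order(range.hmm_pfns[i]);
 *		unsigned long span = (1UL << order) -
 *				     ((first_pfn + i) & ((1UL << order) - 1));
 *
 *		span = min(span, npages - i);
 *		... handle 'span' pfns sharing this entry's flags ...
 *		i += span;
 *	}
 */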

/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @notifier: a mmu_interval_notifier that includes the start/end
 * @notifier_seq: result of mmu_interval_read_begin()
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @hmm_pfns: array of pfns (big enough for the range)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
 * @dev_private_owner: owner of device private pages
 */
struct hmm_range {
	struct mmu_interval_notifier *notifier;
	unsigned long		notifier_seq;
	unsigned long		start;
	unsigned long		end;
	unsigned long		*hmm_pfns;
	unsigned long		default_flags;
	unsigned long		pfn_flags_mask;
	void			*dev_private_owner;
};

/*
 * Please see Documentation/mm/hmm.rst for how to use the range API.
 */
int hmm_range_fault(struct hmm_range *range);
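
/*
 * Example (condensed from Documentation/mm/hmm.rst; 'driver_lock' stands
 * for whatever lock the driver's invalidate callback also takes, and the
 * device page table update is a placeholder): hmm_range_fault() must be
 * paired with the mmu interval notifier sequence check, retrying if the
 * range was invalidated before the driver lock was taken:
 *
 *	again:
 *	range.notifier_seq = mmu_interval_read_begin(range.notifier);
 *	mmap_read_lock(mm);
 *	ret = hmm_range_fault(&range);
 *	mmap_read_unlock(mm);
 *	if (ret) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	mutex_lock(&driver_lock);
 *	if (mmu_interval_read_retry(range.notifier, range.notifier_seq)) {
 *		mutex_unlock(&driver_lock);
 *		goto again;
 *	}
 *	... update the device page tables from range.hmm_pfns ...
 *	mutex_unlock(&driver_lock);
 */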

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms, i.e. 1s, is already a long time
 * to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
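
/*
 * Example (an illustrative sketch, not mandated by this header): drivers
 * typically bound the fault/retry loop shown above with this timeout:
 *
 *	unsigned long timeout = jiffies +
 *		msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
 *
 *	again:
 *	if (time_after(jiffies, timeout))
 *		return -EBUSY;
 *	range.notifier_seq = mmu_interval_read_begin(range.notifier);
 *	...
 */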

#endif /* LINUX_HMM_H */