/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_

#include <linux/mmzone.h>
#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @end_pfn: first pfn past the end of the mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	unsigned long base_pfn;
	const unsigned long end_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};
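
/*
 * Illustrative sketch (not part of this header): a driver that wants the
 * memmap for a device range to be allocated out of that range itself can
 * describe the carve-out with a vmem_altmap and advertise it via
 * PGMAP_ALTMAP_VALID (see below). The memcpy() sidesteps the const members,
 * as the nvdimm code does; nr_reserved and nr_memmap_pages are hypothetical:
 *
 *	struct vmem_altmap altmap = {
 *		.base_pfn = PHYS_PFN(range->start),
 *		.end_pfn = PHYS_PFN(range->end + 1),
 *		.reserve = nr_reserved,		// kept aside for the driver
 *		.free = nr_memmap_pages,	// available for memmap storage
 *	};
 *
 *	memcpy(&pgmap->altmap, &altmap, sizeof(altmap));
 *	pgmap->flags |= PGMAP_ALTMAP_VALID;
 */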

/*
 * Specialize ZONE_DEVICE memory into multiple types, each with a different
 * usage.
 *
 * MEMORY_DEVICE_PRIVATE:
 * Device memory that is not directly addressable by the CPU: the CPU can
 * neither read nor write private memory. In this case, we do still have
 * struct pages backing the device memory. Doing so simplifies the
 * implementation, but it is important to remember that there are certain
 * points at which the struct page must be treated as an opaque object,
 * rather than a "normal" struct page.
 *
 * A more complete discussion of unaddressable memory may be found in
 * include/linux/hmm.h and Documentation/mm/hmm.rst.
 *
 * MEMORY_DEVICE_COHERENT:
 * Device memory that is cache coherent from both the device's and the CPU's
 * point of view. This is used on platforms that have an advanced system bus
 * (like CAPI or CXL). A driver can hotplug the device memory using
 * ZONE_DEVICE with this memory type. Any page of a process can be migrated
 * to such memory. However, no one should be allowed to pin such memory so
 * that it can always be evicted.
 *
 * MEMORY_DEVICE_FS_DAX:
 * Host memory that has similar access semantics to System RAM, i.e. it is
 * DMA coherent and supports page pinning. To coordinate page pinning with
 * other operations, MEMORY_DEVICE_FS_DAX arranges for a wakeup event
 * whenever a page is unpinned and becomes idle. This wakeup is used to
 * coordinate physical address space management (ex: fs truncate/hole punch)
 * vs pinned pages (ex: device dma).
 *
 * MEMORY_DEVICE_GENERIC:
 * Host memory that has similar access semantics to System RAM, i.e. it is
 * DMA coherent and supports page pinning. This is, for example, used by DAX
 * devices that expose memory using a character device.
 *
 * MEMORY_DEVICE_PCI_P2PDMA:
 * Device memory residing in a PCI BAR, intended for use with peer-to-peer
 * transactions.
 */
enum memory_type {
	/* 0 is reserved to catch uninitialized type fields */
	MEMORY_DEVICE_PRIVATE = 1,
	MEMORY_DEVICE_COHERENT,
	MEMORY_DEVICE_FS_DAX,
	MEMORY_DEVICE_GENERIC,
	MEMORY_DEVICE_PCI_P2PDMA,
};

struct dev_pagemap_ops {
	/*
	 * Called once the folio refcount reaches 0. The reference count will
	 * be reset to one by the core code after the method is called to
	 * prepare for handing out the folio again.
	 */
	void (*folio_free)(struct folio *folio);

	/*
	 * Used for private (un-addressable) device memory only. Must migrate
	 * the page back to a CPU-accessible page.
	 */
	vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);

	/*
	 * Handle a memory failure that happens on a range of pfns: notify
	 * the processes using these pfns, and try to recover the data on
	 * them if necessary. The mf_flags is passed through the whole
	 * notification chain down to the recovery function.
	 *
	 * When this is not implemented, or it returns -EOPNOTSUPP, the
	 * caller will fall back to a common handler called
	 * mf_generic_kill_procs().
	 */
	int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
			      unsigned long nr_pages, int mf_flags);

	/*
	 * Used for private (un-addressable) device memory only.
	 * This callback is invoked when a folio is split into
	 * smaller folios.
	 */
	void (*folio_split)(struct folio *head, struct folio *tail);
};
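
/*
 * Illustrative skeleton of a dev_pagemap_ops implementation for
 * MEMORY_DEVICE_PRIVATE memory (device-private memory requires both
 * folio_free and migrate_to_ram). Everything prefixed my_ is a hypothetical
 * placeholder, not part of this header:
 *
 *	static void my_folio_free(struct folio *folio)
 *	{
 *		// Refcount hit zero: return the device block backing this
 *		// folio to the driver's allocator. The core resets the
 *		// folio refcount to one after this returns.
 *		my_devmem_free(folio_zone_device_data(folio));
 *	}
 *
 *	static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
 *	{
 *		// The CPU faulted on unaddressable device memory: migrate
 *		// the data back to system RAM (e.g. with the migrate_vma_*
 *		// helpers) so the fault can be retried on a normal page.
 *		return my_devmem_fault(vmf);
 *	}
 *
 *	static const struct dev_pagemap_ops my_pagemap_ops = {
 *		.folio_free	= my_folio_free,
 *		.migrate_to_ram	= my_migrate_to_ram,
 *	};
 */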

#define PGMAP_ALTMAP_VALID	(1 << 0)

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @done: completion for @ref
 * @type: memory type: see MEMORY_* above in memremap.h
 * @flags: PGMAP_* flags to specify detailed behavior
 * @vmemmap_shift: structural definition of how the vmemmap page metadata
 *	is populated, specifically the metadata page order.
 *	A zero value (default) uses base pages as the vmemmap metadata
 *	representation. A bigger value will set up compound struct pages
 *	of the requested order value.
 * @ops: method table
 * @owner: an opaque pointer identifying the entity that manages this
 *	instance. Used by various helpers to make sure that no
 *	foreign ZONE_DEVICE memory is accessed.
 * @nr_range: number of ranges to be mapped
 * @range: range to be mapped when nr_range == 1
 * @ranges: array of ranges to be mapped when nr_range > 1
 */
struct dev_pagemap {
	struct vmem_altmap altmap;
	struct percpu_ref ref;
	struct completion done;
	enum memory_type type;
	unsigned int flags;
	unsigned long vmemmap_shift;
	const struct dev_pagemap_ops *ops;
	void *owner;
	int nr_range;
	union {
		struct range range;
		DECLARE_FLEX_ARRAY(struct range, ranges);
	};
};
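
/*
 * Illustrative sketch of setting up a dev_pagemap for a single
 * device-private range and creating its struct pages with
 * devm_memremap_pages() (declared below). res, my_pagemap_ops and
 * my_driver_data are hypothetical:
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	pgmap->ops = &my_pagemap_ops;
 *	pgmap->owner = my_driver_data;
 *
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */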

static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
{
	return pgmap->ops && pgmap->ops->memory_failure;
}

static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
	if (pgmap->flags & PGMAP_ALTMAP_VALID)
		return &pgmap->altmap;
	return NULL;
}

static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
{
	return 1 << pgmap->vmemmap_shift;
}
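
/*
 * For example (illustrative): a pgmap created with pgmap->vmemmap_shift set
 * to PMD_ORDER (9 with 4K pages) describes its memory with compound pages of
 * that order, and pgmap_vmemmap_nr() then reports 512 base pages per
 * compound page; with the default vmemmap_shift of 0 it reports 1.
 */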

static inline bool folio_is_device_private(const struct folio *folio)
{
	return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
		folio_is_zone_device(folio) &&
		folio->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool is_device_private_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
		folio_is_device_private(page_folio(page));
}

static inline bool folio_is_pci_p2pdma(const struct folio *folio)
{
	return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		folio_is_zone_device(folio) &&
		folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}

static inline void *folio_zone_device_data(const struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
	return folio->page.zone_device_data;
}

static inline void folio_set_zone_device_data(struct folio *folio, void *data)
{
	VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
	folio->page.zone_device_data = data;
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		folio_is_pci_p2pdma(page_folio(page));
}

static inline bool folio_is_device_coherent(const struct folio *folio)
{
	return folio_is_zone_device(folio) &&
		folio->pgmap->type == MEMORY_DEVICE_COHERENT;
}

static inline bool is_device_coherent_page(const struct page *page)
{
	return folio_is_device_coherent(page_folio(page));
}

static inline bool folio_is_fsdax(const struct folio *folio)
{
	return folio_is_zone_device(folio) &&
		folio->pgmap->type == MEMORY_DEVICE_FS_DAX;
}

static inline bool is_fsdax_page(const struct page *page)
{
	return folio_is_fsdax(page_folio(page));
}

#ifdef CONFIG_ZONE_DEVICE
void zone_device_page_init(struct page *page, unsigned int order);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);

unsigned long memremap_compat_align(void);

static inline void zone_device_folio_init(struct folio *folio, unsigned int order)
{
	zone_device_page_init(&folio->page, order);
	if (order)
		folio_set_large_rmappable(folio);
}
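
/*
 * Illustrative sketch (hypothetical driver): handing out a free device folio,
 * e.g. as the destination of a migrate_vma_setup()/migrate_vma_pages() cycle.
 * my_alloc_device_pfn() and drvdata are placeholders:
 *
 *	unsigned long pfn = my_alloc_device_pfn(drvdata);
 *	struct folio *folio = page_folio(pfn_to_page(pfn));
 *
 *	zone_device_folio_init(folio, 0);	// order-0: single-page folio
 *	folio_set_zone_device_data(folio, drvdata);
 */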

static inline void zone_device_private_split_cb(struct folio *original_folio,
						struct folio *new_folio)
{
	if (folio_is_device_private(original_folio)) {
		if (!original_folio->pgmap->ops->folio_split) {
			if (new_folio) {
				new_folio->pgmap = original_folio->pgmap;
				new_folio->page.mapping =
					original_folio->page.mapping;
			}
		} else {
			original_folio->pgmap->ops->folio_split(original_folio,
								new_folio);
		}
	}
}

#else
static inline void *devm_memremap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled; callers are expected to fall
	 * back to plain devm_memremap() based on config.
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

static inline void devm_memunmap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
}

static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
{
	return NULL;
}

static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	return false;
}

/* When memremap_pages() is disabled, all archs can remap a single page. */
static inline unsigned long memremap_compat_align(void)
{
	return PAGE_SIZE;
}

static inline void zone_device_private_split_cb(struct folio *original_folio,
						struct folio *new_folio)
{
}
#endif /* CONFIG_ZONE_DEVICE */

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(&pgmap->ref);
}
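
/*
 * Illustrative sketch: with CONFIG_ZONE_DEVICE enabled, get_dev_pagemap()
 * takes a reference on the pgmap covering @pfn (or returns NULL), and every
 * successful lookup must be balanced with put_dev_pagemap():
 *
 *	struct dev_pagemap *pgmap = get_dev_pagemap(pfn);
 *
 *	if (pgmap) {
 *		// ... inspect pgmap->type, pgmap->owner, etc. ...
 *		put_dev_pagemap(pgmap);
 *	}
 */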

#endif /* _LINUX_MEMREMAP_H_ */