xref: /linux/mm/memremap.c (revision c9fdc4d5487a16bd1f003fc8b66e91f88efb50e6)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2015 Intel Corporation. All rights reserved. */
3 #include <linux/device.h>
4 #include <linux/io.h>
5 #include <linux/kasan.h>
6 #include <linux/memory_hotplug.h>
7 #include <linux/mm.h>
8 #include <linux/pfn_t.h>
9 #include <linux/swap.h>
10 #include <linux/mmzone.h>
11 #include <linux/swapops.h>
12 #include <linux/types.h>
13 #include <linux/wait_bit.h>
14 #include <linux/xarray.h>
15 
16 static DEFINE_XARRAY(pgmap_array);
17 
18 /*
19  * The memremap() and memremap_pages() interfaces are alternately used
20  * to map persistent memory namespaces. These interfaces place different
21  * constraints on the alignment and size of the mapping (namespace).
22  * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
23  * only map subsections (2MB), and on at least one architecture (PowerPC)
24  * the minimum mapping granularity of memremap_pages() is 16MB.
25  *
26  * The role of memremap_compat_align() is to communicate the minimum
27  * arch supported alignment of a namespace such that it can freely
28  * switch modes without violating the arch constraint. Namely, do not
29  * allow a namespace to be PAGE_SIZE aligned since that namespace may be
30  * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
31  */
32 #ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
33 unsigned long memremap_compat_align(void)
34 {
35 	return SUBSECTION_SIZE;
36 }
37 EXPORT_SYMBOL_GPL(memremap_compat_align);
38 #endif
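
/*
 * Editor's example (illustrative sketch, not part of memremap.c): how a
 * namespace driver might consult memremap_compat_align() before allowing a
 * mode switch. The helper below and its callers are hypothetical.
 */
#if 0
static int example_validate_namespace(resource_size_t start, resource_size_t size)
{
	unsigned long align = memremap_compat_align();

	/* Both the base and the size must honour the arch minimum alignment. */
	if (!IS_ALIGNED(start, align) || !IS_ALIGNED(size, align))
		return -EINVAL;
	return 0;
}
#endif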
39 
40 #ifdef CONFIG_DEV_PAGEMAP_OPS
41 DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
42 EXPORT_SYMBOL(devmap_managed_key);
43 
44 static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
45 {
46 	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
47 	    pgmap->type == MEMORY_DEVICE_FS_DAX)
48 		static_branch_dec(&devmap_managed_key);
49 }
50 
51 static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
52 {
53 	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
54 	    pgmap->type == MEMORY_DEVICE_FS_DAX)
55 		static_branch_inc(&devmap_managed_key);
56 }
57 #else
58 static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
59 {
60 }
61 static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
62 {
63 }
64 #endif /* CONFIG_DEV_PAGEMAP_OPS */
65 
66 static void pgmap_array_delete(struct range *range)
67 {
68 	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
69 			NULL, GFP_KERNEL);
70 	synchronize_rcu();
71 }
72 
73 static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
74 {
75 	struct range *range = &pgmap->ranges[range_id];
76 	unsigned long pfn = PHYS_PFN(range->start);
77 
78 	if (range_id)
79 		return pfn;
80 	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
81 }
82 
83 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
84 {
85 	int i;
86 
87 	for (i = 0; i < pgmap->nr_range; i++) {
88 		struct range *range = &pgmap->ranges[i];
89 
90 		if (pfn >= PHYS_PFN(range->start) &&
91 		    pfn <= PHYS_PFN(range->end))
92 			return pfn >= pfn_first(pgmap, i);
93 	}
94 
95 	return false;
96 }
97 
98 static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
99 {
100 	const struct range *range = &pgmap->ranges[range_id];
101 
102 	return (range->start + range_len(range)) >> PAGE_SHIFT;
103 }
104 
105 static unsigned long pfn_next(struct dev_pagemap *pgmap, unsigned long pfn)
106 {
107 	if (pfn % (1024 << pgmap->vmemmap_shift))
108 		cond_resched();
109 	return pfn + pgmap_vmemmap_nr(pgmap);
110 }
111 
112 static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
113 {
114 	return (pfn_end(pgmap, range_id) -
115 		pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
116 }
117 
118 #define for_each_device_pfn(pfn, map, i) \
119 	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); \
120 	     pfn = pfn_next(map, pfn))
121 
122 static void dev_pagemap_kill(struct dev_pagemap *pgmap)
123 {
124 	if (pgmap->ops && pgmap->ops->kill)
125 		pgmap->ops->kill(pgmap);
126 	else
127 		percpu_ref_kill(pgmap->ref);
128 }
129 
130 static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
131 {
132 	if (pgmap->ops && pgmap->ops->cleanup) {
133 		pgmap->ops->cleanup(pgmap);
134 	} else {
135 		wait_for_completion(&pgmap->done);
136 		percpu_ref_exit(pgmap->ref);
137 	}
138 	/*
139 	 * Undo the pgmap ref assignment for the internal case as the
140 	 * caller may re-enable the same pgmap.
141 	 */
142 	if (pgmap->ref == &pgmap->internal_ref)
143 		pgmap->ref = NULL;
144 }
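
/*
 * Editor's example (illustrative sketch, not part of memremap.c): a caller
 * that provides its own pgmap->ref instead of relying on internal_ref must
 * also provide ->kill() and ->cleanup() callbacks (see memremap_pages()
 * below). The driver structure and names here are hypothetical.
 */
#if 0
struct example_dev {
	struct dev_pagemap pgmap;
	struct percpu_ref ref;
	struct completion ref_done;
};

static void example_ref_release(struct percpu_ref *ref)
{
	struct example_dev *edev = container_of(ref, struct example_dev, ref);

	complete(&edev->ref_done);
}

static void example_kill(struct dev_pagemap *pgmap)
{
	struct example_dev *edev = container_of(pgmap, struct example_dev, pgmap);

	percpu_ref_kill(&edev->ref);
}

static void example_cleanup(struct dev_pagemap *pgmap)
{
	struct example_dev *edev = container_of(pgmap, struct example_dev, pgmap);

	wait_for_completion(&edev->ref_done);
	percpu_ref_exit(&edev->ref);
}

static const struct dev_pagemap_ops example_pgmap_ops = {
	.kill		= example_kill,
	.cleanup	= example_cleanup,
};

/* During setup: edev->pgmap.ref = &edev->ref; edev->pgmap.ops = &example_pgmap_ops; */
#endif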
145 
146 static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
147 {
148 	struct range *range = &pgmap->ranges[range_id];
149 	struct page *first_page;
150 
151 	/* make sure to access a memmap that was actually initialized */
152 	first_page = pfn_to_page(pfn_first(pgmap, range_id));
153 
154 	/* pages are dead and unused, undo the arch mapping */
155 	mem_hotplug_begin();
156 	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
157 				   PHYS_PFN(range_len(range)));
158 	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
159 		__remove_pages(PHYS_PFN(range->start),
160 			       PHYS_PFN(range_len(range)), NULL);
161 	} else {
162 		arch_remove_memory(range->start, range_len(range),
163 				pgmap_altmap(pgmap));
164 		kasan_remove_zero_shadow(__va(range->start), range_len(range));
165 	}
166 	mem_hotplug_done();
167 
168 	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
169 	pgmap_array_delete(range);
170 }
171 
172 void memunmap_pages(struct dev_pagemap *pgmap)
173 {
174 	unsigned long pfn;
175 	int i;
176 
177 	dev_pagemap_kill(pgmap);
178 	for (i = 0; i < pgmap->nr_range; i++)
179 		for_each_device_pfn(pfn, pgmap, i)
180 			put_page(pfn_to_page(pfn));
181 	dev_pagemap_cleanup(pgmap);
182 
183 	for (i = 0; i < pgmap->nr_range; i++)
184 		pageunmap_range(pgmap, i);
185 
186 	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
187 	devmap_managed_enable_put(pgmap);
188 }
189 EXPORT_SYMBOL_GPL(memunmap_pages);
190 
191 static void devm_memremap_pages_release(void *data)
192 {
193 	memunmap_pages(data);
194 }
195 
196 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
197 {
198 	struct dev_pagemap *pgmap =
199 		container_of(ref, struct dev_pagemap, internal_ref);
200 
201 	complete(&pgmap->done);
202 }
203 
204 static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
205 		int range_id, int nid)
206 {
207 	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
208 	struct range *range = &pgmap->ranges[range_id];
209 	struct dev_pagemap *conflict_pgmap;
210 	int error, is_ram;
211 
212 	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
213 				"altmap not supported for multiple ranges\n"))
214 		return -EINVAL;
215 
216 	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
217 	if (conflict_pgmap) {
218 		WARN(1, "Conflicting mapping in same section\n");
219 		put_dev_pagemap(conflict_pgmap);
220 		return -ENOMEM;
221 	}
222 
223 	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
224 	if (conflict_pgmap) {
225 		WARN(1, "Conflicting mapping in same section\n");
226 		put_dev_pagemap(conflict_pgmap);
227 		return -ENOMEM;
228 	}
229 
230 	is_ram = region_intersects(range->start, range_len(range),
231 		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
232 
233 	if (is_ram != REGION_DISJOINT) {
234 		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
235 				is_ram == REGION_MIXED ? "mixed" : "ram",
236 				range->start, range->end);
237 		return -ENXIO;
238 	}
239 
240 	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
241 				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
242 	if (error)
243 		return error;
244 
245 	if (nid < 0)
246 		nid = numa_mem_id();
247 
248 	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
249 			range_len(range));
250 	if (error)
251 		goto err_pfn_remap;
252 
253 	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
254 		error = -EINVAL;
255 		goto err_pfn_remap;
256 	}
257 
258 	mem_hotplug_begin();
259 
260 	/*
261 	 * For device private memory we call add_pages() as we only need to
262 	 * allocate and initialize struct pages for the device memory.
263 	 * Moreover, the device memory is inaccessible to the CPU, so we do
264 	 * not want to create a linear mapping for it the way
265 	 * arch_add_memory() would.
266 	 *
267 	 * For all other device memory types, which are accessible by
268 	 * the CPU, we do want the linear mapping and thus use
269 	 * arch_add_memory().
270 	 */
271 	if (is_private) {
272 		error = add_pages(nid, PHYS_PFN(range->start),
273 				PHYS_PFN(range_len(range)), params);
274 	} else {
275 		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
276 		if (error) {
277 			mem_hotplug_done();
278 			goto err_kasan;
279 		}
280 
281 		error = arch_add_memory(nid, range->start, range_len(range),
282 					params);
283 	}
284 
285 	if (!error) {
286 		struct zone *zone;
287 
288 		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
289 		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
290 				PHYS_PFN(range_len(range)), params->altmap,
291 				MIGRATE_MOVABLE);
292 	}
293 
294 	mem_hotplug_done();
295 	if (error)
296 		goto err_add_memory;
297 
298 	/*
299 	 * Initialization of the pages has been deferred until now in order
300 	 * to allow us to do the work while not holding the hotplug lock.
301 	 */
302 	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
303 				PHYS_PFN(range->start),
304 				PHYS_PFN(range_len(range)), pgmap);
305 	percpu_ref_get_many(pgmap->ref, pfn_len(pgmap, range_id));
306 	return 0;
307 
308 err_add_memory:
309 	kasan_remove_zero_shadow(__va(range->start), range_len(range));
310 err_kasan:
311 	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
312 err_pfn_remap:
313 	pgmap_array_delete(range);
314 	return error;
315 }
316 
317 
318 /*
319  * Not the device-managed version of devm_memremap_pages(), undone by
320  * memunmap_pages().  Please use devm_memremap_pages() if you have a struct
321  * device available.
322  */
323 void *memremap_pages(struct dev_pagemap *pgmap, int nid)
324 {
325 	struct mhp_params params = {
326 		.altmap = pgmap_altmap(pgmap),
327 		.pgprot = PAGE_KERNEL,
328 	};
329 	const int nr_range = pgmap->nr_range;
330 	int error, i;
331 
332 	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
333 		return ERR_PTR(-EINVAL);
334 
335 	switch (pgmap->type) {
336 	case MEMORY_DEVICE_PRIVATE:
337 		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
338 			WARN(1, "Device private memory not supported\n");
339 			return ERR_PTR(-EINVAL);
340 		}
341 		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
342 			WARN(1, "Missing migrate_to_ram method\n");
343 			return ERR_PTR(-EINVAL);
344 		}
345 		if (!pgmap->ops->page_free) {
346 			WARN(1, "Missing page_free method\n");
347 			return ERR_PTR(-EINVAL);
348 		}
349 		if (!pgmap->owner) {
350 			WARN(1, "Missing owner\n");
351 			return ERR_PTR(-EINVAL);
352 		}
353 		break;
354 	case MEMORY_DEVICE_FS_DAX:
355 		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
356 		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
357 			WARN(1, "File system DAX not supported\n");
358 			return ERR_PTR(-EINVAL);
359 		}
360 		break;
361 	case MEMORY_DEVICE_GENERIC:
362 		break;
363 	case MEMORY_DEVICE_PCI_P2PDMA:
364 		params.pgprot = pgprot_noncached(params.pgprot);
365 		break;
366 	default:
367 		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
368 		break;
369 	}
370 
371 	if (!pgmap->ref) {
372 		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
373 			return ERR_PTR(-EINVAL);
374 
375 		init_completion(&pgmap->done);
376 		error = percpu_ref_init(&pgmap->internal_ref,
377 				dev_pagemap_percpu_release, 0, GFP_KERNEL);
378 		if (error)
379 			return ERR_PTR(error);
380 		pgmap->ref = &pgmap->internal_ref;
381 	} else {
382 		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
383 			WARN(1, "Missing reference count teardown definition\n");
384 			return ERR_PTR(-EINVAL);
385 		}
386 	}
387 
388 	devmap_managed_enable_get(pgmap);
389 
390 	/*
391 	 * Clear the pgmap nr_range as it will be incremented for each
392 	 * successfully processed range. This communicates how many
393 	 * regions to unwind in the abort case.
394 	 */
395 	pgmap->nr_range = 0;
396 	error = 0;
397 	for (i = 0; i < nr_range; i++) {
398 		error = pagemap_range(pgmap, &params, i, nid);
399 		if (error)
400 			break;
401 		pgmap->nr_range++;
402 	}
403 
404 	if (i < nr_range) {
405 		memunmap_pages(pgmap);
406 		pgmap->nr_range = nr_range;
407 		return ERR_PTR(error);
408 	}
409 
410 	return __va(pgmap->ranges[0].start);
411 }
412 EXPORT_SYMBOL_GPL(memremap_pages);
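
/*
 * Editor's example (illustrative sketch, not part of memremap.c): mapping a
 * single MEMORY_DEVICE_GENERIC range with memremap_pages() while leaving
 * pgmap->ref NULL so the internal reference count is used. The helper and
 * its resource values are hypothetical.
 */
#if 0
static void *example_map_generic(struct dev_pagemap *pgmap,
				 resource_size_t start, resource_size_t size)
{
	memset(pgmap, 0, sizeof(*pgmap));
	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range.start = start;
	pgmap->range.end = start + size - 1;
	pgmap->nr_range = 1;

	/* A negative nid makes pagemap_range() fall back to numa_mem_id(). */
	return memremap_pages(pgmap, NUMA_NO_NODE);
}
#endif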
413 
414 /**
415  * devm_memremap_pages - remap and provide memmap backing for the given resource
416  * @dev: hosting device for @pgmap
417  * @pgmap: pointer to a struct dev_pagemap
418  *
419  * Notes:
420  * 1/ At a minimum the range (or ranges/nr_range) and type members of @pgmap
421  *    must be initialized by the caller before passing it to this function.
422  *
423  * 2/ The altmap field may optionally be initialized, in which case
424  *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
425  *
426  * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
427  *    'live' on entry and will be killed and reaped at
428  *    devm_memremap_pages_release() time, or if this routine fails.
429  *
430  * 4/ range is expected to be a host memory range that could feasibly be
431  *    treated as a "System RAM" range, i.e. not a device mmio range, but
432  *    this is not enforced.
433  */
434 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
435 {
436 	int error;
437 	void *ret;
438 
439 	ret = memremap_pages(pgmap, dev_to_node(dev));
440 	if (IS_ERR(ret))
441 		return ret;
442 
443 	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
444 			pgmap);
445 	if (error)
446 		return ERR_PTR(error);
447 	return ret;
448 }
449 EXPORT_SYMBOL_GPL(devm_memremap_pages);
450 
451 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
452 {
453 	devm_release_action(dev, devm_memremap_pages_release, pgmap);
454 }
455 EXPORT_SYMBOL_GPL(devm_memunmap_pages);
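
/*
 * Editor's example (illustrative sketch, not part of memremap.c): the
 * device-managed variant from a driver probe path. Teardown happens
 * automatically on driver unbind via devm, or early through
 * devm_memunmap_pages(). The probe helper and its arguments are hypothetical.
 */
#if 0
static int example_probe(struct device *dev, struct dev_pagemap *pgmap,
			 struct range *range)
{
	void *vaddr;

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = *range;
	pgmap->nr_range = 1;

	vaddr = devm_memremap_pages(dev, pgmap);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* vaddr is the kernel linear address of the start of the range. */
	return 0;
}
#endif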
456 
457 unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
458 {
459 	/* number of pfns from base where pfn_to_page() is valid */
460 	if (altmap)
461 		return altmap->reserve + altmap->free;
462 	return 0;
463 }
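
/*
 * Editor's note (illustrative, with hypothetical values): when an altmap is
 * used the memmap is carved out of the head of the device range itself, so
 * the first vmem_altmap_offset() pfns have no usable struct page. This is
 * the arithmetic pfn_first() relies on, e.g.:
 *
 *	range->start    = 0x100000000           -> base pfn 0x100000
 *	altmap->reserve = 0x80, altmap->free = 0x1f80
 *	first usable pfn = 0x100000 + (0x80 + 0x1f80) = 0x102000
 */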
464 
465 void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
466 {
467 	altmap->alloc -= nr_pfns;
468 }
469 
470 /**
471  * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
472  * @pfn: page frame number to look up a dev_pagemap for
473  * @pgmap: optional known pgmap that already has a reference
474  *
475  * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
476  * is non-NULL but does not cover @pfn the reference to it will be released.
477  */
478 struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
479 		struct dev_pagemap *pgmap)
480 {
481 	resource_size_t phys = PFN_PHYS(pfn);
482 
483 	/*
484 	 * In the cached case we're already holding a live reference.
485 	 */
486 	if (pgmap) {
487 		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
488 			return pgmap;
489 		put_dev_pagemap(pgmap);
490 	}
491 
492 	/* fall back to slow path lookup */
493 	rcu_read_lock();
494 	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
495 	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
496 		pgmap = NULL;
497 	rcu_read_unlock();
498 
499 	return pgmap;
500 }
501 EXPORT_SYMBOL_GPL(get_dev_pagemap);
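
/*
 * Editor's example (illustrative sketch, not part of memremap.c): the caching
 * pattern get_dev_pagemap() supports. Passing the previous result back in
 * skips the xarray lookup while the cached pgmap still covers @pfn; the walk
 * below is hypothetical.
 */
#if 0
static void example_walk_pfns(unsigned long start_pfn, unsigned long nr_pfns)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pfns; pfn++) {
		/* Reuses @pgmap if it covers @pfn, otherwise drops it and looks up again. */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			continue;
		/* ... operate on pfn_to_page(pfn) while the pgmap is live ... */
	}
	if (pgmap)
		put_dev_pagemap(pgmap);
}
#endif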
502 
503 #ifdef CONFIG_DEV_PAGEMAP_OPS
504 void free_devmap_managed_page(struct page *page)
505 {
506 	/* notify page idle for dax */
507 	if (!is_device_private_page(page)) {
508 		wake_up_var(&page->_refcount);
509 		return;
510 	}
511 
512 	__ClearPageWaiters(page);
513 
514 	mem_cgroup_uncharge(page_folio(page));
515 
516 	/*
517 	 * When a device_private page is freed, the page->mapping field
518 	 * may still contain a (stale) mapping value. For example, the
519 	 * lower bits of page->mapping may still identify the page as an
520 	 * anonymous page. Ultimately, this entire field is just stale
521 	 * and wrong, and it will cause errors if not cleared.  One
522 	 * example is:
523 	 *
524 	 *  migrate_vma_pages()
525 	 *    migrate_vma_insert_page()
526 	 *      page_add_new_anon_rmap()
527 	 *        __page_set_anon_rmap()
528 	 *          ...checks page->mapping, via PageAnon(page) call,
529 	 *            and incorrectly concludes that the page is an
530 	 *            anonymous page. Therefore, it incorrectly,
531 	 *            silently fails to set up the new anon rmap.
532 	 *
533 	 * For other types of ZONE_DEVICE pages, migration is either
534 	 * handled differently or not done at all, so there is no need
535 	 * to clear page->mapping.
536 	 */
537 	page->mapping = NULL;
538 	page->pgmap->ops->page_free(page);
539 }
540 #endif /* CONFIG_DEV_PAGEMAP_OPS */
541