Lines Matching +full:non +full:- +full:live
1 // SPDX-License-Identifier: GPL-2.0
48 if (pgmap->type == MEMORY_DEVICE_FS_DAX)
54 if (pgmap->type == MEMORY_DEVICE_FS_DAX)
68 xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
75 struct range *range = &pgmap->ranges[range_id];
76 unsigned long pfn = PHYS_PFN(range->start);
87 for (i = 0; i < pgmap->nr_range; i++) {
88 struct range *range = &pgmap->ranges[i];
90 if (pfn >= PHYS_PFN(range->start) &&
91 pfn <= PHYS_PFN(range->end))
100 const struct range *range = &pgmap->ranges[range_id];
102 return (range->start + range_len(range)) >> PAGE_SHIFT;
107 return (pfn_end(pgmap, range_id) -
108 pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
113 struct range *range = &pgmap->ranges[range_id];
121 remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
123 if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
124 __remove_pages(PHYS_PFN(range->start),
127 arch_remove_memory(range->start, range_len(range),
129 kasan_remove_zero_shadow(__va(range->start), range_len(range));
133 untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
141 percpu_ref_kill(&pgmap->ref);
142 if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
143 pgmap->type != MEMORY_DEVICE_COHERENT)
144 for (i = 0; i < pgmap->nr_range; i++)
145 percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));
147 wait_for_completion(&pgmap->done);
149 for (i = 0; i < pgmap->nr_range; i++)
151 percpu_ref_exit(&pgmap->ref);
153 WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
167 complete(&pgmap->done);
173 const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
174 struct range *range = &pgmap->ranges[range_id];
180 return -EINVAL;
182 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
186 return -ENOMEM;
189 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
193 return -ENOMEM;
196 is_ram = region_intersects(range->start, range_len(range),
200 WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
202 range->start, range->end);
203 return -ENXIO;
206 error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
207 PHYS_PFN(range->end), pgmap, GFP_KERNEL));
214 error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
219 if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
220 error = -EINVAL;
228 * allocate and initialize struct page for the device memory. More-
229 * over the device memory is un-accessible thus we do not want to
238 error = add_pages(nid, PHYS_PFN(range->start),
241 error = kasan_add_zero_shadow(__va(range->start), range_len(range));
247 error = arch_add_memory(nid, range->start, range_len(range),
254 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
255 move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
256 PHYS_PFN(range_len(range)), params->altmap,
268 memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
269 PHYS_PFN(range->start),
271 if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
272 pgmap->type != MEMORY_DEVICE_COHERENT)
273 percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
278 kasan_remove_zero_shadow(__va(range->start), range_len(range));
280 untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
299 const int nr_range = pgmap->nr_range;
303 return ERR_PTR(-EINVAL);
305 switch (pgmap->type) {
309 return ERR_PTR(-EINVAL);
311 if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
313 return ERR_PTR(-EINVAL);
315 if (!pgmap->ops->page_free) {
317 return ERR_PTR(-EINVAL);
319 if (!pgmap->owner) {
321 return ERR_PTR(-EINVAL);
325 if (!pgmap->ops->page_free) {
327 return ERR_PTR(-EINVAL);
329 if (!pgmap->owner) {
331 return ERR_PTR(-EINVAL);
337 return ERR_PTR(-EINVAL);
347 WARN(1, "Invalid pgmap type %d\n", pgmap->type);
351 init_completion(&pgmap->done);
352 error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
364 pgmap->nr_range = 0;
370 pgmap->nr_range++;
375 pgmap->nr_range = nr_range;
379 return __va(pgmap->ranges[0].start);
384 * devm_memremap_pages - remap and provide memmap backing for the given resource
393 * PGMAP_ALTMAP_VALID must be set in pgmap->flags.
395 * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
396 * 'live' on entry and will be killed and reaped at
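The notes above spell out the caller's contract for devm_memremap_pages(). As a hedged illustration only (not taken from this file), a minimal device-private caller might look like the sketch below; every my_* name is hypothetical, and only struct dev_pagemap, struct dev_pagemap_ops and devm_memremap_pages() itself are the kernel APIs being exercised. The MEMORY_DEVICE_PRIVATE checks earlier in memremap_pages() (lines 311-331 above) are what make the ops and owner assignments mandatory here.

/* Hedged, illustrative sketch only: a hypothetical device-private driver
 * wiring up struct dev_pagemap for devm_memremap_pages(). All my_* names
 * are invented; only the dev_pagemap fields and APIs come from the kernel. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/memremap.h>
#include <linux/mm.h>

static void my_page_free(struct page *page)
{
	/* hypothetical: return the backing device block to a driver pool */
}

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	/* hypothetical: migrate the faulting device-private page back to RAM */
	return VM_FAULT_SIGBUS;
}

static const struct dev_pagemap_ops my_pagemap_ops = {
	.page_free	= my_page_free,
	.migrate_to_ram	= my_migrate_to_ram,
};

static int my_map_device_memory(struct device *dev, struct dev_pagemap *pgmap,
				u64 start, u64 size)
{
	void *ret;

	pgmap->type = MEMORY_DEVICE_PRIVATE;	/* note 1/: type is mandatory */
	pgmap->range.start = start;		/* note 1/: range is mandatory */
	pgmap->range.end = start + size - 1;
	pgmap->nr_range = 1;
	pgmap->ops = &my_pagemap_ops;		/* DEVICE_PRIVATE needs ops */
	pgmap->owner = dev;			/* ... and a non-NULL owner */

	ret = devm_memremap_pages(dev, pgmap);
	return PTR_ERR_OR_ZERO(ret);
}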
427 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
431 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
432 * is non-NULL but does not cover @pfn the reference to it will be released.
440 * In the cached case we're already holding a live reference.
443 if (phys >= pgmap->range.start && phys <= pgmap->range.end)
451 if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
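A hedged sketch of the cached-lookup pattern the get_dev_pagemap() kernel-doc above describes: the pgmap returned for the previous pfn is passed back in, reused while it still covers the new pfn, and released otherwise. The walker function and its name are illustrative, not from this file.

#include <linux/memremap.h>

static void my_walk_device_pfns(unsigned long start_pfn, unsigned long nr)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr; pfn++) {
		/* reuses @pgmap when it covers @pfn, else drops it and looks up anew */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			break;	/* pfn has no ZONE_DEVICE backing */
		/* ... operate on pfn_to_page(pfn) under the live reference ... */
	}
	put_dev_pagemap(pgmap);	/* tolerates NULL */
}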
461 if (WARN_ON_ONCE(!folio->page.pgmap->ops ||
462 !folio->page.pgmap->ops->page_free))
469 * and we could PTE-map them similar to THP, we'd have to clear
478 * When a device managed page is freed, the folio->mapping field
480 * lower bits of folio->mapping may still identify the folio as an
486 * to clear folio->mapping.
488 folio->mapping = NULL;
489 folio->page.pgmap->ops->page_free(folio_page(folio, 0));
491 if (folio->page.pgmap->type != MEMORY_DEVICE_PRIVATE &&
492 folio->page.pgmap->type != MEMORY_DEVICE_COHERENT)
499 put_dev_pagemap(folio->page.pgmap);
508 WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
517 if (folio->page.pgmap->type != MEMORY_DEVICE_FS_DAX)
521 * fsdax page refcounts are 1-based, rather than 0-based: if
526 wake_up_var(&folio->_refcount);
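The wake_up_var() call above is the wake side of the generic wait_var_event() facility. A hedged sketch of the sleeping side, waiting for an fsdax folio to return to its idle refcount of 1, could look like the snippet below; the helper name is hypothetical and is not the fs/dax.c implementation.

#include <linux/pagemap.h>
#include <linux/wait_bit.h>

static void my_wait_fsdax_folio_idle(struct folio *folio)
{
	/* woken by the wake_up_var(&folio->_refcount) call shown above */
	wait_var_event(&folio->_refcount, folio_ref_count(folio) == 1);
}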