Lines Matching +full:non +full:- +full:live
1 // SPDX-License-Identifier: GPL-2.0
43 xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
50 struct range *range = &pgmap->ranges[range_id];
51 unsigned long pfn = PHYS_PFN(range->start);
62 for (i = 0; i < pgmap->nr_range; i++) {
63 struct range *range = &pgmap->ranges[i];
65 if (pfn >= PHYS_PFN(range->start) &&
66 pfn <= PHYS_PFN(range->end))
75 const struct range *range = &pgmap->ranges[range_id];
77 return (range->start + range_len(range)) >> PAGE_SHIFT;
82 return (pfn_end(pgmap, range_id) -
83 pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
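Read together, pfn_first(), pfn_end() and pfn_len() turn a pgmap range into pfn bounds and into the number of percpu references taken per range: one per base page, or one per compound head page when vmemmap_shift is non-zero. A worked example with hypothetical numbers, not taken from the listing above:

#include <linux/mm.h>
#include <linux/sizes.h>

/* Hypothetical: a single 2 MiB range starting at 4 GiB, vmemmap_shift == 9. */
static unsigned long example_pfn_len(void)
{
        unsigned long first = PHYS_PFN(0x100000000ULL);         /* 0x100000 */
        unsigned long end   = PHYS_PFN(0x100000000ULL + SZ_2M); /* 512 base pfns later */

        return (end - first) >> 9;      /* one compound head page -> 1 reference */
}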
88 struct range *range = &pgmap->ranges[range_id];
96 remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
98 if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
99 __remove_pages(PHYS_PFN(range->start),
102 arch_remove_memory(range->start, range_len(range),
104 kasan_remove_zero_shadow(__va(range->start), range_len(range));
108 pfnmap_untrack(PHYS_PFN(range->start), range_len(range));
116 percpu_ref_kill(&pgmap->ref);
117 if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
118 pgmap->type != MEMORY_DEVICE_COHERENT)
119 for (i = 0; i < pgmap->nr_range; i++)
120 percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));
122 wait_for_completion(&pgmap->done);
124 for (i = 0; i < pgmap->nr_range; i++)
126 percpu_ref_exit(&pgmap->ref);
128 WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
141 complete(&pgmap->done);
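The teardown fragments above, together with the release callback at line 141, follow the stock percpu_ref lifecycle: initialise the ref with a release callback, kill it so new tryget_live() calls fail, drop any extra references, wait for the release callback to fire, then exit the ref. A minimal sketch of that pattern on its own, using a hypothetical my_dev structure rather than dev_pagemap:

#include <linux/completion.h>
#include <linux/percpu-refcount.h>

struct my_dev {
        struct percpu_ref ref;
        struct completion done;
};

static void my_dev_release(struct percpu_ref *ref)
{
        struct my_dev *d = container_of(ref, struct my_dev, ref);

        complete(&d->done);             /* last reference dropped */
}

static int my_dev_setup(struct my_dev *d)
{
        init_completion(&d->done);
        return percpu_ref_init(&d->ref, my_dev_release, 0, GFP_KERNEL);
}

static void my_dev_teardown(struct my_dev *d)
{
        percpu_ref_kill(&d->ref);       /* no new tryget_live() succeeds */
        wait_for_completion(&d->done);  /* wait for outstanding references to drain */
        percpu_ref_exit(&d->ref);       /* free the percpu counters */
}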
147 const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
148 struct range *range = &pgmap->ranges[range_id];
154 return -EINVAL;
156 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
160 return -ENOMEM;
163 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
167 return -ENOMEM;
170 is_ram = region_intersects(range->start, range_len(range),
174 WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
176 range->start, range->end);
177 return -ENXIO;
180 error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
181 PHYS_PFN(range->end), pgmap, GFP_KERNEL));
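For context on the xa_store_range() calls: pgmap_array maps every pfn in a registered range back to its dev_pagemap, so registration stores the pointer across the whole span while teardown (line 43 near the top) overwrites it with NULL. A generic sketch of that multi-index store/load pairing, with hypothetical pfn values, not code from memremap.c:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_array);

static int example_register(void *owner)
{
        /* One multi-index entry covers pfns 0x1000..0x1fff. */
        return xa_err(xa_store_range(&example_array, 0x1000, 0x1fff,
                                     owner, GFP_KERNEL));
}

static void *example_lookup(unsigned long pfn)
{
        /* Any pfn inside the stored range resolves to @owner. */
        return xa_load(&example_array, pfn);
}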
188 error = pfnmap_track(PHYS_PFN(range->start), range_len(range),
189 &params->pgprot);
193 if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
194 error = -EINVAL;
202 * allocate and initialize struct page for the device memory. More-
203 * over the device memory is un-accessible thus we do not want to
212 error = add_pages(nid, PHYS_PFN(range->start),
215 error = kasan_add_zero_shadow(__va(range->start), range_len(range));
221 error = arch_add_memory(nid, range->start, range_len(range),
228 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
229 move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
230 PHYS_PFN(range_len(range)), params->altmap,
242 memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
243 PHYS_PFN(range->start),
245 if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
246 pgmap->type != MEMORY_DEVICE_COHERENT)
247 percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
252 kasan_remove_zero_shadow(__va(range->start), range_len(range));
254 pfnmap_untrack(PHYS_PFN(range->start), range_len(range));
273 const int nr_range = pgmap->nr_range;
277 return ERR_PTR(-EINVAL);
279 switch (pgmap->type) {
283 return ERR_PTR(-EINVAL);
285 if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
287 return ERR_PTR(-EINVAL);
289 if (!pgmap->ops->page_free) {
291 return ERR_PTR(-EINVAL);
293 if (!pgmap->owner) {
295 return ERR_PTR(-EINVAL);
299 if (!pgmap->ops->page_free) {
301 return ERR_PTR(-EINVAL);
303 if (!pgmap->owner) {
305 return ERR_PTR(-EINVAL);
317 WARN(1, "Invalid pgmap type %d\n", pgmap->type);
321 init_completion(&pgmap->done);
322 error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
332 pgmap->nr_range = 0;
338 pgmap->nr_range++;
343 pgmap->nr_range = nr_range;
347 return __va(pgmap->ranges[0].start);
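For orientation, this is roughly how a MEMORY_DEVICE_PRIVATE driver drives memremap_pages(); the structure, callbacks and region name below are hypothetical placeholders modelled on existing in-tree users, not code from memremap.c:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>
#include <linux/mm.h>

/* Hypothetical callbacks; a real driver recycles pages and migrates data back. */
static void my_page_free(struct page *page)
{
}

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

static const struct dev_pagemap_ops my_pagemap_ops = {
        .page_free      = my_page_free,
        .migrate_to_ram = my_migrate_to_ram,
};

static int my_request_device_private(struct device *dev, size_t size)
{
        struct dev_pagemap *pgmap;
        struct resource *res;
        void *ret;

        /* Carve an unused physical range out of the iomem space. */
        res = request_free_mem_region(&iomem_resource, size, "my-device-memory");
        if (IS_ERR(res))
                return PTR_ERR(res);

        pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap)
                return -ENOMEM;

        pgmap->type = MEMORY_DEVICE_PRIVATE;
        pgmap->range.start = res->start;
        pgmap->range.end = res->end;
        pgmap->nr_range = 1;
        pgmap->ops = &my_pagemap_ops;
        pgmap->owner = dev;             /* opaque cookie matched by migration code */

        ret = memremap_pages(pgmap, dev_to_node(dev));
        return IS_ERR(ret) ? PTR_ERR(ret) : 0;
}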
352 * devm_memremap_pages - remap and provide memmap backing for the given resource
361 * PGMAP_ALTMAP_VALID must be set in pgmap->flags.
363 * 3/ The ref field may optionally be provided, in which pgmap->ref must be
364 * 'live' on entry and will be killed and reaped at
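The devm-managed wrapper documented above only needs the range and type members filled in for the simple cases; a short sketch, assuming a hypothetical caller that wants MEMORY_DEVICE_GENERIC pages (a type that requires no ops table), with teardown tied to the device's lifetime:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>

static int my_generic_pages(struct device *dev, struct resource *res)
{
        struct dev_pagemap *pgmap;
        void *addr;

        pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap)
                return -ENOMEM;

        pgmap->type = MEMORY_DEVICE_GENERIC;
        pgmap->range.start = res->start;
        pgmap->range.end = res->end;
        pgmap->nr_range = 1;

        /* The mapping is torn down automatically when @dev is unbound. */
        addr = devm_memremap_pages(dev, pgmap);
        return PTR_ERR_OR_ZERO(addr);
}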
395 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
399 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
400 * is non-NULL but does not cover @pfn the reference to it will be released.
408 * In the cached case we're already holding a live reference.
411 if (phys >= pgmap->range.start && phys <= pgmap->range.end)
419 if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
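A hedged sketch of the caller-side pattern the get_dev_pagemap() comments above describe: the pgmap returned by the previous lookup is passed back in so consecutive pfns in the same range skip the lookup, and the last reference is dropped explicitly. The walk itself is illustrative, not from memremap.c:

#include <linux/memremap.h>

static void my_walk_pfns(const unsigned long *pfns, unsigned int npfns)
{
        struct dev_pagemap *pgmap = NULL;
        unsigned int i;

        for (i = 0; i < npfns; i++) {
                /* Reuses the cached pgmap while it still covers pfns[i]. */
                pgmap = get_dev_pagemap(pfns[i], pgmap);
                if (!pgmap)
                        continue;       /* not ZONE_DEVICE, or the pgmap is dying */

                /* ... operate on the device page behind pfns[i] ... */
        }

        /* Drop the reference held from the last successful lookup. */
        if (pgmap)
                put_dev_pagemap(pgmap);
}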
429 struct dev_pagemap *pgmap = folio->pgmap;
438 * and we could PTE-map them similar to THP, we'd have to clear
447 * When a device managed page is freed, the folio->mapping field
449 * lower bits of folio->mapping may still identify the folio as an
455 * to clear folio->mapping.
457 * FS DAX pages clear the mapping when the folio->share count hits
461 if (pgmap->type != MEMORY_DEVICE_FS_DAX &&
462 pgmap->type != MEMORY_DEVICE_GENERIC)
463 folio->mapping = NULL;
465 switch (pgmap->type) {
468 if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
470 pgmap->ops->page_free(folio_page(folio, 0));
483 wake_up_var(&folio->page);
487 if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
489 pgmap->ops->page_free(folio_page(folio, 0));
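The page_free() callbacks invoked above typically just hand the page back to a driver-private allocator; a minimal sketch for a MEMORY_DEVICE_PRIVATE-style driver, assuming a hypothetical chunk structure that embeds its dev_pagemap (as several in-tree users do):

#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

struct my_chunk {
        struct dev_pagemap pagemap;     /* embedded, so the callback can find us */
        spinlock_t lock;
        struct page *free_pages;        /* linked through page->zone_device_data */
};

static void my_chunk_page_free(struct page *page)
{
        struct my_chunk *chunk =
                container_of(page_pgmap(page), struct my_chunk, pagemap);

        /* Push the now-unused device page onto a driver-private free list. */
        spin_lock(&chunk->lock);
        page->zone_device_data = chunk->free_pages;
        chunk->free_pages = page;
        spin_unlock(&chunk->lock);
}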
500 WARN_ON_ONCE(!percpu_ref_tryget_live(&page_pgmap(page)->ref));
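On the allocation side, the percpu_ref_tryget_live() in the last line is the reference a driver takes when it hands a device page out again (zone_device_page_init() in current kernels); the matching put happens when the page is freed. A hypothetical counterpart to the free-list sketch above, reusing struct my_chunk:

static struct page *my_chunk_page_alloc(struct my_chunk *chunk)
{
        struct page *page;

        spin_lock(&chunk->lock);
        page = chunk->free_pages;
        if (page)
                chunk->free_pages = page->zone_device_data;
        spin_unlock(&chunk->lock);

        /* Takes the live pgmap reference and prepares the page for hand-out. */
        if (page)
                zone_device_page_init(page);
        return page;
}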