/linux/mm/
memremap.c
   48  static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)  in pfn_first() argument
   50  struct range *range = &pgmap->ranges[range_id];  in pfn_first()
   55  return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));  in pfn_first()
   58  bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)  in pgmap_pfn_valid() argument
   62  for (i = 0; i < pgmap->nr_range; i++) {  in pgmap_pfn_valid()
   63  struct range *range = &pgmap->ranges[i];  in pgmap_pfn_valid()
   67  return pfn >= pfn_first(pgmap, i);  in pgmap_pfn_valid()
   73  static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)  in pfn_end() argument
   75  const struct range *range = &pgmap->ranges[range_id];  in pfn_end()
   80  static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)  in pfn_len() argument
  Definitions:
   46  devmap_managed_enable_put(struct dev_pagemap *pgmap)  devmap_managed_enable_put() argument
   52  devmap_managed_enable_get(struct dev_pagemap *pgmap)  devmap_managed_enable_get() argument
   61  devmap_managed_enable_put(struct dev_pagemap *pgmap)  devmap_managed_enable_put() argument
   83  pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)  pgmap_pfn_valid() argument
   98  pfn_end(struct dev_pagemap *pgmap, int range_id)  pfn_end() argument
  105  pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)  pfn_len() argument
  111  pageunmap_range(struct dev_pagemap *pgmap, int range_id)  pageunmap_range() argument
  137  memunmap_pages(struct dev_pagemap *pgmap)  memunmap_pages() argument
  165  struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);  dev_pagemap_percpu_release() local
  170  pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params, int range_id, int nid)  pagemap_range() argument
  292  memremap_pages(struct dev_pagemap *pgmap, int nid)  memremap_pages() argument
  403  devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)  devm_memremap_pages() argument
  420  devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)  devm_memunmap_pages() argument
  435  get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap)  get_dev_pagemap() argument
  461  struct dev_pagemap *pgmap = folio->pgmap;  free_zone_device_folio() local
  [all …]
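Read together, pfn_first(), pfn_end(), and pfn_len() turn one entry of pgmap->ranges[] into a [first, end) pfn window, with range 0 skipping the pages the altmap reserves for the memmap itself. A sketch spliced from the fragments above (hedged: a reconstruction, not a verbatim copy of any single revision):

    static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
    {
            struct range *range = &pgmap->ranges[range_id];
            unsigned long pfn = PHYS_PFN(range->start);

            /* Only range 0 carries altmap-reserved pages; skip past them. */
            if (range_id)
                    return pfn;
            return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
    }

    static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
    {
            const struct range *range = &pgmap->ranges[range_id];

            return (range->start + range_len(range)) >> PAGE_SHIFT;
    }
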
sparse-vmemmap.c
  480  struct dev_pagemap *pgmap)  in reuse_compound_section() argument
  482  unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);  in reuse_compound_section()
  484  PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);  in reuse_compound_section()
  509  struct dev_pagemap *pgmap)  in vmemmap_populate_compound_pages() argument
  515  if (reuse_compound_section(start_pfn, pgmap)) {  in vmemmap_populate_compound_pages()
  529  size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));  in vmemmap_populate_compound_pages()
  563  struct dev_pagemap *pgmap)  in __populate_section_memmap() argument
  573  if (vmemmap_can_optimize(altmap, pgmap))  in __populate_section_memmap()
  574  r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);  in __populate_section_memmap()

sparse.c
  419  struct dev_pagemap *pgmap)  in __populate_section_memmap() argument
  671  struct dev_pagemap *pgmap)  in populate_section_memmap() argument
  673  return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);  in populate_section_memmap()
  743  struct dev_pagemap *pgmap)  in populate_section_memmap() argument
  872  struct dev_pagemap *pgmap)  in section_activate() argument
  904  memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);  in section_activate()
  935  struct dev_pagemap *pgmap)  in sparse_add_section() argument
  946  memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);  in sparse_add_section()

mm_init.c
 1009  struct dev_pagemap *pgmap)  in __init_zone_device_page() argument
 1024  * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer  in __init_zone_device_page()
 1028  page_folio(page)->pgmap = pgmap;  in __init_zone_device_page()
 1055  switch (pgmap->type) {  in __init_zone_device_page()
 1077  struct dev_pagemap *pgmap)  in compound_nr_pages() argument
 1079  if (!vmemmap_can_optimize(altmap, pgmap))  in compound_nr_pages()
 1080  return pgmap_vmemmap_nr(pgmap);  in compound_nr_pages()
 1088  struct dev_pagemap *pgmap,  in memmap_init_compound()
 1092  unsigned int order = pgmap->vmemmap_shift;  in memmap_init_compound()
  Definitions:
 1005  __init_zone_device_page(struct page *page, unsigned long pfn, unsigned long zone_idx, int nid, struct dev_pagemap *pgmap)  __init_zone_device_page() argument
 1073  compound_nr_pages(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)  compound_nr_pages() argument
 1084  memmap_init_compound(struct page *head, unsigned long head_pfn, unsigned long zone_idx, int nid, struct dev_pagemap *pgmap, unsigned long nr_pages)  memmap_init_compound() argument
 1111  memmap_init_zone_device(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct dev_pagemap *pgmap)  memmap_init_zone_device() argument
  [all …]
memory-failure.c
 1777  struct dev_pagemap *pgmap)  in mf_generic_kill_procs() argument
 1800  switch (pgmap->type) {  in mf_generic_kill_procs()
 2172  struct dev_pagemap *pgmap)  in memory_failure_dev_pagemap() argument
 2177  if (!pgmap_pfn_valid(pgmap, pfn))  in memory_failure_dev_pagemap()
 2184  if (pgmap_has_memory_failure(pgmap)) {  in memory_failure_dev_pagemap()
 2185  rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);  in memory_failure_dev_pagemap()
 2194  rc = mf_generic_kill_procs(pfn, flags, pgmap);  in memory_failure_dev_pagemap()
 2197  put_dev_pagemap(pgmap);  in memory_failure_dev_pagemap()
 2248  struct dev_pagemap *pgmap;  in memory_failure() local
 2269  pgmap = get_dev_pagemap(pfn, NULL);  in memory_failure()
  [all …]

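memory_failure_dev_pagemap() prefers a driver hook when pgmap_has_memory_failure() reports one, and only falls back to mf_generic_kill_procs() otherwise. A hedged sketch of the driver side; the my_* names are hypothetical, but the callback signature is the one invoked at line 2185 above:

    static int my_pgmap_memory_failure(struct dev_pagemap *pgmap,
                    unsigned long pfn, unsigned long nr_pages, int mf_flags)
    {
            /*
             * Isolate the poisoned device pfns and notify their mappers.
             * Returning -EOPNOTSUPP makes memory_failure_dev_pagemap()
             * fall back to the generic handler.
             */
            return 0;
    }

    static const struct dev_pagemap_ops my_pgmap_ops = {
            .memory_failure = my_pgmap_memory_failure,
    };
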
migrate_device.c
  116  struct dev_pagemap *pgmap;  in migrate_vma_collect_pmd() local
  144  pgmap = page_pgmap(page);  in migrate_vma_collect_pmd()
  147  pgmap->owner != migrate->pgmap_owner)  in migrate_vma_collect_pmd()
  167  pgmap = page_pgmap(page);  in migrate_vma_collect_pmd()
  171  pgmap->owner != migrate->pgmap_owner)  in migrate_vma_collect_pmd()

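The owner checks at lines 147 and 171 only pass for pages whose pgmap->owner matches the cookie the migrating driver advertised. A hedged sketch of that driver side (my_owner_cookie and the pfn arrays are hypothetical):

    struct migrate_vma args = {
            .vma         = vma,
            .start       = start,
            .end         = end,
            .src         = src_pfns,
            .dst         = dst_pfns,
            .pgmap_owner = my_owner_cookie, /* same pointer as pgmap->owner */
            .flags       = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
    };

    if (migrate_vma_setup(&args))
            return -EINVAL;
    /* copy the selected pages, then: */
    migrate_vma_pages(&args);
    migrate_vma_finalize(&args);
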
gup.c
   32  struct dev_pagemap *pgmap;  member
  817  struct dev_pagemap **pgmap)  in follow_page_pte() argument
  929  return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);  in follow_pmd_mask()
  942  return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);  in follow_pmd_mask()
  949  follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);  in follow_pmd_mask()
 1527  if (ctx.pgmap)  in __get_user_pages()
 1528  put_dev_pagemap(ctx.pgmap);  in __get_user_pages()
 2864  struct dev_pagemap *pgmap = NULL;  in gup_fast_pte_range() local
 2937  if (pgmap)  in gup_fast_pte_range()
 2938  put_dev_pagemap(pgmap);  in gup_fast_pte_range()

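__get_user_pages() caches the last pgmap in its context struct and drops the reference once at line 1528 rather than per page. A minimal sketch of that caching pattern (walk_device_pfns() is hypothetical; get_dev_pagemap() itself releases the stale reference when the pfn leaves the cached pagemap):

    static void walk_device_pfns(unsigned long start_pfn, unsigned long end_pfn)
    {
            struct dev_pagemap *pgmap = NULL;
            unsigned long pfn;

            for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                    pgmap = get_dev_pagemap(pfn, pgmap); /* cheap if cached */
                    if (!pgmap)
                            break; /* not ZONE_DEVICE, or pagemap is dying */
                    /* ... operate on pfn_to_page(pfn) ... */
            }
            if (pgmap)
                    put_dev_pagemap(pgmap);
    }
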
memory_hotplug.c
  349  struct dev_pagemap *pgmap;  in pfn_to_online_page() local
  378  pgmap = get_dev_pagemap(pfn, NULL);  in pfn_to_online_page()
  379  put_dev_pagemap(pgmap);  in pfn_to_online_page()
  382  if (pgmap)  in pfn_to_online_page()
  424  params->pgmap);  in __add_pages()

huge_memory.c
 1650  follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)  follow_devmap_pmd() argument

memory.c
 4508  struct dev_pagemap *pgmap;  in do_swap_page() local
 4512  pgmap = page_pgmap(vmf->page);  in do_swap_page()
 4513  ret = pgmap->ops->migrate_to_ram(vmf);  in do_swap_page()

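do_swap_page() resolves a device-private swap entry by bouncing the fault to the owning driver through ->migrate_to_ram. A hedged skeleton of the ops a MEMORY_DEVICE_PRIVATE pagemap provides (my_* names are hypothetical; memremap_pages() refuses device-private pagemaps that lack either op):

    static void my_page_free(struct page *page)
    {
            /* hand the backing device block back to the driver allocator */
    }

    static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
    {
            /*
             * Copy vmf->page from device memory into a fresh system-RAM
             * page and repoint the faulting PTE, typically via the
             * migrate_vma_*() helpers; return VM_FAULT_SIGBUS on failure.
             */
            return 0;
    }

    static const struct dev_pagemap_ops my_devmem_ops = {
            .page_free      = my_page_free,
            .migrate_to_ram = my_migrate_to_ram,
    };
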
/linux/drivers/pci/
p2pdma.c
   33  struct dev_pagemap pgmap;  member
   36  static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)  in to_p2p_pgmap() argument
   38  return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);  in to_p2p_pgmap()
  205  struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));  in p2pdma_page_free() local
  208  rcu_dereference_protected(pgmap->provider->p2pdma, 1);  in p2pdma_page_free()
  298  struct dev_pagemap *pgmap;  in pci_p2pdma_add_resource() local
  325  pgmap = &p2p_pgmap->pgmap;  in pci_p2pdma_add_resource()
  326  pgmap->range.start = pci_resource_start(pdev, bar) + offset;  in pci_p2pdma_add_resource()
  327  pgmap->range.end = pgmap->range.start + size - 1;  in pci_p2pdma_add_resource()
  328  pgmap->nr_range = 1;  in pci_p2pdma_add_resource()
  [all …]

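pci_p2pdma_pagemap embeds the dev_pagemap so per-provider state travels with every page, recovered via container_of() at line 38. The same pattern generalized (struct my_pagemap and its field are hypothetical):

    struct my_pagemap {
            struct dev_pagemap pgmap; /* embedded, not a pointer */
            void *provider_state;
    };

    static struct my_pagemap *to_my_pagemap(struct dev_pagemap *pgmap)
    {
            return container_of(pgmap, struct my_pagemap, pgmap);
    }

Any page in the mapping then reaches the private state with to_my_pagemap(page_pgmap(page)), which is exactly the move p2pdma_page_free() makes above.
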
/linux/drivers/xen/
unpopulated-alloc.c
   36  struct dev_pagemap *pgmap;  in fill_list() local
   84  pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);  in fill_list()
   85  if (!pgmap) {  in fill_list()
   90  pgmap->type = MEMORY_DEVICE_GENERIC;  in fill_list()
   91  pgmap->range = (struct range) {  in fill_list()
   95  pgmap->nr_range = 1;  in fill_list()
   96  pgmap->owner = res;  in fill_list()
  121  vaddr = memremap_pages(pgmap, NUMA_NO_NODE);  in fill_list()
  139  kfree(pgmap);  in fill_list()

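fill_list() is close to the minimum a MEMORY_DEVICE_GENERIC user must do. Condensed into one hedged sketch (error handling abbreviated; res is assumed to be an already-claimed struct resource):

    struct dev_pagemap *pgmap;
    void *vaddr;

    pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
    if (!pgmap)
            return -ENOMEM;

    pgmap->type = MEMORY_DEVICE_GENERIC;
    pgmap->range = (struct range) {
            .start = res->start,
            .end = res->end,
    };
    pgmap->nr_range = 1;
    pgmap->owner = res;

    vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
    if (IS_ERR(vaddr)) {
            kfree(pgmap);
            return PTR_ERR(vaddr);
    }
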
/linux/tools/testing/nvdimm/test/
iomap.c
   98  struct dev_pagemap *pgmap = _pgmap;  in nfit_test_kill() local
  100  WARN_ON(!pgmap);  in nfit_test_kill()
  102  percpu_ref_kill(&pgmap->ref);  in nfit_test_kill()
  104  wait_for_completion(&pgmap->done);  in nfit_test_kill()
  105  percpu_ref_exit(&pgmap->ref);  in nfit_test_kill()
  110  struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);  in dev_pagemap_percpu_release() local
  112  complete(&pgmap->done);  in dev_pagemap_percpu_release()
  115  void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)  in __wrap_devm_memremap_pages() argument
  118  resource_size_t offset = pgmap->range.start;  in __wrap_devm_memremap_pages()
  122  return devm_memremap_pages(dev, pgmap);  in __wrap_devm_memremap_pages()
  Definitions:
   99  struct dev_pagemap *pgmap = _pgmap;  nfit_test_kill() local
  111  struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);  dev_pagemap_percpu_release() local
  116  __wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)  __wrap_devm_memremap_pages() argument
  [all …]
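The test shim drives the pgmap lifetime by hand, which makes the refcount protocol visible: the release callback completes pgmap->done, and teardown is kill, then wait, then exit. Consolidated from the fragments above:

    static void dev_pagemap_percpu_release(struct percpu_ref *ref)
    {
            struct dev_pagemap *pgmap =
                    container_of(ref, struct dev_pagemap, ref);

            complete(&pgmap->done);
    }

    /* teardown, as in nfit_test_kill(): */
    percpu_ref_kill(&pgmap->ref);      /* stop new page references */
    wait_for_completion(&pgmap->done); /* drain outstanding ones */
    percpu_ref_exit(&pgmap->ref);
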
nfit_test.h
  214  void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);

/linux/include/linux/
pci-p2pdma.h
  147  struct dev_pagemap *pgmap;  member
  170  if (state->pgmap != page_pgmap(page))  in pci_p2pdma_state()

mm.h
 3810  struct dev_pagemap *pgmap);
 3874  struct dev_pagemap *pgmap)  in __vmemmap_can_optimize() argument
 3879  if (!pgmap || !is_power_of_2(sizeof(struct page)))  in __vmemmap_can_optimize()
 3882  nr_pages = pgmap_vmemmap_nr(pgmap);  in __vmemmap_can_optimize()
 3899  struct dev_pagemap *pgmap)  in vmemmap_can_optimize() argument

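The is_power_of_2(sizeof(struct page)) test at line 3879 is what lets tail struct pages tile vmemmap pages exactly. A worked example under assumed (not ABI-guaranteed) sizes:

    /*
     * Assume sizeof(struct page) == 64, PAGE_SIZE == 4 KiB, and 2 MiB
     * compound device pages, i.e. pgmap_vmemmap_nr(pgmap) == 512:
     *
     *   memmap per compound page = 512 * 64 B = 32 KiB = 8 vmemmap pages
     *
     * Vmemmap page 0 holds the head plus 63 tails; pages 1..7 hold only
     * tails and are identical, so the optimization can back all seven
     * with one shared page: 8 physical vmemmap pages shrink to 2 per
     * 2 MiB device page.
     */
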
/linux/drivers/nvdimm/
pfn_devs.c
  683  static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)  in __nvdimm_setup_pfn() argument
  685  struct range *range = &pgmap->range;  in __nvdimm_setup_pfn()
  686  struct vmem_altmap *altmap = &pgmap->altmap;  in __nvdimm_setup_pfn()
  706  pgmap->nr_range = 1;  in __nvdimm_setup_pfn()
  721  pgmap->flags |= PGMAP_ALTMAP_VALID;  in __nvdimm_setup_pfn()
  856  int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)  in nvdimm_setup_pfn() argument
  868  return __nvdimm_setup_pfn(nd_pfn, pgmap);  in nvdimm_setup_pfn()

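__nvdimm_setup_pfn() places the memmap in the pmem device itself: the altmap tells the vmemmap allocator which device pfns it may consume, and PGMAP_ALTMAP_VALID (line 721) is what makes pgmap_altmap() return it. A hedged sketch of the fields involved; offset and memmap_size are illustrative, the real values come from the on-media info block (memcpy is used because some vmem_altmap members are const):

    struct vmem_altmap __altmap = {
            .base_pfn = PHYS_PFN(pgmap->range.start),
            .reserve  = PHYS_PFN(offset),      /* info block + padding */
    };

    memcpy(&pgmap->altmap, &__altmap, sizeof(__altmap));
    pgmap->altmap.free = PHYS_PFN(memmap_size); /* pages lent to vmemmap */
    pgmap->flags |= PGMAP_ALTMAP_VALID;
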
nd.h
  662  int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
  665  struct dev_pagemap *pgmap)  in nvdimm_setup_pfn() argument

/linux/drivers/dax/
bus.h
   23  struct dev_pagemap *pgmap;  member

dax-private.h
   90  struct dev_pagemap *pgmap;  member

bus.c
  437  dev_dax->pgmap = NULL;  in kill_dev_dax()
 1411  kfree(dev_dax->pgmap);  in dev_dax_release()
 1463  if (data->pgmap) {  in __devm_create_dev_dax()
 1467  dev_dax->pgmap = kmemdup(data->pgmap,  in __devm_create_dev_dax()
 1469  if (!dev_dax->pgmap) {  in __devm_create_dev_dax()
 1525  kfree(dev_dax->pgmap);  in __devm_create_dev_dax()

/linux/fs/fuse/
virtio_fs.c
 1058  struct dev_pagemap *pgmap;  in virtio_fs_setup_dax() local
 1088  pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);  in virtio_fs_setup_dax()
 1089  if (!pgmap)  in virtio_fs_setup_dax()
 1092  pgmap->type = MEMORY_DEVICE_FS_DAX;  in virtio_fs_setup_dax()
 1099  pgmap->range = (struct range) {  in virtio_fs_setup_dax()
 1103  pgmap->nr_range = 1;  in virtio_fs_setup_dax()
 1105  fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);  in virtio_fs_setup_dax()

/linux/drivers/gpu/drm/amd/amdkfd/
kfd_svm.h
  204  #define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\

/linux/fs/
dax.c
  401  struct dev_pagemap *pgmap = page_pgmap(&folio->page);  in dax_folio_put() local
  413  new_folio->pgmap = pgmap;  in dax_folio_put()

/linux/arch/x86/mm/
init_64.c
  992  if (!params->pgmap)  in add_pages()