| /linux/mm/ |
| H A D | memremap.c |
  48  static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)  in pfn_first() argument
  50  struct range *range = &pgmap->ranges[range_id];  in pfn_first()
  55  return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));  in pfn_first()
  58  bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)  in pgmap_pfn_valid() argument
  62  for (i = 0; i < pgmap->nr_range; i++) {  in pgmap_pfn_valid()
  63  struct range *range = &pgmap->ranges[i];  in pgmap_pfn_valid()
  67  return pfn >= pfn_first(pgmap, i);  in pgmap_pfn_valid()
  73  static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)  in pfn_end() argument
  75  const struct range *range = &pgmap->ranges[range_id];  in pfn_end()
  80  static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)  in pfn_len() argument
  [all …]
|
| H A D | sparse-vmemmap.c |
  480  struct dev_pagemap *pgmap)  in reuse_compound_section() argument
  482  unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);  in reuse_compound_section()
  484  PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);  in reuse_compound_section()
  509  struct dev_pagemap *pgmap)  in vmemmap_populate_compound_pages() argument
  515  if (reuse_compound_section(start_pfn, pgmap)) {  in vmemmap_populate_compound_pages()
  529  size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));  in vmemmap_populate_compound_pages()
  563  struct dev_pagemap *pgmap)  in __populate_section_memmap() argument
  573  if (vmemmap_can_optimize(altmap, pgmap))  in __populate_section_memmap()
  574  r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);  in __populate_section_memmap()
|
| H A D | mm_init.c |
  1009  struct dev_pagemap *pgmap)  in __init_zone_device_page() argument
  1028  page_folio(page)->pgmap = pgmap;  in __init_zone_device_page()
  1055  switch (pgmap->type) {  in __init_zone_device_page()
  1077  struct dev_pagemap *pgmap)  in compound_nr_pages() argument
  1079  if (!vmemmap_can_optimize(altmap, pgmap))  in compound_nr_pages()
  1080  return pgmap_vmemmap_nr(pgmap);  in compound_nr_pages()
  1088  struct dev_pagemap *pgmap,  in memmap_init_compound() argument
  1092  unsigned int order = pgmap->vmemmap_shift;  in memmap_init_compound()
  1104  __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);  in memmap_init_compound()
  1114  struct dev_pagemap *pgmap)  in memmap_init_zone_device() argument
  [all …]
|
| H A D | memory-failure.c |
  1715  struct dev_pagemap *pgmap)  in mf_generic_kill_procs() argument
  1738  switch (pgmap->type) {  in mf_generic_kill_procs()
  2110  struct dev_pagemap *pgmap)  in memory_failure_dev_pagemap() argument
  2115  if (!pgmap_pfn_valid(pgmap, pfn))  in memory_failure_dev_pagemap()
  2122  if (pgmap_has_memory_failure(pgmap)) {  in memory_failure_dev_pagemap()
  2123  rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);  in memory_failure_dev_pagemap()
  2132  rc = mf_generic_kill_procs(pfn, flags, pgmap);  in memory_failure_dev_pagemap()
  2135  put_dev_pagemap(pgmap);  in memory_failure_dev_pagemap()
  2318  struct dev_pagemap *pgmap;  in memory_failure() local
  2347  pgmap = get_dev_pagemap(pfn);  in memory_failure()
  [all …]
|
| H A D | migrate_device.c |
  173  (folio->pgmap->owner != migrate->pgmap_owner)) {  in migrate_vma_collect_huge_pmd()
  278  struct dev_pagemap *pgmap;  in migrate_vma_collect_pmd() local
  306  pgmap = page_pgmap(page);  in migrate_vma_collect_pmd()
  309  pgmap->owner != migrate->pgmap_owner)  in migrate_vma_collect_pmd()
  348  pgmap = page_pgmap(page);  in migrate_vma_collect_pmd()
  352  pgmap->owner != migrate->pgmap_owner)  in migrate_vma_collect_pmd()
|
| H A D | memory_hotplug.c |
  349  struct dev_pagemap *pgmap;  in pfn_to_online_page() local
  378  pgmap = get_dev_pagemap(pfn);  in pfn_to_online_page()
  379  put_dev_pagemap(pgmap);  in pfn_to_online_page()
  382  if (pgmap)  in pfn_to_online_page()
  424  params->pgmap);  in __add_pages()
|
| /linux/include/linux/ |
| H A D | memremap.h |
  100  int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
  149  static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)  in pgmap_has_memory_failure() argument
  151  return pgmap->ops && pgmap->ops->memory_failure;  in pgmap_has_memory_failure()
  154  static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)  in pgmap_altmap() argument
  156  if (pgmap->flags & PGMAP_ALTMAP_VALID)  in pgmap_altmap()
  157  return &pgmap->altmap;  in pgmap_altmap()
  161  static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)  in pgmap_vmemmap_nr() argument
  163  return 1 << pgmap->vmemmap_shift;  in pgmap_vmemmap_nr()
  170  folio->pgmap->type == MEMORY_DEVICE_PRIVATE;  in folio_is_device_private()
  183  folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;  in folio_is_pci_p2pdma()
  [all …]
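Together these inlines are essentially the whole read-side API a pgmap consumer sees: pgmap_vmemmap_nr() is 1 << vmemmap_shift (base pages per compound device page), and pgmap_altmap() returns the embedded altmap only when PGMAP_ALTMAP_VALID is set, NULL otherwise. A minimal sketch of how they compose, assuming an already-populated pgmap (describe_pgmap() is a hypothetical helper, not kernel code):

```c
#include <linux/memremap.h>
#include <linux/printk.h>

/* Hypothetical helper: report how a filled-in dev_pagemap will be
 * interpreted.  Callers can test PGMAP_ALTMAP_VALID implicitly by
 * checking pgmap_altmap() for NULL. */
static void describe_pgmap(struct dev_pagemap *pgmap)
{
	unsigned long pages_per_compound = pgmap_vmemmap_nr(pgmap);
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);

	pr_info("type=%d, %lu base pages per compound page, altmap %s\n",
		pgmap->type, pages_per_compound,
		altmap ? "present" : "absent");
}
```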
|
| H A D | mm.h |
  4236  struct dev_pagemap *pgmap);
  4300  struct dev_pagemap *pgmap)  in __vmemmap_can_optimize() argument
  4305  if (!pgmap || !is_power_of_2(sizeof(struct page)))  in __vmemmap_can_optimize()
  4308  nr_pages = pgmap_vmemmap_nr(pgmap);  in __vmemmap_can_optimize()
  4325  struct dev_pagemap *pgmap)  in vmemmap_can_optimize() argument
|
| /linux/drivers/xen/ |
| H A D | unpopulated-alloc.c |
  36  struct dev_pagemap *pgmap;  in fill_list() local
  84  pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);  in fill_list()
  85  if (!pgmap) {  in fill_list()
  90  pgmap->type = MEMORY_DEVICE_GENERIC;  in fill_list()
  91  pgmap->range = (struct range) {  in fill_list()
  95  pgmap->nr_range = 1;  in fill_list()
  96  pgmap->owner = res;  in fill_list()
  121  vaddr = memremap_pages(pgmap, NUMA_NO_NODE);  in fill_list()
  139  kfree(pgmap);  in fill_list()
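The fill_list() hits above are the canonical non-managed registration pattern: allocate the pgmap, describe one physical range, then let memremap_pages() create struct pages for it. A condensed sketch of that flow with the resource acquisition elided (function name and error handling are illustrative, not the Xen code):

```c
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>
#include <linux/numa.h>
#include <linux/slab.h>

/* Sketch of the fill_list() pattern.  'res' is assumed to be an
 * already-reserved struct resource covering the physical range. */
static void *map_unpopulated_range(struct resource *res)
{
	struct dev_pagemap *pgmap;
	void *vaddr;

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;	/* any unique cookie serves as the owner */

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr))
		kfree(pgmap);	/* on success the pages keep referencing it */
	return vaddr;
}
```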
|
| /linux/drivers/dax/ |
| H A D | device.c |
  92  if (dev_dax->pgmap->vmemmap_shift)  in dax_set_mapping()
  407  struct dev_pagemap *pgmap;  in dev_dax_probe() local
  420  pgmap = dev_dax->pgmap;  in dev_dax_probe()
  422  if (dev_dax->pgmap) {  in dev_dax_probe()
  428  pgmap = devm_kzalloc(dev,  in dev_dax_probe()
  429  struct_size(pgmap, ranges, dev_dax->nr_range - 1),  in dev_dax_probe()
  431  if (!pgmap)  in dev_dax_probe()
  434  pgmap->nr_range = dev_dax->nr_range;  in dev_dax_probe()
  435  dev_dax->pgmap = pgmap;  in dev_dax_probe()
  439  pgmap->ranges[i] = *range;  in dev_dax_probe()
  [all …]
|
| H A D | bus.h |
  23  struct dev_pagemap *pgmap;  member
|
| H A D | dax-private.h |
  90  struct dev_pagemap *pgmap;  member
|
| H A D | bus.c |
  437  dev_dax->pgmap = NULL;  in kill_dev_dax()
  1411  kfree(dev_dax->pgmap);  in dev_dax_release()
  1463  if (data->pgmap) {  in __devm_create_dev_dax()
  1467  dev_dax->pgmap = kmemdup(data->pgmap,  in __devm_create_dev_dax()
  1469  if (!dev_dax->pgmap) {  in __devm_create_dev_dax()
  1525  kfree(dev_dax->pgmap);  in __devm_create_dev_dax()
|
| /linux/drivers/pci/ |
| H A D | p2pdma.c |
  32  struct dev_pagemap pgmap;  member
  36  static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)  in to_p2p_pgmap() argument
  38  return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);  in to_p2p_pgmap()
  206  struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));  in p2pdma_folio_free() local
  209  to_pci_dev(pgmap->mem->owner)->p2pdma, 1);  in p2pdma_folio_free()
  380  struct dev_pagemap *pgmap;  in pci_p2pdma_add_resource() local
  417  pgmap = &p2p_pgmap->pgmap;  in pci_p2pdma_add_resource()
  418  pgmap->range.start = pci_resource_start(pdev, bar) + offset;  in pci_p2pdma_add_resource()
  419  pgmap->range.end = pgmap->range.start + size - 1;  in pci_p2pdma_add_resource()
  420  pgmap->nr_range = 1;  in pci_p2pdma_add_resource()
  [all …]
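Note how p2pdma.c embeds the dev_pagemap by value in pci_p2pdma_pagemap and recovers the outer struct with container_of(); to_p2p_pgmap() above is the accessor, and p2pdma_folio_free() uses it to get from a page back to driver state with no lookup table. A generic sketch of the same pattern, under hypothetical names:

```c
#include <linux/container_of.h>
#include <linux/memremap.h>

/* Hypothetical driver-private wrapper: embed the dev_pagemap (by value,
 * not as a pointer) so that any code handed a struct dev_pagemap *,
 * e.g. via page_pgmap(page), can reach the driver's own state. */
struct my_drv_pagemap {
	struct dev_pagemap pgmap;
	void *private_state;
};

static struct my_drv_pagemap *to_my_pgmap(struct dev_pagemap *pgmap)
{
	return container_of(pgmap, struct my_drv_pagemap, pgmap);
}
```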
|
| /linux/tools/testing/nvdimm/test/ |
| H A D | iomap.c |
  98  struct dev_pagemap *pgmap = _pgmap;  in nfit_test_kill()
  100  WARN_ON(!pgmap);  in nfit_test_kill()
  102  percpu_ref_kill(&pgmap->ref);  in nfit_test_kill()
  104  wait_for_completion(&pgmap->done);  in nfit_test_kill()
  105  percpu_ref_exit(&pgmap->ref);  in nfit_test_kill()
  110  struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);  in dev_pagemap_percpu_release()
  112  complete(&pgmap->done);  in dev_pagemap_percpu_release()
  115  void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  118  resource_size_t offset = pgmap->range.start;  in __wrap_devm_memremap_pages()
  122  return devm_memremap_pages(dev, pgmap);  in __wrap_devm_memremap_pages()
  [all …]
| H A D | nfit_test.h |
  214  void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
|
| /linux/drivers/gpu/drm/amd/amdkfd/ |
| H A D | kfd_migrate.c |
  209  return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;  in svm_migrate_addr_to_pfn()
  239  return (addr - adev->kfd.pgmap.range.start);  in svm_migrate_addr()
  1022  struct dev_pagemap *pgmap;  in kgd2kfd_init_zone_device() local
  1034  pgmap = &kfddev->pgmap;  in kgd2kfd_init_zone_device()
  1035  memset(pgmap, 0, sizeof(*pgmap));  in kgd2kfd_init_zone_device()
  1042  pgmap->range.start = adev->gmc.aper_base;  in kgd2kfd_init_zone_device()
  1043  pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;  in kgd2kfd_init_zone_device()
  1044  pgmap->type = MEMORY_DEVICE_COHERENT;  in kgd2kfd_init_zone_device()
  1049  pgmap->range.start = res->start;  in kgd2kfd_init_zone_device()
  1050  pgmap->range.end = res->end;  in kgd2kfd_init_zone_device()
  [all …]
|
| /linux/drivers/hv/ |
| H A D | mshv_vtl_main.c |
  384  struct dev_pagemap *pgmap;  in mshv_vtl_ioctl_add_vtl0_mem() local
  396  pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);  in mshv_vtl_ioctl_add_vtl0_mem()
  397  if (!pgmap)  in mshv_vtl_ioctl_add_vtl0_mem()
  400  pgmap->ranges[0].start = PFN_PHYS(vtl0_mem.start_pfn);  in mshv_vtl_ioctl_add_vtl0_mem()
  401  pgmap->ranges[0].end = PFN_PHYS(vtl0_mem.last_pfn) - 1;  in mshv_vtl_ioctl_add_vtl0_mem()
  402  pgmap->nr_range = 1;  in mshv_vtl_ioctl_add_vtl0_mem()
  403  pgmap->type = MEMORY_DEVICE_GENERIC;  in mshv_vtl_ioctl_add_vtl0_mem()
  409  pgmap->vmemmap_shift = count_trailing_zeros(vtl0_mem.start_pfn | vtl0_mem.last_pfn);  in mshv_vtl_ioctl_add_vtl0_mem()
  412  vtl0_mem.start_pfn, vtl0_mem.last_pfn, pgmap->vmemmap_shift);  in mshv_vtl_ioctl_add_vtl0_mem()
  414  addr = devm_memremap_pages(mem_dev, pgmap);  in mshv_vtl_ioctl_add_vtl0_mem()
  [all …]
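The hit at line 409 packs a small trick: count_trailing_zeros(start_pfn | last_pfn) is the largest power-of-two order at which both range boundaries are aligned, so compound pages of that order tile the range with no remainder. A standalone restatement of the arithmetic (the helper name is hypothetical, and __builtin_ctzll stands in for the kernel macro):

```c
/* Illustrative only: the largest usable compound order is bounded by the
 * alignment of both endpoints.  E.g. for start_pfn = 0x200000 (aligned to
 * 2^21) and last_pfn = 0x280000 (aligned to 2^19), start|last = 0x280000
 * and ctz = 19, so order-19 compound pages tile the range exactly. */
static unsigned int vmemmap_shift_for(unsigned long long start_pfn,
				      unsigned long long last_pfn)
{
	return __builtin_ctzll(start_pfn | last_pfn);
}
```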
|
| /linux/drivers/nvdimm/ |
| H A D | pfn_devs.c |
  672  static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)  in __nvdimm_setup_pfn() argument
  674  struct range *range = &pgmap->range;  in __nvdimm_setup_pfn()
  675  struct vmem_altmap *altmap = &pgmap->altmap;  in __nvdimm_setup_pfn()
  695  pgmap->nr_range = 1;  in __nvdimm_setup_pfn()
  710  pgmap->flags |= PGMAP_ALTMAP_VALID;  in __nvdimm_setup_pfn()
  845  int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)  in nvdimm_setup_pfn() argument
  857  return __nvdimm_setup_pfn(nd_pfn, pgmap);  in nvdimm_setup_pfn()
|
| H A D | nd.h |
  665  int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
  668  struct dev_pagemap *pgmap)  in nvdimm_setup_pfn() argument
|
| /linux/arch/powerpc/include/asm/book3s/64/ |
| H A D | radix.h |
  357  bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap);
  364  struct dev_pagemap *pgmap);
|
| /linux/fs/fuse/ |
| H A D | virtio_fs.c |
  1056  struct dev_pagemap *pgmap;  in virtio_fs_setup_dax() local
  1086  pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);  in virtio_fs_setup_dax()
  1087  if (!pgmap)  in virtio_fs_setup_dax()
  1090  pgmap->type = MEMORY_DEVICE_FS_DAX;  in virtio_fs_setup_dax()
  1097  pgmap->range = (struct range) {  in virtio_fs_setup_dax()
  1101  pgmap->nr_range = 1;  in virtio_fs_setup_dax()
  1103  fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);  in virtio_fs_setup_dax()
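virtio_fs_setup_dax() is the managed counterpart of the Xen pattern above: devm_kzalloc() plus devm_memremap_pages() tie both the pgmap and the page mapping to the device's lifetime, so no explicit free path is needed. A condensed sketch with the DAX cache-window details elided (function name is illustrative):

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/memremap.h>

/* Sketch of the managed registration pattern: everything allocated here
 * is released automatically when 'dev' is unbound. */
static void *map_fs_dax_window(struct device *dev, phys_addr_t start,
			       resource_size_t len)
{
	struct dev_pagemap *pgmap;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->type = MEMORY_DEVICE_FS_DAX;
	pgmap->range = (struct range) {
		.start = start,
		.end = start + len - 1,
	};
	pgmap->nr_range = 1;

	return devm_memremap_pages(dev, pgmap);	/* addr or ERR_PTR */
}
```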
|
| /linux/Documentation/driver-api/pci/ |
| H A D | p2pdma.rst |
  47  pgmap of MEMORY_DEVICE_PCI_P2PDMA to create struct pages. The lifecycle of
  48  pgmap ensures that when the pgmap is destroyed all other drivers have stopped
  51  FOLL_PCI_P2PDMA. The use of FOLL_LONGTERM is prevented. As this relies on pgmap
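Per the documentation fragment above, P2PDMA pages only reach userspace-driven DMA through GUP callers that opt in with FOLL_PCI_P2PDMA, and never with FOLL_LONGTERM, since the pages' lifetime is bounded by the pgmap rather than by the pin. A minimal sketch of such a caller, assuming the four-argument pin_user_pages() of current kernels (the wrapper name is hypothetical):

```c
#include <linux/mm.h>

/* Sketch: short-lived pin of user memory that may be backed by P2PDMA
 * pages.  FOLL_LONGTERM must not be combined with FOLL_PCI_P2PDMA. */
static long pin_for_p2p_dma(unsigned long uaddr, unsigned long nr_pages,
			    struct page **pages)
{
	return pin_user_pages(uaddr, nr_pages,
			      FOLL_WRITE | FOLL_PCI_P2PDMA, pages);
}
```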
|
| /linux/arch/powerpc/mm/book3s64/ |
| H A D | radix_pgtable.c |
  980  bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)  in vmemmap_can_optimize() argument
  983  return __vmemmap_can_optimize(altmap, pgmap);  in vmemmap_can_optimize()
  1308  struct dev_pagemap *pgmap)  in vmemmap_populate_compound_pages() argument
  1354  unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);  in vmemmap_populate_compound_pages()
|
| /linux/drivers/gpu/drm/nouveau/ |
| H A D | nouveau_dmem.c |
  284  tail->pgmap = head->pgmap;  in nouveau_dmem_folio_split()
|