/linux/include/linux/
pagemap.h

    unsigned long invalidate_mapping_pages(struct address_space *mapping, …
    int invalidate_inode_pages2(struct address_space *mapping);
    int invalidate_inode_pages2_range(struct address_space *mapping, …
    int filemap_invalidate_pages(struct address_space *mapping, …
    int filemap_fdatawait_keep_errors(struct address_space *mapping);
    int filemap_fdatawait_range_keep_errors(struct address_space *mapping, …

    static inline int filemap_fdatawait(struct address_space *mapping)
    {
            return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
    }

    int filemap_write_and_wait_range(struct address_space *mapping, …
    int __filemap_fdatawrite_range(struct address_space *mapping, …
    [all …]
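These declarations form the page-cache writeback and invalidation API. A minimal sketch of how they are commonly paired — flush, wait, then invalidate; the function name and byte range are placeholders, not kernel code:

    #include <linux/pagemap.h>

    /* Sketch: write back and drop the cached pages of a byte range. */
    static int drop_cached_range(struct inode *inode, loff_t start, loff_t end)
    {
            struct address_space *mapping = inode->i_mapping;
            int ret;

            /* Flush dirty folios in [start, end] and wait for completion. */
            ret = filemap_write_and_wait_range(mapping, start, end);
            if (ret)
                    return ret;

            /* Drop the now-clean folios; fails if one is still in use. */
            return invalidate_inode_pages2_range(mapping, start >> PAGE_SHIFT,
                                                 end >> PAGE_SHIFT);
    }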
io-mapping.h

    /*
     * The io_mapping mechanism provides an abstraction for mapping …
     * See Documentation/driver-api/io-mapping.rst
     *
     * For small address space machines, mapping large objects …
     */

    io_mapping_fini(struct io_mapping *mapping)
    {
            iomap_free(mapping->base, mapping->size);
    }

    io_mapping_map_atomic_wc(struct io_mapping *mapping, …
    {
            BUG_ON(offset >= mapping->size);
            phys_addr = mapping->base + offset;
            …
            return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
    }

    io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
    [all …]
/linux/mm/
truncate.c

    static void clear_shadow_entries(struct address_space *mapping, …
    {
            XA_STATE(xas, &mapping->i_pages, start);
            …
            if (shmem_mapping(mapping) || dax_mapping(mapping))
                    …
            spin_lock(&mapping->host->i_lock);
            …
            if (mapping_shrinkable(mapping))
                    inode_add_lru(mapping->host);
            spin_unlock(&mapping->host->i_lock);
    }

    static void truncate_folio_batch_exceptionals(struct address_space *mapping, …
    {
            XA_STATE(xas, &mapping->i_pages, …
    }

    truncate_inode_folio(struct address_space *mapping, struct folio *folio)
    generic_error_remove_folio(struct address_space *mapping, struct folio *folio)
    mapping_evict_folio(struct address_space *mapping, struct folio *folio)
    truncate_inode_pages_range(struct address_space *mapping, loff_t lstart, loff_t lend)
    truncate_inode_pages(struct address_space *mapping, loff_t lstart)
    truncate_inode_pages_final(struct address_space *mapping)
    mapping_try_invalidate(struct address_space *mapping, pgoff_t start, pgoff_t end,
                           unsigned long *nr_failed)
    invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end)
    folio_launder(struct address_space *mapping, struct folio *folio)
    folio_unmap_invalidate(struct address_space *mapping, struct folio *folio, gfp_t gfp)
    invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end)
    invalidate_inode_pages2(struct address_space *mapping)
    truncate_pagecache():       struct address_space *mapping = inode->i_mapping;
    truncate_pagecache_range(): struct address_space *mapping = inode->i_mapping;
    [all …]
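The truncate API above is what filesystems call when an inode goes away. A hedged sketch of the most common caller pattern — an ->evict_inode hook for a hypothetical filesystem ("myfs" is invented); real filesystems add journal and quota teardown here:

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Sketch: hypothetical ->evict_inode that empties the page cache. */
    static void myfs_evict_inode(struct inode *inode)
    {
            /* Remove every remaining page-cache folio for this inode. */
            truncate_inode_pages_final(&inode->i_data);
            clear_inode(inode);
    }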
filemap.c

    static void page_cache_delete(struct address_space *mapping, …
    {
            XA_STATE(xas, &mapping->i_pages, folio->index);
            …
            mapping_set_update(&xas, mapping);
            …
            folio->mapping = NULL;
            …
            mapping->nrpages -= nr;
    }

    static void filemap_unaccount_folio(struct address_space *mapping, …
    {
            …
            if (mapping_exiting(mapping) && !folio_test_large(folio)) {
                    …
            filemap_nr_thps_dec(mapping);
            …
            if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))
            …
                mapping_can_writeback(mapping)))
    }
    [all …]
readahead.c

    file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
    {
            ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
    }

    read_pages():
            const struct address_space_operations *aops = rac->mapping->a_ops;

    page_cache_ra_unbounded():
            struct address_space *mapping = ractl->mapping;
            gfp_t gfp_mask = readahead_gfp_mask(mapping);
            unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
            /*
             * … filesystems already specify __GFP_NOFS in their mapping's …
             */
            trace_page_cache_ra_unbounded(mapping->host, index, nr_to_read, …
            filemap_invalidate_lock_shared(mapping);
            index = mapping_align_index(mapping, index);
    [all …]
/linux/arch/arm/mm/
dma-mapping.c

    /*
     * linux/arch/arm/mm/dma-mapping.c
     *
     * DMA uncached mapping support.
     */

    dma_contiguous_remap():
            /*
             * Clear previous low-memory mapping to ensure that the …
             */

    /* Free a buffer as defined by the above mapping. */

    /* Use the driver DMA support - see dma-mapping.h (dma_sync_*) */

    static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

    static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, …
    {
            size_t mapping_size = mapping->bits << PAGE_SHIFT;
            …
            spin_lock_irqsave(&mapping->lock, flags);
            for (i = 0; i < mapping->nr_bitmaps; i++) {
    [all …]
flush.c

    void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
    {
            /*
             * Writeback any data associated with the kernel mapping of this
             * folio … coherent with the kernel's mapping.
             */
            …
            if (mapping && cache_is_vipt_aliasing())
                    …
    }

    static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
    {
            /*
             * … aliasing VIPT: we only need to find one mapping of this page.
             */
            …
            flush_dcache_mmap_lock(mapping);
            vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
                    …
            }
            flush_dcache_mmap_unlock(mapping);
    }

    __sync_icache_dcache():
            struct address_space *mapping;
    [all …]
/linux/tools/testing/selftests/arm64/mte/
check_mmap_options.c

    int mapping;    /* struct member */

    static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, …
    {
            …
            map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);
    }

    static int check_file_memory_mapping(int mem_type, int mode, int mapping, …
    {
            …
            map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);
    }

    static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping, int atag_check)
    {
            …
            ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping, …
            …
            ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping, …
    }

    format_test_name():
            switch (tc->mapping) {
            …
            "Check %s with %s mapping, %s mode, %s memory and %s (%s)\n",
    [all …]
/linux/drivers/gpu/drm/tegra/
uapi.c

    tegra_drm_mapping_release():
            struct tegra_drm_mapping *mapping = …

            host1x_bo_unpin(mapping->map);
            host1x_bo_put(mapping->bo);
            …
            kfree(mapping);

    void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
    {
            kref_put(&mapping->ref, tegra_drm_mapping_release);
    }

    tegra_drm_channel_context_close():
            struct tegra_drm_mapping *mapping;
            …
            xa_for_each(&context->mappings, id, mapping)
                    tegra_drm_mapping_put(mapping);

    tegra_drm_ioctl_channel_map():
            struct tegra_drm_mapping *mapping;
    [all …]
/linux/Documentation/admin-guide/mm/
nommu-mmap.rst

    No-MMU memory mapping support

    The kernel has limited support for memory mapping under no-MMU conditions, such …
    mapping is made use of in conjunction with the mmap() system call, the shmat() …
    mapping is actually performed by the binfmt drivers, which call back into the …

    Memory mapping behaviour also involves the way fork(), vfork(), clone() and …

    (#) Anonymous mapping, MAP_PRIVATE
    (#) Anonymous mapping, MAP_SHARED
        … the underlying file are reflected in the mapping; copied across fork.
        - If one exists, the kernel will re-use an existing mapping to the …
        - If possible, the file mapping will be directly on the backing device …
    [all …]
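The MAP_SHARED file case above is easiest to see from user space. A minimal sketch (the path is a placeholder); on a no-MMU kernel the returned pointer may point directly at the backing store:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/tmp/example", O_RDWR);      /* placeholder path */
            if (fd < 0)
                    return 1;

            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;

            p[0] = 'x';     /* visible to every other mapper of the file */
            munmap(p, 4096);
            close(fd);
            return 0;
    }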
/linux/include/trace/events/
filemap.h

    __entry->i_ino = folio->mapping->host->i_ino;
    …
    if (folio->mapping->host->i_sb)
            __entry->s_dev = folio->mapping->host->i_sb->s_dev;
    else
            __entry->s_dev = folio->mapping->host->i_rdev;

    TP_PROTO(struct address_space *mapping, …),
    TP_ARGS(mapping, index, last_index),
    …
    __entry->i_ino = mapping->host->i_ino;
    if (mapping->host->i_sb)
            __entry->s_dev = mapping->host->i_sb->s_dev;
    else
            __entry->s_dev = mapping->host->i_rdev;
    [all …]
/linux/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c

    #include <linux/dma-mapping.h>

    etnaviv_context_map():
            /* unroll mapping in case something went wrong */

    etnaviv_iommu_remove_mapping(… struct etnaviv_vram_mapping *mapping)
    {
            struct etnaviv_gem_object *etnaviv_obj = mapping->object;
            …
            etnaviv_iommu_unmap(context, mapping->vram_node.start, …
            drm_mm_remove_node(&mapping->vram_node);
    }

    void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
    {
            struct etnaviv_iommu_context *context = mapping->context;
            …
            WARN_ON(mapping->use);
            …
            etnaviv_iommu_remove_mapping(context, mapping);
    }
    [all …]
etnaviv_gem.c

    #include <linux/dma-mapping.h>

    etnaviv_gem_get_vram_mapping():
            struct etnaviv_vram_mapping *mapping;

            list_for_each_entry(mapping, &obj->vram_list, obj_node) {
                    if (mapping->context == context)
                            return mapping;
            }

    void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
    {
            struct etnaviv_gem_object *etnaviv_obj = mapping->object;
            …
            WARN_ON(mapping->use == 0);
            mapping->use -= 1;
    }

    etnaviv_gem_mapping_get():
            struct etnaviv_vram_mapping *mapping;
    [all …]
/linux/Documentation/driver-api/
io-mapping.rst

    The io_mapping functions in linux/io-mapping.h provide an abstraction for
    efficiently mapping small regions of an I/O device to the CPU. The initial …

    A mapping object is created during driver initialization using::
    …
    mappable, while 'size' indicates how large a mapping region to …

    This _wc variant provides a mapping which may only be used with …

    With this mapping object, individual pages can be mapped either temporarily …

        void *io_mapping_map_local_wc(struct io_mapping *mapping,
                                      unsigned long offset);

        void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
                                       unsigned long offset);

    'offset' is the offset within the defined mapping region. Accessing …

    Temporary mappings are only valid in the context of the caller. The mapping …
    [all …]
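Putting the pieces of this document together, a hedged driver-side sketch — the aperture base, the 1 MiB size, and the function name are hypothetical; a real driver reads them from its PCI BAR:

    #include <linux/io-mapping.h>
    #include <linux/sizes.h>

    static void touch_aperture_page(resource_size_t aperture_base)
    {
            struct io_mapping *map;
            void __iomem *vaddr;

            /* Cover 1 MiB of the device aperture with a WC mapping object. */
            map = io_mapping_create_wc(aperture_base, SZ_1M);
            if (!map)
                    return;

            /* Temporarily map one page at offset 0, zero it, unmap it. */
            vaddr = io_mapping_map_local_wc(map, 0);
            memset_io(vaddr, 0, PAGE_SIZE);
            io_mapping_unmap_local(vaddr);

            io_mapping_free(map);
    }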
/linux/drivers/gpu/drm/panfrost/
panfrost_gem.c

    #include <linux/dma-mapping.h>

    panfrost_gem_mapping_get():
            struct panfrost_gem_mapping *iter, *mapping = NULL;
            …
                    mapping = iter;
            …
            return mapping;

    panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
    {
            if (mapping->active)
                    panfrost_mmu_unmap(mapping);

            spin_lock(&mapping->mmu->mm_lock);
            if (drm_mm_node_allocated(&mapping->mmnode))
                    drm_mm_remove_node(&mapping->mmnode);
    [all …]
/linux/drivers/gpu/drm/exynos/
exynos_drm_dma.c

    /*
     * drm_iommu_attach_device - attach device to iommu mapping
     * …
     * mapping.
     */
    drm_iommu_attach_device():
            /*
             * Keep the original DMA mapping of the sub-device and …
             */
            …
            ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
            …
            ret = iommu_attach_device(priv->mapping, subdrv_dev);

    /*
     * drm_iommu_detach_device - detach device address space mapping from device
     * …
     * mapping
     */
    drm_iommu_detach_device():
            …
            iommu_detach_device(priv->mapping, subdrv_dev);

    exynos_drm_register_dma():
            DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n", …
            …
            if (!priv->mapping) {
    [all …]
/linux/Documentation/filesystems/iomap/
design.rst

    … of mapping function calls into the filesystem across a larger amount of …

    1. Obtain a space mapping via ``->iomap_begin``
       …
       1. Revalidate the mapping and go back to (1) above, if necessary.
    …
    4. Release the mapping via ``->iomap_end``, if necessary

    * **filesystem mapping lock**: This synchronization primitive is
      internal to the filesystem and must protect the file mapping data
      from updates while a mapping is being sampled.
      …
      mapping.

    The filesystem communicates to the iomap iterator the mapping of …
    … bytes, covered by this mapping.
    [all …]
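To make the ``->iomap_begin``/``->iomap_end`` contract concrete, a hedged sketch of about the simplest possible implementation, for a hypothetical filesystem whose files are contiguous on disk (``myfs_block_of`` is invented for illustration); nothing needs releasing, so ``->iomap_end`` is omitted:

    #include <linux/iomap.h>

    static int myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                                unsigned flags, struct iomap *iomap,
                                struct iomap *srcmap)
    {
            /* Report one mapped extent covering the requested range. */
            iomap->type = IOMAP_MAPPED;
            iomap->addr = myfs_block_of(inode, pos);   /* disk byte address */
            iomap->offset = pos;
            iomap->length = length;
            iomap->bdev = inode->i_sb->s_bdev;
            return 0;
    }

    static const struct iomap_ops myfs_iomap_ops = {
            .iomap_begin = myfs_iomap_begin,
    };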
/linux/fs/gfs2/
aops.c

    gfs2_write_jdata_folio():
            struct inode * const inode = folio->mapping->host;

    __gfs2_jdata_write_folio():
            struct inode *inode = folio->mapping->host;

    /*
     * @mapping: The mapping to write
     */
    int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc)
    {
            struct inode *inode = mapping->host;
            struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
            …
            while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
                    …
            }
    }

    /*
     * @mapping: The mapping to write
     */
    static int gfs2_writepages(struct address_space *mapping, …
    {
            struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
    [all …]
/linux/tools/testing/selftests/mm/
mremap_dontunmap.c

    kernel_support_for_mremap_dontunmap():
            … "unable to unmap destination mapping");
            … "unable to unmap source mapping");

    // This helper will just validate that an entire mapping contains the expected …
    // … the source mapping mapped.

    mremap_dontunmap_simple():
            // Try to just move the whole mapping anywhere (not fixed).
            …
            … "unable to unmap destination mapping");
            … "unable to unmap source mapping");

    // This test validates that MREMAP_DONTUNMAP on a shared mapping works as expected.

    mremap_dontunmap_simple_shmem():
            // Try to just move the whole mapping anywhere (not fixed).
            …
            … "unable to unmap source mapping");
    [all …]
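For reference, a minimal sketch of the pattern these tests exercise — moving a private anonymous mapping with MREMAP_DONTUNMAP so the source range stays mapped (as zero-filled anonymous memory) rather than being unmapped. Error handling is trimmed; on older libcs the flag may need <linux/mman.h>:

    #define _GNU_SOURCE
    #include <sys/mman.h>

    int move_and_keep_source(void)
    {
            size_t len = 4096;
            void *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (src == MAP_FAILED)
                    return -1;

            /* DONTUNMAP requires MAYMOVE; the kernel picks the new address. */
            void *dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
            if (dst == MAP_FAILED)
                    return -1;

            /* Both ranges are mapped now and both must be unmapped. */
            munmap(src, len);
            munmap(dst, len);
            return 0;
    }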
/linux/arch/arm64/kvm/
pkvm.c

    __pkvm_pgtable_stage2_unmap():
            struct pkvm_mapping *mapping;
            …
            for_each_mapping_in_range_safe(pgt, start, end, mapping) {
                    ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle,
                                            mapping->gfn, mapping->nr_pages);
                    …
                    pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
                    kfree(mapping);
            }

    pkvm_pgtable_stage2_map():
            struct pkvm_mapping *mapping = NULL;
            …
            mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr,
                                              addr + size - 1);
            if (mapping) {
                    if (size == (mapping->nr_pages * PAGE_SIZE))
    [all …]
/linux/drivers/sh/clk/
core.c

    clk_establish_mapping():
            struct clk_mapping *mapping = clk->mapping;
            …
            if (!mapping) {
                    …
                    /*
                     * dummy mapping for root clocks with no specified ranges
                     */
                    clk->mapping = &dummy_mapping;
                    …
                    /*
                     * If we're on a child clock and it provides no mapping of its
                     * own, inherit the mapping from its root clock.
                     */
                    mapping = clkp->mapping;
                    BUG_ON(!mapping);
            }

            /*
             * Establish initial mapping.
             */
            if (!mapping->base && mapping->phys) {
    [all …]
/linux/fs/
dax.c

    /* @entry may no longer be the entry at the index in the mapping. */

    /*
     * A DAX folio is considered shared if it has no mapping set and ->share (which …
     */
            return !folio->mapping && folio->share;

    /*
     * … previously been associated with any mappings the ->mapping and ->index
     * fields will be set. If it has already been associated with a mapping
     * the mapping will be cleared and the share count set. It's then up to
     * … recover ->mapping and ->index information. For example by implementing …
     */

    dax_folio_make_shared():
            /*
             * … folio->mapping.
             */
            folio->mapping = NULL;

    dax_folio_put():
            folio->mapping = NULL;
    [all …]
/linux/tools/testing/selftests/namespaces/
file_handle_test.c

    TEST():
            /* Disable setgroups to allow gid mapping */
            …
            char mapping[64];
            snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
            write(uid_map_fd, mapping, strlen(mapping));
            …
            snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
            write(gid_map_fd, mapping, strlen(mapping));

    TEST():
            /* Disable setgroups to allow gid mapping */
            …
            char mapping[64];
            snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
            write(uid_map_fd, mapping, strlen(mapping));
    [all …]
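The id-mapping dance these tests repeat is worth seeing in one piece. A hedged sketch ("map_self_to_root" is invented): unshare a user namespace, then map uid/gid 0 inside it to the caller's ids outside; setgroups must be denied before an unprivileged process may write gid_map. Error handling is trimmed:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int map_self_to_root(void)
    {
            char buf[64];
            uid_t uid = getuid();   /* capture before unshare() */
            gid_t gid = getgid();
            int fd;

            if (unshare(CLONE_NEWUSER))
                    return -1;

            fd = open("/proc/self/setgroups", O_WRONLY);
            write(fd, "deny", 4);
            close(fd);

            fd = open("/proc/self/uid_map", O_WRONLY);
            snprintf(buf, sizeof(buf), "0 %d 1", uid);
            write(fd, buf, strlen(buf));
            close(fd);

            fd = open("/proc/self/gid_map", O_WRONLY);
            snprintf(buf, sizeof(buf), "0 %d 1", gid);
            write(fd, buf, strlen(buf));
            close(fd);
            return 0;
    }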
/linux/Documentation/mm/
highmem.rst

    The part of (physical) memory not covered by a permanent mapping is what we …

    These mappings are thread-local and CPU-local, meaning that the mapping …
    … CPU while the mapping is active. Although preemption is never disabled by …
    … CPU-hotplug until the mapping is disposed.

    … in which the local mapping is acquired does not allow it for other reasons.

    … virtual address of the direct mapping. Only real highmem pages are …

    … therefore try to design their code to avoid the use of kmap() by mapping …

    NOTE: Conversions to kmap_local_page() must take care to follow the mapping …

    This permits a very short duration mapping of a single page. Since the
    mapping is restricted to the CPU that issued it, it performs well, but …
    [all …]
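A minimal sketch of the kmap_local_page() pattern this document recommends over kmap(); nested local mappings would have to be released in the reverse order they were acquired:

    #include <linux/highmem.h>

    static void zero_page_contents(struct page *page)
    {
            /* Thread-local mapping; valid only in this context. */
            void *vaddr = kmap_local_page(page);

            memset(vaddr, 0, PAGE_SIZE);
            kunmap_local(vaddr);
    }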
/linux/Documentation/arch/riscv/
boot.rst

    … mapping is set up.

    … PMPs, in the direct mapping, so the firmware must correctly mark those regions …

    Virtual mapping installation
    ----------------------------

    The installation of the virtual mapping is done in 2 steps in the RISC-V kernel:

    1. ``setup_vm()`` installs a temporary kernel mapping in ``early_pg_dir`` which
       … at this point. When establishing this mapping, no allocation can be done …

    2. ``setup_vm_final()`` creates the final kernel mapping in ``swapper_pg_dir``
       … mapping. When establishing this mapping, the kernel can allocate memory but
       cannot access it directly (since the direct mapping is not present yet), so …

    … direct mapping addresses to physical addresses, they need to know the start of …
    [all …]