
Searched full:mapping (Results 1 – 25 of 3586) sorted by relevance


/linux/mm/
truncate.c
26 static void clear_shadow_entries(struct address_space *mapping, in clear_shadow_entries() argument
29 XA_STATE(xas, &mapping->i_pages, start); in clear_shadow_entries()
33 if (shmem_mapping(mapping) || dax_mapping(mapping)) in clear_shadow_entries()
38 spin_lock(&mapping->host->i_lock); in clear_shadow_entries()
48 if (mapping_shrinkable(mapping)) in clear_shadow_entries()
49 inode_add_lru(mapping->host); in clear_shadow_entries()
50 spin_unlock(&mapping->host->i_lock); in clear_shadow_entries()
60 static void truncate_folio_batch_exceptionals(struct address_space *mapping, in truncate_folio_batch_exceptionals() argument
63 XA_STATE(xas, &mapping in truncate_folio_batch_exceptionals()
170 truncate_inode_folio(struct address_space * mapping,struct folio * folio) truncate_inode_folio() argument
274 generic_error_remove_folio(struct address_space * mapping,struct folio * folio) generic_error_remove_folio() argument
300 mapping_evict_folio(struct address_space * mapping,struct folio * folio) mapping_evict_folio() argument
341 truncate_inode_pages_range(struct address_space * mapping,loff_t lstart,loff_t lend) truncate_inode_pages_range() argument
458 truncate_inode_pages(struct address_space * mapping,loff_t lstart) truncate_inode_pages() argument
473 truncate_inode_pages_final(struct address_space * mapping) truncate_inode_pages_final() argument
509 mapping_try_invalidate(struct address_space * mapping,pgoff_t start,pgoff_t end,unsigned long * nr_failed) mapping_try_invalidate() argument
574 invalidate_mapping_pages(struct address_space * mapping,pgoff_t start,pgoff_t end) invalidate_mapping_pages() argument
581 folio_launder(struct address_space * mapping,struct folio * folio) folio_launder() argument
597 folio_unmap_invalidate(struct address_space * mapping,struct folio * folio,gfp_t gfp) folio_unmap_invalidate() argument
647 invalidate_inode_pages2_range(struct address_space * mapping,pgoff_t start,pgoff_t end) invalidate_inode_pages2_range() argument
733 invalidate_inode_pages2(struct address_space * mapping) invalidate_inode_pages2() argument
756 struct address_space *mapping = inode->i_mapping; truncate_pagecache() local
877 struct address_space *mapping = inode->i_mapping; truncate_pagecache_range() local
[all …]
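
The helpers above are the page-cache teardown entry points; for context, a minimal sketch of the final-truncate pattern a filesystem's ->evict_inode commonly uses (the function name is illustrative)::

    #include <linux/fs.h>
    #include <linux/mm.h>

    static void example_evict_inode(struct inode *inode)
    {
            /* Drop every remaining page and shadow entry; the mapping
             * must stay empty afterwards, so this only runs at final
             * iput(), just before the inode is freed. */
            truncate_inode_pages_final(&inode->i_data);
            clear_inode(inode);
    }
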
filemap.c
128 static void page_cache_delete(struct address_space *mapping, in page_cache_delete() argument
131 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
134 mapping_set_update(&xas, mapping); in page_cache_delete()
144 folio->mapping = NULL; in page_cache_delete()
146 mapping->nrpages -= nr; in page_cache_delete()
149 static void filemap_unaccount_folio(struct address_space *mapping, in filemap_unaccount_folio() argument
162 if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
191 filemap_nr_thps_dec(mapping); in filemap_unaccount_folio()
209 mapping_can_writeback(mapping))) in filemap_unaccount_folio()
210 folio_account_cleaned(folio, inode_to_wb(mapping in filemap_unaccount_folio()
220 struct address_space *mapping = folio->mapping; __filemap_remove_folio() local
227 filemap_free_folio(struct address_space * mapping,struct folio * folio) filemap_free_folio() argument
248 struct address_space *mapping = folio->mapping; filemap_remove_folio() local
275 page_cache_delete_batch(struct address_space * mapping,struct folio_batch * fbatch) page_cache_delete_batch() argument
316 delete_from_page_cache_batch(struct address_space * mapping,struct folio_batch * fbatch) delete_from_page_cache_batch() argument
342 filemap_check_errors(struct address_space * mapping) filemap_check_errors() argument
356 filemap_check_and_keep_errors(struct address_space * mapping) filemap_check_and_keep_errors() argument
376 filemap_fdatawrite_wbc(struct address_space * mapping,struct writeback_control * wbc) filemap_fdatawrite_wbc() argument
409 __filemap_fdatawrite_range(struct address_space * mapping,loff_t start,loff_t end,int sync_mode) __filemap_fdatawrite_range() argument
422 __filemap_fdatawrite(struct address_space * mapping,int sync_mode) __filemap_fdatawrite() argument
428 filemap_fdatawrite(struct address_space * mapping) filemap_fdatawrite() argument
434 filemap_fdatawrite_range(struct address_space * mapping,loff_t start,loff_t end) filemap_fdatawrite_range() argument
452 filemap_fdatawrite_range_kick(struct address_space * mapping,loff_t start,loff_t end) filemap_fdatawrite_range_kick() argument
468 filemap_flush(struct address_space * mapping) filemap_flush() argument
486 filemap_range_has_page(struct address_space * mapping,loff_t start_byte,loff_t end_byte) filemap_range_has_page() argument
517 __filemap_fdatawait_range(struct address_space * mapping,loff_t start_byte,loff_t end_byte) __filemap_fdatawait_range() argument
562 filemap_fdatawait_range(struct address_space * mapping,loff_t start_byte,loff_t end_byte) filemap_fdatawait_range() argument
584 filemap_fdatawait_range_keep_errors(struct address_space * mapping,loff_t start_byte,loff_t end_byte) filemap_fdatawait_range_keep_errors() argument
610 struct address_space *mapping = file->f_mapping; file_fdatawait_range() local
631 filemap_fdatawait_keep_errors(struct address_space * mapping) filemap_fdatawait_keep_errors() argument
639 mapping_needs_writeback(struct address_space * mapping) mapping_needs_writeback() argument
644 filemap_range_has_writeback(struct address_space * mapping,loff_t start_byte,loff_t end_byte) filemap_range_has_writeback() argument
682 filemap_write_and_wait_range(struct address_space * mapping,loff_t lstart,loff_t lend) filemap_write_and_wait_range() argument
709 __filemap_set_wb_err(struct address_space * mapping,int err) __filemap_set_wb_err() argument
745 struct address_space *mapping = file->f_mapping; file_check_and_advance_wb_err() local
788 struct address_space *mapping = file->f_mapping; file_write_and_wait_range() local
822 struct address_space *mapping = old->mapping; replace_page_cache_folio() local
857 __filemap_add_folio(struct address_space * mapping,struct folio * folio,pgoff_t index,gfp_t gfp,void ** shadowp) __filemap_add_folio() argument
958 filemap_add_folio(struct address_space * mapping,struct folio * folio,pgoff_t index,gfp_t gfp) filemap_add_folio() argument
1594 struct address_space *mapping = folio->mapping; filemap_end_dropbehind() local
1777 page_cache_next_miss(struct address_space * mapping,pgoff_t index,unsigned long max_scan) page_cache_next_miss() argument
1813 page_cache_prev_miss(struct address_space * mapping,pgoff_t index,unsigned long max_scan) page_cache_prev_miss() argument
1862 filemap_get_entry(struct address_space * mapping,pgoff_t index) filemap_get_entry() argument
1909 __filemap_get_folio(struct address_space * mapping,pgoff_t index,fgf_t fgp_flags,gfp_t gfp) __filemap_get_folio() argument
2084 find_get_entries(struct address_space * mapping,pgoff_t * start,pgoff_t end,struct folio_batch * fbatch,pgoff_t * indices) find_get_entries() argument
2133 find_lock_entries(struct address_space * mapping,pgoff_t * start,pgoff_t end,struct folio_batch * fbatch,pgoff_t * indices) find_lock_entries() argument
2201 filemap_get_folios(struct address_space * mapping,pgoff_t * start,pgoff_t end,struct folio_batch * fbatch) filemap_get_folios() argument
2223 filemap_get_folios_contig(struct address_space * mapping,pgoff_t * start,pgoff_t end,struct folio_batch * fbatch) filemap_get_folios_contig() argument
2299 filemap_get_folios_tag(struct address_space * mapping,pgoff_t * start,pgoff_t end,xa_mark_t tag,struct folio_batch * fbatch) filemap_get_folios_tag() argument
2366 filemap_get_read_batch(struct address_space * mapping,pgoff_t index,pgoff_t max,struct folio_batch * fbatch) filemap_get_read_batch() argument
2428 filemap_range_uptodate(struct address_space * mapping,loff_t pos,size_t count,struct folio * folio,bool need_uptodate) filemap_range_uptodate() argument
2453 filemap_update_page(struct kiocb * iocb,struct address_space * mapping,size_t count,struct folio * folio,bool need_uptodate) filemap_update_page() argument
2510 struct address_space *mapping = iocb->ki_filp->f_mapping; filemap_create_folio() local
2562 filemap_readahead(struct kiocb * iocb,struct file * file,struct address_space * mapping,struct folio * folio,pgoff_t last_index) filemap_readahead() argument
2579 struct address_space *mapping = filp->f_mapping; filemap_get_pages() local
2679 struct address_space *mapping = filp->f_mapping; filemap_read() local
2791 struct address_space *mapping = iocb->ki_filp->f_mapping; kiocb_write_and_wait() local
2805 filemap_invalidate_pages(struct address_space * mapping,loff_t pos,loff_t end,bool nowait) filemap_invalidate_pages() argument
2832 struct address_space *mapping = iocb->ki_filp->f_mapping; kiocb_invalidate_pages() local
2872 struct address_space *mapping = file->f_mapping; generic_file_read_iter() local
3053 folio_seek_hole_data(struct xa_state * xas,struct address_space * mapping,struct folio * folio,loff_t start,loff_t end,bool seek_data) folio_seek_hole_data() argument
3110 mapping_seek_hole_data(struct address_space * mapping,loff_t start,loff_t end,int whence) mapping_seek_hole_data() argument
3215 struct address_space *mapping = file->f_mapping; do_sync_mmap_readahead() local
3375 struct address_space *mapping = file->f_mapping; filemap_fault() local
3560 next_uptodate_folio(struct xa_state * xas,struct address_space * mapping,pgoff_t end_pgoff) next_uptodate_folio() argument
3704 struct address_space *mapping = file->f_mapping; filemap_map_pages() local
3774 struct address_space *mapping = vmf->vma->vm_file->f_mapping; filemap_page_mkwrite() local
3808 struct address_space *mapping = file->f_mapping; generic_file_mmap() local
3820 struct address_space *mapping = file->f_mapping; generic_file_mmap_prepare() local
3874 do_read_cache_folio(struct address_space * mapping,pgoff_t index,filler_t filler,struct file * file,gfp_t gfp) do_read_cache_folio() argument
3952 read_cache_folio(struct address_space * mapping,pgoff_t index,filler_t filler,struct file * file) read_cache_folio() argument
3977 mapping_read_folio_gfp(struct address_space * mapping,pgoff_t index,gfp_t gfp) mapping_read_folio_gfp() argument
3984 do_read_cache_page(struct address_space * mapping,pgoff_t index,filler_t * filler,struct file * file,gfp_t gfp) do_read_cache_page() argument
3995 read_cache_page(struct address_space * mapping,pgoff_t index,filler_t * filler,struct file * file) read_cache_page() argument
4018 read_cache_page_gfp(struct address_space * mapping,pgoff_t index,gfp_t gfp) read_cache_page_gfp() argument
4048 struct address_space *mapping = iocb->ki_filp->f_mapping; kiocb_invalidate_post_direct_write() local
4060 struct address_space *mapping = iocb->ki_filp->f_mapping; generic_file_direct_write() local
4117 struct address_space *mapping = file->f_mapping; generic_perform_write() local
4231 struct address_space *mapping = file->f_mapping; __generic_file_write_iter() local
4312 struct address_space * const mapping = folio->mapping; filemap_release_folio() local
4342 struct address_space *mapping = inode->i_mapping; filemap_invalidate_inode() local
4393 filemap_cachestat(struct address_space * mapping,pgoff_t first_index,pgoff_t last_index,struct cachestat * cs) filemap_cachestat() argument
4542 struct address_space *mapping; SYSCALL_DEFINE4() local
[all …]
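
Many of the filemap.c hits are the write/wait pair used by fsync-style paths; a minimal sketch of flushing a byte range with them, assuming nothing beyond a plain inode (the wrapper name is illustrative)::

    #include <linux/fs.h>

    static int example_flush_range(struct inode *inode, loff_t start, loff_t end)
    {
            struct address_space *mapping = inode->i_mapping;
            int err, err2;

            /* Start writeback on dirty folios in [start, end]... */
            err = filemap_fdatawrite_range(mapping, start, end);
            /* ...and wait even if starting failed, so in-flight I/O
             * on the range is drained either way. */
            err2 = filemap_fdatawait_range(mapping, start, end);
            return err ? err : err2;
    }

filemap_write_and_wait_range(), also listed above, wraps essentially this pair in one call.
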
readahead.c
139 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) in file_ra_state_init() argument
141 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
148 const struct address_space_operations *aops = rac->mapping->a_ops; in read_pages()
210 struct address_space *mapping = ractl->mapping; in page_cache_ra_unbounded() local
212 gfp_t gfp_mask = readahead_gfp_mask(mapping); in page_cache_ra_unbounded()
214 unsigned int min_nrpages = mapping_min_folio_nrpages(mapping); in page_cache_ra_unbounded()
223 * filesystems already specify __GFP_NOFS in their mapping's in page_cache_ra_unbounded()
228 filemap_invalidate_lock_shared(mapping); in page_cache_ra_unbounded()
229 index = mapping_align_index(mapping, inde in page_cache_ra_unbounded()
337 struct address_space *mapping = ractl->mapping; force_page_cache_ra() local
462 struct address_space *mapping = ractl->mapping; page_cache_ra_order() local
744 struct address_space *mapping = ractl->mapping; readahead_expand() local
[all...]
/linux/arch/arm/mm/
dma-mapping.c
3 * linux/arch/arm/mm/dma-mapping.c
7 * DMA uncached mapping support.
290 * Clear previous low-memory mapping to ensure that the in dma_contiguous_remap()
603 * Free a buffer as defined by the above mapping.
677 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
754 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
756 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, in __alloc_iova() argument
762 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova()
773 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova()
774 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova()
[all …]
/linux/tools/testing/selftests/arm64/mte/
check_mmap_options.c
47 int mapping; member
113 static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, in check_anonymous_memory_mapping() argument
126 map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false); in check_anonymous_memory_mapping()
148 static int check_file_memory_mapping(int mem_type, int mode, int mapping, in check_file_memory_mapping() argument
166 map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd); in check_file_memory_mapping()
191 static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping, int atag_check) in check_clear_prot_mte_flag() argument
201 ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping, in check_clear_prot_mte_flag()
222 ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping, in check_clear_prot_mte_flag()
300 switch (tc->mapping) { in format_test_name()
337 "Check %s with %s mapping, %s mode, %s memory and %s (%s)\n", in format_test_name()
[all …]
check_child_memory.c
84 static int check_child_memory_mapping(int mem_type, int mode, int mapping) in check_child_memory_mapping() argument
93 ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping, in check_child_memory_mapping()
106 static int check_child_file_mapping(int mem_type, int mode, int mapping) in check_child_file_mapping() argument
119 map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd); in check_child_file_mapping()
170 "Check child anonymous memory with private mapping, precise mode and mmap memory\n"); in main()
172 "Check child anonymous memory with shared mapping, precise mode and mmap memory\n"); in main()
174 "Check child anonymous memory with private mapping, imprecise mode and mmap memory\n"); in main()
176 "Check child anonymous memory with shared mapping, imprecise mode and mmap memory\n"); in main()
178 "Check child anonymous memory with private mapping, precise mode and mmap/mprotect memory\n"); in main()
180 "Check child anonymous memory with shared mapping, precise mode and mmap/mprotect memory\n"); in main()
[all …]
/linux/include/linux/
io-mapping.h
17 * The io_mapping mechanism provides an abstraction for mapping
20 * See Documentation/driver-api/io-mapping.rst
35 * For small address space machines, mapping large objects
58 io_mapping_fini(struct io_mapping *mapping) in io_mapping_fini() argument
60 iomap_free(mapping->base, mapping->size); in io_mapping_fini()
65 io_mapping_map_atomic_wc(struct io_mapping *mapping, in io_mapping_map_atomic_wc() argument
70 BUG_ON(offset >= mapping->size); in io_mapping_map_atomic_wc()
71 phys_addr = mapping->base + offset; in io_mapping_map_atomic_wc()
77 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); in io_mapping_map_atomic_wc()
92 io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset) in io_mapping_map_local_wc() argument
[all …]
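
As a rough usage sketch of the local variant shown in this header, assuming 'mapping' was initialized elsewhere and 'offset' stays below mapping->size (the wrapper name is illustrative)::

    #include <linux/io-mapping.h>
    #include <linux/io.h>

    static void example_write_reg(struct io_mapping *mapping,
                                  unsigned long offset, u32 val)
    {
            void __iomem *vaddr;

            /* Short-lived, CPU-local mapping of one page of the region;
             * no sleeping is allowed until it is unmapped. */
            vaddr = io_mapping_map_local_wc(mapping, offset);
            writel(val, vaddr);
            io_mapping_unmap_local(vaddr);
    }
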
/linux/drivers/gpu/drm/tegra/
uapi.c
17 struct tegra_drm_mapping *mapping = in tegra_drm_mapping_release() local
20 host1x_bo_unpin(mapping->map); in tegra_drm_mapping_release()
21 host1x_bo_put(mapping->bo); in tegra_drm_mapping_release()
23 kfree(mapping); in tegra_drm_mapping_release()
26 void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping) in tegra_drm_mapping_put() argument
28 kref_put(&mapping->ref, tegra_drm_mapping_release); in tegra_drm_mapping_put()
33 struct tegra_drm_mapping *mapping; in tegra_drm_channel_context_close() local
39 xa_for_each(&context->mappings, id, mapping) in tegra_drm_channel_context_close()
40 tegra_drm_mapping_put(mapping); in tegra_drm_channel_context_close()
189 struct tegra_drm_mapping *mapping; in tegra_drm_ioctl_channel_map() local
[all …]
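
The release/put pair above is the standard kref idiom; a generic sketch of the same pattern (the struct and its fields are illustrative, not Tegra's)::

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct example_mapping {
            struct kref ref;
            /* ... resources pinned for the mapping's lifetime ... */
    };

    static void example_mapping_release(struct kref *ref)
    {
            struct example_mapping *m =
                    container_of(ref, struct example_mapping, ref);

            /* Unpin and release the underlying resources, then free. */
            kfree(m);
    }

    static void example_mapping_put(struct example_mapping *m)
    {
            kref_put(&m->ref, example_mapping_release);
    }

A new object would be kzalloc()ed and kref_init()ed before its first user takes a reference.
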
/linux/Documentation/admin-guide/mm/
nommu-mmap.rst
2 No-MMU memory mapping support
5 The kernel has limited support for memory mapping under no-MMU conditions, such
7 mapping is made use of in conjunction with the mmap() system call, the shmat()
9 mapping is actually performed by the binfmt drivers, which call back into the
12 Memory mapping behaviour also involves the way fork(), vfork(), clone() and
19 (#) Anonymous mapping, MAP_PRIVATE
27 (#) Anonymous mapping, MAP_SHARED
37 the underlying file are reflected in the mapping; copied across fork.
41 - If one exists, the kernel will re-use an existing mapping to the
45 - If possible, the file mapping will be directly on the backing device
[all …]
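
A minimal userspace sketch of the two anonymous cases listed above; it runs unchanged on MMU kernels, but under no-MMU the usual protection between the two mappings does not apply (a 4096-byte page size is assumed for brevity)::

    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
            void *priv = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            void *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);

            if (priv == MAP_FAILED || shared == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            /* The shared region is preserved across fork(); the private
             * one is copied into the child. */
            munmap(shared, 4096);
            munmap(priv, 4096);
            return 0;
    }
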
/linux/include/trace/events/
filemap.h
32 __entry->i_ino = folio->mapping->host->i_ino;
34 if (folio->mapping->host->i_sb)
35 __entry->s_dev = folio->mapping->host->i_sb->s_dev;
37 __entry->s_dev = folio->mapping->host->i_rdev;
62 struct address_space *mapping,
67 TP_ARGS(mapping, index, last_index),
77 __entry->i_ino = mapping->host->i_ino;
78 if (mapping->host->i_sb)
80 mapping->host->i_sb->s_dev;
82 __entry->s_dev = mapping->host->i_rdev;
[all …]
/linux/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c
6 #include <linux/dma-mapping.h>
53 /* unroll mapping in case something went wrong */ in etnaviv_context_map()
113 struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_remove_mapping() argument
115 struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_iommu_remove_mapping()
119 etnaviv_iommu_unmap(context, mapping->vram_node.start, in etnaviv_iommu_remove_mapping()
121 drm_mm_remove_node(&mapping->vram_node); in etnaviv_iommu_remove_mapping()
124 void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_reap_mapping() argument
126 struct etnaviv_iommu_context *context = mapping->context; in etnaviv_iommu_reap_mapping()
129 WARN_ON(mapping->use); in etnaviv_iommu_reap_mapping()
131 etnaviv_iommu_remove_mapping(context, mapping); in etnaviv_iommu_reap_mapping()
[all …]
etnaviv_gem.c
7 #include <linux/dma-mapping.h>
218 struct etnaviv_vram_mapping *mapping; in etnaviv_gem_get_vram_mapping() local
220 list_for_each_entry(mapping, &obj->vram_list, obj_node) { in etnaviv_gem_get_vram_mapping()
221 if (mapping->context == context) in etnaviv_gem_get_vram_mapping()
222 return mapping; in etnaviv_gem_get_vram_mapping()
228 void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping) in etnaviv_gem_mapping_unreference() argument
230 struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_gem_mapping_unreference()
233 WARN_ON(mapping->use == 0); in etnaviv_gem_mapping_unreference()
234 mapping->use -= 1; in etnaviv_gem_mapping_unreference()
245 struct etnaviv_vram_mapping *mapping; in etnaviv_gem_mapping_get() local
[all …]
/linux/Documentation/driver-api/
io-mapping.rst
8 The io_mapping functions in linux/io-mapping.h provide an abstraction for
9 efficiently mapping small regions of an I/O device to the CPU. The initial
14 A mapping object is created during driver initialization using::
20 mappable, while 'size' indicates how large a mapping region to
23 This _wc variant provides a mapping which may only be used with
27 With this mapping object, individual pages can be mapped either temporarily
31 void *io_mapping_map_local_wc(struct io_mapping *mapping,
34 void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
37 'offset' is the offset within the defined mapping region. Accessing
46 Temporary mappings are only valid in the context of the caller. The mapping
[all …]
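
A minimal sketch of the documented create/use/free cycle around io_mapping_create_wc(), assuming 'base' and 'size' describe a write-combine-mappable bus-address region (the container struct is illustrative)::

    #include <linux/io-mapping.h>

    struct example_dev {
            struct io_mapping *lmem;        /* illustrative container */
    };

    static int example_init(struct example_dev *dev,
                            resource_size_t base, unsigned long size)
    {
            dev->lmem = io_mapping_create_wc(base, size);
            return dev->lmem ? 0 : -ENOMEM;
    }

    static void example_fini(struct example_dev *dev)
    {
            /* Individual pages are mapped/unmapped in between via the
             * io_mapping_map_local_wc()/io_mapping_unmap_local() pair. */
            io_mapping_free(dev->lmem);
    }
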
/linux/drivers/gpu/drm/panfrost/
panfrost_gem.c
8 #include <linux/dma-mapping.h>
94 struct panfrost_gem_mapping *iter, *mapping = NULL; in panfrost_gem_mapping_get() local
100 mapping = iter; in panfrost_gem_mapping_get()
106 return mapping; in panfrost_gem_mapping_get()
110 panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) in panfrost_gem_teardown_mapping() argument
112 if (mapping->active) in panfrost_gem_teardown_mapping()
113 panfrost_mmu_unmap(mapping); in panfrost_gem_teardown_mapping()
115 spin_lock(&mapping->mmu->mm_lock); in panfrost_gem_teardown_mapping()
116 if (drm_mm_node_allocated(&mapping->mmnode)) in panfrost_gem_teardown_mapping()
117 drm_mm_remove_node(&mapping->mmnode); in panfrost_gem_teardown_mapping()
[all …]
/linux/drivers/gpu/drm/exynos/
exynos_drm_dma.c
34 * drm_iommu_attach_device- attach device to iommu mapping
40 * mapping.
57 * Keep the original DMA mapping of the sub-device and in drm_iommu_attach_device()
66 ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); in drm_iommu_attach_device()
68 ret = iommu_attach_device(priv->mapping, subdrv_dev); in drm_iommu_attach_device()
75 * drm_iommu_detach_device -detach device address space mapping from device
81 * mapping
92 iommu_detach_device(priv->mapping, subdrv_dev); in drm_iommu_detach_device()
102 DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n", in exynos_drm_register_dma()
109 if (!priv->mapping) { in exynos_drm_register_dma()
[all …]
/linux/Documentation/filesystems/iomap/
design.rst
70 of mapping function calls into the filesystem across a larger amount of
78 1. Obtain a space mapping via ``->iomap_begin``
82 1. Revalidate the mapping and go back to (1) above, if necessary.
89 4. Release the mapping via ``->iomap_end``, if necessary
130 * **filesystem mapping lock**: This synchronization primitive is
131 internal to the filesystem and must protect the file mapping data
132 from updates while a mapping is being sampled.
138 mapping.
154 The filesystem communicates to the iomap iterator the mapping of
176 bytes, covered by this mapping.
[all …]
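
On the filesystem side, the contract described above boils down to filling in a struct iomap for the queried range; a schematic sketch of the simplest valid ->iomap_begin, one that reports a hole everywhere (names are illustrative, and a real implementation would consult its extent map)::

    #include <linux/iomap.h>

    static int example_iomap_begin(struct inode *inode, loff_t pos,
                                   loff_t length, unsigned flags,
                                   struct iomap *iomap, struct iomap *srcmap)
    {
            /* Describe the mapping covering 'pos': here, always a hole. */
            iomap->type = IOMAP_HOLE;
            iomap->offset = pos;
            iomap->length = length;
            iomap->addr = IOMAP_NULL_ADDR;
            return 0;
    }

    static const struct iomap_ops example_iomap_ops = {
            .iomap_begin = example_iomap_begin,
    };
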
/linux/fs/gfs2/
aops.c
74 struct inode * const inode = folio->mapping->host; in gfs2_write_jdata_folio()
105 struct inode *inode = folio->mapping->host; in __gfs2_jdata_write_folio()
122 * @mapping: The mapping to write
127 int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc) in gfs2_jdata_writeback() argument
129 struct inode *inode = mapping->host; in gfs2_jdata_writeback()
131 struct gfs2_sbd *sdp = GFS2_SB(mapping->host); in gfs2_jdata_writeback()
139 while ((folio = writeback_iter(mapping, wbc, folio, &error))) { in gfs2_jdata_writeback()
153 * @mapping: The mapping to write
158 static int gfs2_writepages(struct address_space *mapping, in gfs2_writepages() argument
161 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); in gfs2_writepages()
[all …]
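
The loop visible in the gfs2 snippet is the generic writeback_iter() pattern; a minimal sketch of a ->writepages built on it, where the stand-in per-folio helper only models the caller's duty to unlock (names are illustrative)::

    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    /* Stand-in for the real writeout: a real one submits I/O for the
     * locked folio handed to it and is responsible for unlocking. */
    static int example_write_folio(struct folio *folio,
                                   struct writeback_control *wbc)
    {
            folio_unlock(folio);
            return 0;
    }

    static int example_writepages(struct address_space *mapping,
                                  struct writeback_control *wbc)
    {
            struct folio *folio = NULL;
            int error = 0;

            while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                    error = example_write_folio(folio, wbc);
            return error;
    }
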
/linux/tools/testing/selftests/mm/
mremap_dontunmap.c
59 "unable to unmap destination mapping"); in kernel_support_for_mremap_dontunmap()
63 "unable to unmap source mapping"); in kernel_support_for_mremap_dontunmap()
67 // This helper will just validate that an entire mapping contains the expected
94 // the source mapping mapped.
106 // Try to just move the whole mapping anywhere (not fixed). in mremap_dontunmap_simple()
122 "unable to unmap destination mapping"); in mremap_dontunmap_simple()
124 "unable to unmap source mapping"); in mremap_dontunmap_simple()
128 // This test validates that MREMAP_DONTUNMAP on a shared mapping works as expected.
148 // Try to just move the whole mapping anywhere (not fixed). in mremap_dontunmap_simple_shmem()
155 "unable to unmap source mapping"); in mremap_dontunmap_simple_shmem()
[all …]
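
A minimal userspace sketch of the behaviour these selftests exercise: with MREMAP_DONTUNMAP the pages move to the destination while the source stays mapped but loses its contents (anonymous private case)::

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
            size_t len = 4096;
            void *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            void *dst;

            if (src == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            /* MREMAP_DONTUNMAP requires MREMAP_MAYMOVE and equal sizes. */
            dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
            if (dst == MAP_FAILED) {
                    perror("mremap");       /* pre-5.7 kernels lack the flag */
                    return 1;
            }
            munmap(dst, len);
            munmap(src, len);               /* still a valid, now-empty mapping */
            return 0;
    }
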
/linux/arch/arm64/kvm/
pkvm.c
301 struct pkvm_mapping *mapping; in __pkvm_pgtable_stage2_unmap() local
307 for_each_mapping_in_range_safe(pgt, start, end, mapping) { in __pkvm_pgtable_stage2_unmap()
308 ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, in __pkvm_pgtable_stage2_unmap()
309 mapping->nr_pages); in __pkvm_pgtable_stage2_unmap()
312 pkvm_mapping_remove(mapping, &pgt->pkvm_mappings); in __pkvm_pgtable_stage2_unmap()
313 kfree(mapping); in __pkvm_pgtable_stage2_unmap()
336 struct pkvm_mapping *mapping = NULL; in pkvm_pgtable_stage2_map() local
352 mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1); in pkvm_pgtable_stage2_map()
353 if (mapping) { in pkvm_pgtable_stage2_map()
354 if (size == (mapping->nr_pages * PAGE_SIZE)) in pkvm_pgtable_stage2_map()
[all …]
/linux/drivers/sh/clk/
core.c
340 struct clk_mapping *mapping = clk->mapping; in clk_establish_mapping() local
345 if (!mapping) { in clk_establish_mapping()
349 * dummy mapping for root clocks with no specified ranges in clk_establish_mapping()
352 clk->mapping = &dummy_mapping; in clk_establish_mapping()
357 * If we're on a child clock and it provides no mapping of its in clk_establish_mapping()
358 * own, inherit the mapping from its root clock. in clk_establish_mapping()
361 mapping = clkp->mapping; in clk_establish_mapping()
362 BUG_ON(!mapping); in clk_establish_mapping()
366 * Establish initial mapping. in clk_establish_mapping()
368 if (!mapping->base && mapping->phys) { in clk_establish_mapping()
[all …]
/linux/fs/
dax.c
181 * @entry may no longer be the entry at the index in the mapping.
346 * A DAX folio is considered shared if it has no mapping set and ->share (which
353 return !folio->mapping && folio->share; in dax_folio_is_shared()
359 * previously been associated with any mappings the ->mapping and ->index
360 * fields will be set. If it has already been associated with a mapping
361 * the mapping will be cleared and the share count set. It's then up to
363 * recover ->mapping and ->index information. For example by implementing
370 * folio->mapping. in dax_folio_make_shared()
372 folio->mapping = NULL; in dax_folio_make_shared()
394 folio->mapping = NULL; in dax_folio_put()
[all …]
/linux/Documentation/mm/
highmem.rst
18 The part of (physical) memory not covered by a permanent mapping is what we
64 These mappings are thread-local and CPU-local, meaning that the mapping
66 CPU while the mapping is active. Although preemption is never disabled by
68 CPU-hotplug until the mapping is disposed.
71 in which the local mapping is acquired does not allow it for other reasons.
82 virtual address of the direct mapping. Only real highmem pages are
95 therefore try to design their code to avoid the use of kmap() by mapping
107 NOTE: Conversions to kmap_local_page() must take care to follow the mapping
116 This permits a very short duration mapping of a single page. Since the
117 mapping is restricted to the CPU that issued it, it performs well, but
[all …]
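
A minimal sketch of the kmap_local_page() pattern the text recommends: map, access, unmap, with no sleeping in between (for zeroing specifically the kernel also offers clear_highpage(); the wrapper name is illustrative)::

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void example_zero_page(struct page *page)
    {
            /* Thread- and CPU-local mapping, cheap on non-highmem pages. */
            void *addr = kmap_local_page(page);

            memset(addr, 0, PAGE_SIZE);
            kunmap_local(addr);
    }
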
/linux/Documentation/arch/riscv/
boot.rst
14 mapping is set up.
40 PMPs, in the direct mapping, so the firmware must correctly mark those regions
117 Virtual mapping installation
120 The installation of the virtual mapping is done in 2 steps in the RISC-V kernel:
122 1. ``setup_vm()`` installs a temporary kernel mapping in ``early_pg_dir`` which
124 at this point. When establishing this mapping, no allocation can be done
128 2. ``setup_vm_final()`` creates the final kernel mapping in ``swapper_pg_dir``
130 mapping. When establishing this mapping, the kernel can allocate memory but
131 cannot access it directly (since the direct mapping is not present yet), so
136 direct mapping addresses to physical addresses, they need to know the start of
[all …]
/linux/drivers/pci/
devres.c
11 * It is very strongly tied to the statically allocated mapping table in struct
13 * functions in this file providing things like ranged mapping by bypassing
15 * enter the mapping addresses into the table for users of the old API.
42 * A requested region spanning an entire BAR, and a mapping for
48 * A mapping within a BAR, either spanning the whole BAR or just a
435 * mapping's address directly from one of the pcim_* mapping functions. For
457 * Fill the legacy mapping-table, so that drivers using the old API can
458 * still get a BAR's mapping address through pcim_iomap_table().
461 void __iomem *mapping, int bar) in pcim_add_mapping_to_legacy_table() argument
475 legacy_iomap_table[bar] = mapping; in pcim_add_mapping_to_legacy_table()
[all …]
/linux/Documentation/core-api/
dma-attributes.rst
6 defined in linux/dma-mapping.h.
11 DMA_ATTR_WEAK_ORDERING specifies that reads and writes to the mapping
21 DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be
32 virtual mapping for the allocated buffer. On some architectures creating
33 such mapping is non-trivial task and consumes very limited resources
52 having a mapping created separately for each device and is usually
67 device domain after releasing a mapping for it. Use this attribute with
73 By default DMA-mapping subsystem is allowed to assemble the buffer
82 This is a hint to the DMA-mapping subsystem that it's probably not worth
84 efficiency (AKA it's not worth trying to build the mapping out of larger
[all …]
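
A minimal sketch of passing one of these attributes at allocation time; with DMA_ATTR_NO_KERNEL_MAPPING the returned CPU "address" is an opaque cookie, not a pointer to dereference (the wrapper name is illustrative)::

    #include <linux/dma-mapping.h>

    static void *example_alloc(struct device *dev, size_t size,
                               dma_addr_t *handle)
    {
            /* Skip the kernel virtual mapping entirely; only the DMA
             * handle in *handle is meant to be used afterwards. */
            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
                                   DMA_ATTR_NO_KERNEL_MAPPING);
    }
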
