/linux/include/linux/

pagemap.h:
    21  unsigned long invalidate_mapping_pages(struct address_space *mapping,
    30  int invalidate_inode_pages2(struct address_space *mapping);
    31  int invalidate_inode_pages2_range(struct address_space *mapping,
    35  int filemap_invalidate_pages(struct address_space *mapping,
    41  int filemap_flush_nr(struct address_space *mapping, long *nr_to_write);
    42  int filemap_fdatawait_keep_errors(struct address_space *mapping);
    44  int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
    49  static inline int filemap_fdatawait(struct address_space *mapping)  in filemap_fdatawait() argument
    51  return filemap_fdatawait_range(mapping, 0, LLONG_MAX);  in filemap_fdatawait()
    55  int filemap_write_and_wait_range(struct address_space *mapping,
    [all …]

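The pagemap.h hits above are the page-cache flush/wait entry points. As a rough illustration of how they fit together, here is a minimal sketch (not taken from the kernel; the helper name and the simplified error handling are assumptions) of writing back one inode's range and then dropping the clean pages:

```c
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical helper: write back a byte range of an inode, wait for it,
 * then drop the now-clean pages from the page cache.
 */
static int example_flush_and_drop(struct inode *inode, loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	int err;

	/* start writeback for [start, end] and wait for completion */
	err = filemap_write_and_wait_range(mapping, start, end);
	if (err)
		return err;

	/* drop clean pages; the return value is how many were invalidated */
	invalidate_mapping_pages(mapping, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	return 0;
}
```
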
io-mapping.h:
    58  io_mapping_fini(struct io_mapping *mapping)  in io_mapping_fini() argument
    60  iomap_free(mapping->base, mapping->size);  in io_mapping_fini()
    65  io_mapping_map_atomic_wc(struct io_mapping *mapping,  in io_mapping_map_atomic_wc() argument
    70  BUG_ON(offset >= mapping->size);  in io_mapping_map_atomic_wc()
    71  phys_addr = mapping->base + offset;  in io_mapping_map_atomic_wc()
    77  return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);  in io_mapping_map_atomic_wc()
    92  io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)  in io_mapping_map_local_wc() argument
    96  BUG_ON(offset >= mapping->size);  in io_mapping_map_local_wc()
    97  phys_addr = mapping->base + offset;  in io_mapping_map_local_wc()
    98  return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);  in io_mapping_map_local_wc()
    [all …]

shmem_fs.h:
    117  bool shmem_mapping(const struct address_space *mapping);
    119  static inline bool shmem_mapping(const struct address_space *mapping)  in shmem_mapping() argument
    124  void shmem_unlock_mapping(struct address_space *mapping);
    125  struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
    164  extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
    178  struct folio *shmem_read_folio_gfp(struct address_space *mapping,
    181  static inline struct folio *shmem_read_folio(struct address_space *mapping,  in shmem_read_folio() argument
    184  return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));  in shmem_read_folio()
    188  struct address_space *mapping, pgoff_t index)  in shmem_read_mapping_page() argument
    190  return shmem_read_mapping_page_gfp(mapping, index,  in shmem_read_mapping_page()
    [all …]

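The shmem_fs.h hits show that shmem_read_folio() is simply shmem_read_folio_gfp() called with the mapping's default GFP mask. A small sketch of how a caller might use it (the wrapper function here is hypothetical; the f_mapping/ERR_PTR handling is the usual kernel idiom):

```c
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>

/* Hypothetical: look up (allocating or swapping in if needed) the folio at
 * 'index' of a shmem-backed file.  The caller is responsible for folio_put().
 */
static struct folio *example_shmem_get_folio(struct file *shmem_file, pgoff_t index)
{
	struct address_space *mapping = shmem_file->f_mapping;

	if (!shmem_mapping(mapping))
		return ERR_PTR(-EINVAL);	/* only valid for shmem/tmpfs mappings */

	return shmem_read_folio(mapping, index);
}
```
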
/linux/mm/

truncate.c:
    26  static void clear_shadow_entries(struct address_space *mapping,  in clear_shadow_entries() argument
    29  XA_STATE(xas, &mapping->i_pages, start);  in clear_shadow_entries()
    33  if (shmem_mapping(mapping) || dax_mapping(mapping))  in clear_shadow_entries()
    38  spin_lock(&mapping->host->i_lock);  in clear_shadow_entries()
    48  if (mapping_shrinkable(mapping))  in clear_shadow_entries()
    49  inode_lru_list_add(mapping->host);  in clear_shadow_entries()
    50  spin_unlock(&mapping->host->i_lock);  in clear_shadow_entries()
    60  static void truncate_folio_batch_exceptionals(struct address_space *mapping,  in truncate_folio_batch_exceptionals() argument
    63  XA_STATE(xas, &mapping->i_pages, indices[0]);  in truncate_folio_batch_exceptionals()
    69  if (shmem_mapping(mapping))  in truncate_folio_batch_exceptionals()
    [all …]

filemap.c:
    129  static void page_cache_delete(struct address_space *mapping,  in page_cache_delete() argument
    132  XA_STATE(xas, &mapping->i_pages, folio->index);  in page_cache_delete()
    135  mapping_set_update(&xas, mapping);  in page_cache_delete()
    145  folio->mapping = NULL;  in page_cache_delete()
    147  mapping->nrpages -= nr;  in page_cache_delete()
    150  static void filemap_unaccount_folio(struct address_space *mapping,  in filemap_unaccount_folio() argument
    163  if (mapping_exiting(mapping) && !folio_test_large(folio)) {  in filemap_unaccount_folio()
    192  filemap_nr_thps_dec(mapping);  in filemap_unaccount_folio()
    194  if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))  in filemap_unaccount_folio()
    213  mapping_can_writeback(mapping)))  in filemap_unaccount_folio()
    [all …]

readahead.c:
    142  file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)  in file_ra_state_init() argument
    144  ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;  in file_ra_state_init()
    151  const struct address_space_operations *aops = rac->mapping->a_ops;  in read_pages()
    213  struct address_space *mapping = ractl->mapping;  in page_cache_ra_unbounded() local
    215  gfp_t gfp_mask = readahead_gfp_mask(mapping);  in page_cache_ra_unbounded()
    217  unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);  in page_cache_ra_unbounded()
    231  trace_page_cache_ra_unbounded(mapping->host, index, nr_to_read,  in page_cache_ra_unbounded()
    233  filemap_invalidate_lock_shared(mapping);  in page_cache_ra_unbounded()
    234  index = mapping_align_index(mapping, index);  in page_cache_ra_unbounded()
    257  struct folio *folio = xa_load(&mapping->i_pages, index + i);  in page_cache_ra_unbounded()
    [all …]

page-writeback.c:
    2046  int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,  in balance_dirty_pages_ratelimited_flags() argument
    2049  struct inode *inode = mapping->host;  in balance_dirty_pages_ratelimited_flags()
    2116  void balance_dirty_pages_ratelimited(struct address_space *mapping)  in balance_dirty_pages_ratelimited() argument
    2118  balance_dirty_pages_ratelimited_flags(mapping, 0);  in balance_dirty_pages_ratelimited()
    2383  void tag_pages_for_writeback(struct address_space *mapping,  in tag_pages_for_writeback() argument
    2386  XA_STATE(xas, &mapping->i_pages, start);  in tag_pages_for_writeback()
    2405  static bool folio_prepare_writeback(struct address_space *mapping,  in folio_prepare_writeback() argument
    2415  if (unlikely(folio->mapping != mapping))  in folio_prepare_writeback()
    2445  static struct folio *writeback_get_folio(struct address_space *mapping,  in writeback_get_folio() argument
    2455  filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),  in writeback_get_folio()
    [all …]

/linux/include/trace/events/

filemap.h:
    32  __entry->i_ino = folio->mapping->host->i_ino;
    34  if (folio->mapping->host->i_sb)
    35  __entry->s_dev = folio->mapping->host->i_sb->s_dev;
    37  __entry->s_dev = folio->mapping->host->i_rdev;
    62  struct address_space *mapping,
    67  TP_ARGS(mapping, index, last_index),
    77  __entry->i_ino = mapping->host->i_ino;
    78  if (mapping->host->i_sb)
    80  mapping->host->i_sb->s_dev;
    82  __entry->s_dev = mapping->host->i_rdev;
    [all …]

/linux/tools/testing/selftests/arm64/mte/

check_mmap_options.c:
    47   int mapping;  member
    113  static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping,  in check_anonymous_memory_mapping() argument
    126  map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);  in check_anonymous_memory_mapping()
    148  static int check_file_memory_mapping(int mem_type, int mode, int mapping,  in check_file_memory_mapping() argument
    166  map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);  in check_file_memory_mapping()
    191  static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping, int atag_check)  in check_clear_prot_mte_flag() argument
    201  ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,  in check_clear_prot_mte_flag()
    222  ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping,  in check_clear_prot_mte_flag()
    300  switch (tc->mapping) {  in format_test_name()
    370  .mapping = MAP_PRIVATE,  in main()
    [all …]

/linux/drivers/gpu/drm/tegra/

uapi.c:
    17   struct tegra_drm_mapping *mapping =  in tegra_drm_mapping_release() local
    20   host1x_bo_unpin(mapping->map);  in tegra_drm_mapping_release()
    21   host1x_bo_put(mapping->bo);  in tegra_drm_mapping_release()
    23   kfree(mapping);  in tegra_drm_mapping_release()
    26   void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)  in tegra_drm_mapping_put() argument
    28   kref_put(&mapping->ref, tegra_drm_mapping_release);  in tegra_drm_mapping_put()
    33   struct tegra_drm_mapping *mapping;  in tegra_drm_channel_context_close() local
    39   xa_for_each(&context->mappings, id, mapping)  in tegra_drm_channel_context_close()
    40   tegra_drm_mapping_put(mapping);  in tegra_drm_channel_context_close()
    192  struct tegra_drm_mapping *mapping;  in tegra_drm_ioctl_channel_map() local
    [all …]

/linux/arch/arm/mm/

dma-mapping.c:
    752  static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
    754  static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,  in __alloc_iova() argument
    760  size_t mapping_size = mapping->bits << PAGE_SHIFT;  in __alloc_iova()
    771  spin_lock_irqsave(&mapping->lock, flags);  in __alloc_iova()
    772  for (i = 0; i < mapping->nr_bitmaps; i++) {  in __alloc_iova()
    773  start = bitmap_find_next_zero_area(mapping->bitmaps[i],  in __alloc_iova()
    774  mapping->bits, 0, count, align);  in __alloc_iova()
    776  if (start > mapping->bits)  in __alloc_iova()
    779  bitmap_set(mapping->bitmaps[i], start, count);  in __alloc_iova()
    788  if (i == mapping->nr_bitmaps) {  in __alloc_iova()
    [all …]

flush.c:
    199  void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)  in __flush_dcache_folio() argument
    234  if (mapping && cache_is_vipt_aliasing())  in __flush_dcache_folio()
    238  static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)  in __flush_dcache_aliases() argument
    253  flush_dcache_mmap_lock(mapping);  in __flush_dcache_aliases()
    254  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {  in __flush_dcache_aliases()
    281  flush_dcache_mmap_unlock(mapping);  in __flush_dcache_aliases()
    289  struct address_space *mapping;  in __sync_icache_dcache() local
    303  mapping = folio_flush_mapping(folio);  in __sync_icache_dcache()
    305  mapping = NULL;  in __sync_icache_dcache()
    308  __flush_dcache_folio(mapping, folio);  in __sync_icache_dcache()
    [all …]

/linux/tools/testing/selftests/vfio/

vfio_dma_mapping_test.c:
    40   struct iommu_mapping *mapping)  in intel_iommu_mapping_get() argument
    68   memset(mapping, 0, sizeof(*mapping));  in intel_iommu_mapping_get()
    69   parse_next_value(&rest, &mapping->pgd);  in intel_iommu_mapping_get()
    70   parse_next_value(&rest, &mapping->p4d);  in intel_iommu_mapping_get()
    71   parse_next_value(&rest, &mapping->pud);  in intel_iommu_mapping_get()
    72   parse_next_value(&rest, &mapping->pmd);  in intel_iommu_mapping_get()
    73   parse_next_value(&rest, &mapping->pte);  in intel_iommu_mapping_get()
    88   struct iommu_mapping *mapping)  in iommu_mapping_get() argument
    91   return intel_iommu_mapping_get(bdf, iova, mapping);  in iommu_mapping_get()
    140  struct iommu_mapping mapping;  in TEST_F() local
    [all …]

/linux/arch/arm64/kvm/

pkvm.c:
    329  struct pkvm_mapping *mapping;  in __pkvm_pgtable_stage2_unmap() local
    335  for_each_mapping_in_range_safe(pgt, start, end, mapping) {  in __pkvm_pgtable_stage2_unmap()
    336  ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn,  in __pkvm_pgtable_stage2_unmap()
    337  mapping->nr_pages);  in __pkvm_pgtable_stage2_unmap()
    340  pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);  in __pkvm_pgtable_stage2_unmap()
    341  kfree(mapping);  in __pkvm_pgtable_stage2_unmap()
    364  struct pkvm_mapping *mapping = NULL;  in pkvm_pgtable_stage2_map() local
    380  mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1);  in pkvm_pgtable_stage2_map()
    381  if (mapping) {  in pkvm_pgtable_stage2_map()
    382  if (size == (mapping->nr_pages * PAGE_SIZE))  in pkvm_pgtable_stage2_map()
    [all …]

/linux/drivers/gpu/drm/exynos/

exynos_drm_dma.c:
    66   ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);  in drm_iommu_attach_device()
    68   ret = iommu_attach_device(priv->mapping, subdrv_dev);  in drm_iommu_attach_device()
    92   iommu_detach_device(priv->mapping, subdrv_dev);  in drm_iommu_detach_device()
    109  if (!priv->mapping) {  in exynos_drm_register_dma()
    110  void *mapping = NULL;  in exynos_drm_register_dma() local
    113  mapping = arm_iommu_create_mapping(dev,  in exynos_drm_register_dma()
    116  mapping = iommu_get_domain_for_dev(priv->dma_dev);  in exynos_drm_register_dma()
    118  if (!mapping)  in exynos_drm_register_dma()
    120  priv->mapping = mapping;  in exynos_drm_register_dma()
    140  arm_iommu_release_mapping(priv->mapping);  in exynos_drm_cleanup_dma()
    [all …]

/linux/tools/testing/selftests/namespaces/

file_handle_test.c:
    496  char mapping[64];  in TEST() local
    497  snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());  in TEST()
    498  write(uid_map_fd, mapping, strlen(mapping));  in TEST()
    501  snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());  in TEST()
    502  write(gid_map_fd, mapping, strlen(mapping));  in TEST()
    625  char mapping[64];  in TEST() local
    626  snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());  in TEST()
    627  write(uid_map_fd, mapping, strlen(mapping));  in TEST()
    630  snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());  in TEST()
    631  write(gid_map_fd, mapping, strlen(mapping));  in TEST()
    [all …]

/linux/fs/

dax.c:
    353  return !folio->mapping && folio->share;  in dax_folio_is_shared()
    372  folio->mapping = NULL;  in dax_folio_make_shared()
    394  folio->mapping = NULL;  in dax_folio_put()
    408  new_folio->mapping = NULL;  in dax_folio_put()
    441  static void dax_associate_entry(void *entry, struct address_space *mapping,  in dax_associate_entry() argument
    452  if (shared && (folio->mapping || dax_folio_is_shared(folio))) {  in dax_associate_entry()
    453  if (folio->mapping)  in dax_associate_entry()
    460  WARN_ON_ONCE(folio->mapping);  in dax_associate_entry()
    463  folio->mapping = mapping;  in dax_associate_entry()
    468  static void dax_disassociate_entry(void *entry, struct address_space *mapping,  in dax_disassociate_entry() argument
    [all …]

/linux/Documentation/arch/powerpc/

vmemmap_dedup.rst:
    14  With 2M PMD level mapping, we require 32 struct pages and a single 64K vmemmap
    18  With 1G PUD level mapping, we require 16384 struct pages and a single 64K
    20  require 16 64K pages in vmemmap to map the struct page for 1G PUD level mapping.
    23  +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+
    35  | mapping | +-----------+ | |
    46  With 4K page size, 2M PMD level mapping requires 512 struct pages and a single
    48  require 8 4K pages in vmemmap to map the struct page for 2M pmd level mapping.
    52  +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+
    64  | mapping | +-----------+ | |
    74  With 1G PUD level mapping, we require 262144 struct pages and a single 4K
    [all …]

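The struct-page counts quoted in vmemmap_dedup.rst follow from straightforward arithmetic; the small check below assumes sizeof(struct page) == 64 bytes, which is what the document's figures imply:

```c
/* Sanity-check the vmemmap figures above (assumes sizeof(struct page) == 64). */
#include <stdio.h>

static void report(const char *base, unsigned long base_sz, unsigned long map_sz)
{
	unsigned long nr_struct_pages = map_sz / base_sz;          /* pages covered by the mapping */
	unsigned long vmemmap_bytes   = nr_struct_pages * 64;      /* size of the struct page array */
	unsigned long vmemmap_pages   = (vmemmap_bytes + base_sz - 1) / base_sz;

	printf("%s base, %lu MB mapping: %lu struct pages, %lu vmemmap page(s)\n",
	       base, map_sz >> 20, nr_struct_pages, vmemmap_pages);
}

int main(void)
{
	report("64K", 64UL << 10, 2UL << 20);   /* 32 struct pages, 1 x 64K vmemmap page   */
	report("64K", 64UL << 10, 1UL << 30);   /* 16384 struct pages, 16 x 64K vmemmap pages */
	report("4K",  4UL << 10,  2UL << 20);   /* 512 struct pages, 8 x 4K vmemmap pages  */
	report("4K",  4UL << 10,  1UL << 30);   /* 262144 struct pages                     */
	return 0;
}
```
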
/linux/drivers/sh/clk/

core.c:
    340  struct clk_mapping *mapping = clk->mapping;  in clk_establish_mapping() local
    345  if (!mapping) {  in clk_establish_mapping()
    352  clk->mapping = &dummy_mapping;  in clk_establish_mapping()
    361  mapping = clkp->mapping;  in clk_establish_mapping()
    362  BUG_ON(!mapping);  in clk_establish_mapping()
    368  if (!mapping->base && mapping->phys) {  in clk_establish_mapping()
    369  kref_init(&mapping->ref);  in clk_establish_mapping()
    371  mapping->base = ioremap(mapping->phys, mapping->len);  in clk_establish_mapping()
    372  if (unlikely(!mapping->base))  in clk_establish_mapping()
    374  } else if (mapping->base) {  in clk_establish_mapping()
    [all …]

/linux/Documentation/translations/zh_CN/mm/

page_migration.rst (Chinese translation; matched lines rendered in English):
    143  2. ``int (*migratepage) (struct address_space *mapping,``
    168  void __SetPageMovable(struct page *page, struct address_space *mapping)
    171  PG_movable is not a real flag of struct page. Instead, the VM reuses the low bits of page->mapping
    175  page->mapping = page->mapping | PAGE_MAPPING_MOVABLE;
    177  so a driver should not access page->mapping directly. Instead, it should use page_mapping(), which
    178  masks off the low 2 bits of page->mapping under the page lock and so yields the correct struct address_space.
    181  non-LRU movable pages, because the page->mapping field is unioned with other fields of struct page. If
    182  the driver frees the page after the VM has isolated it, then even though page->mapping has PAGE_MAPPING_MOVABLE set,
    185  page->mapping can never have PAGE_MAPPING_MOVABLE set. Before the lock_page() used in pfn scanning,
    189  unlike it, PageMovable(), under lock_page(), validates page->mapping and
    [all …]

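The convention those (translated) lines describe is plain pointer tagging: PAGE_MAPPING_MOVABLE lives in the low bits of page->mapping, and readers must mask it off before treating the value as a struct address_space pointer. A stand-alone sketch of that idea, with hypothetical ex_-prefixed names so it is not mistaken for the kernel's own helpers:

```c
#include <linux/mm_types.h>

/* Illustrative only: mirrors the documented convention of tagging the low
 * bits of page->mapping, not the kernel's actual helpers.
 */
#define EX_PAGE_MAPPING_MOVABLE	0x2UL
#define EX_PAGE_MAPPING_FLAGS	0x3UL

static inline void ex_set_page_movable(struct page *page,
				       struct address_space *mapping)
{
	/* as in the document: OR the flag into the mapping pointer */
	page->mapping = (void *)((unsigned long)mapping | EX_PAGE_MAPPING_MOVABLE);
}

static inline struct address_space *ex_page_mapping(const struct page *page)
{
	/* mask off the low two bits to recover the struct address_space */
	return (void *)((unsigned long)page->mapping & ~EX_PAGE_MAPPING_FLAGS);
}

static inline bool ex_page_movable(const struct page *page)
{
	return ((unsigned long)page->mapping & EX_PAGE_MAPPING_FLAGS) ==
	       EX_PAGE_MAPPING_MOVABLE;
}
```
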
/linux/Documentation/driver-api/

io-mapping.rst:
    8   The io_mapping functions in linux/io-mapping.h provide an abstraction for
    9   efficiently mapping small regions of an I/O device to the CPU. The initial
    14  A mapping object is created during driver initialization using::
    20  mappable, while 'size' indicates how large a mapping region to
    23  This _wc variant provides a mapping which may only be used with
    27  With this mapping object, individual pages can be mapped either temporarily
    31  void *io_mapping_map_local_wc(struct io_mapping *mapping,
    34  void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
    37  'offset' is the offset within the defined mapping region. Accessing
    46  Temporary mappings are only valid in the context of the caller. The mapping
    [all …]

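Putting the io-mapping.rst pieces together, a typical driver flow is: create the io_mapping once at probe time, then take short-lived local mappings around each access. The PCI BAR handling below is illustrative and the function names are hypothetical; io_mapping_create_wc(), io_mapping_map_local_wc() and io_mapping_unmap_local() are the interfaces the document describes:

```c
#include <linux/io.h>
#include <linux/io-mapping.h>
#include <linux/pci.h>

/* Illustrative: set up a write-combining io_mapping for a PCI BAR at probe
 * time ("bar" is whichever BAR the hypothetical device exposes).
 */
static struct io_mapping *example_iomap_init(struct pci_dev *pdev, int bar)
{
	resource_size_t base = pci_resource_start(pdev, bar);
	unsigned long size = pci_resource_len(pdev, bar);

	return io_mapping_create_wc(base, size);	/* NULL on failure */
}

/* ... and touch one 32-bit word of it from process context. */
static void example_iomap_write32(struct io_mapping *iomap,
				  unsigned long offset, u32 val)
{
	void __iomem *vaddr;

	/* offset must lie inside the region given to io_mapping_create_wc() */
	vaddr = io_mapping_map_local_wc(iomap, offset & PAGE_MASK);
	writel(val, vaddr + (offset & ~PAGE_MASK));
	io_mapping_unmap_local(vaddr);
}
```
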
/linux/drivers/net/wireless/marvell/mwifiex/

util.h:
    57  struct mwifiex_dma_mapping *mapping)  in mwifiex_store_mapping() argument
    61  memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));  in mwifiex_store_mapping()
    65  struct mwifiex_dma_mapping *mapping)  in mwifiex_get_mapping() argument
    69  memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));  in mwifiex_get_mapping()
    74  struct mwifiex_dma_mapping mapping;  in MWIFIEX_SKB_DMA_ADDR() local
    76  mwifiex_get_mapping(skb, &mapping);  in MWIFIEX_SKB_DMA_ADDR()
    78  return mapping.addr;  in MWIFIEX_SKB_DMA_ADDR()

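The util.h helpers above just stash a struct mwifiex_dma_mapping in the skb control buffer so the DMA address can be recovered later (e.g. via MWIFIEX_SKB_DMA_ADDR()). A hedged sketch of the producing side, assuming the usual dma_map_single() flow and the driver's util.h in scope (the function name is hypothetical):

```c
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
/* plus the driver-local "util.h" for mwifiex_store_mapping() */

/* Hypothetical: map an skb for device DMA and record the mapping in skb->cb,
 * so the completion path can fetch it back with mwifiex_get_mapping() and unmap.
 */
static int example_map_tx_skb(struct device *dev, struct sk_buff *skb)
{
	struct mwifiex_dma_mapping mapping;

	mapping.addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping.addr))
		return -ENOMEM;
	mapping.len = skb->len;

	mwifiex_store_mapping(skb, &mapping);
	return 0;
}
```
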
/linux/arch/nios2/include/asm/

cacheflush.h:
    54  #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)  argument
    55  #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)  argument
    56  #define flush_dcache_mmap_lock_irqsave(mapping, flags) \  argument
    57  xa_lock_irqsave(&mapping->i_pages, flags)
    58  #define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \  argument
    59  xa_unlock_irqrestore(&mapping->i_pages, flags)

/linux/fs/ecryptfs/

mmap.c:
    30   static int ecryptfs_writepages(struct address_space *mapping,  in ecryptfs_writepages() argument
    36   while ((folio = writeback_iter(mapping, wbc, folio, &error))) {  in ecryptfs_writepages()
    43   mapping_set_error(mapping, error);  in ecryptfs_writepages()
    117  page_virt, folio->mapping->host);  in ecryptfs_copy_up_encrypted_with_header()
    139  crypt_stat->extent_size, folio->mapping->host);  in ecryptfs_copy_up_encrypted_with_header()
    165  struct inode *inode = folio->mapping->host;  in ecryptfs_read_folio()
    216  struct inode *inode = folio->mapping->host;  in fill_zeros_to_end_of_page()
    243  struct address_space *mapping,  in ecryptfs_write_begin() argument
    252  folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,  in ecryptfs_write_begin()
    253  mapping_gfp_mask(mapping));  in ecryptfs_write_begin()
    [all …]

/linux/arch/nios2/mm/

cacheflush.c:
    74   static void flush_aliases(struct address_space *mapping, struct folio *folio)  in flush_aliases() argument
    84   flush_dcache_mmap_lock_irqsave(mapping, flags);  in flush_aliases()
    85   vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {  in flush_aliases()
    96   flush_dcache_mmap_unlock_irqrestore(mapping, flags);  in flush_aliases()
    177  struct address_space *mapping;  in flush_dcache_folio() local
    186  mapping = folio_flush_mapping(folio);  in flush_dcache_folio()
    189  if (mapping && !mapping_mapped(mapping)) {  in flush_dcache_folio()
    193  if (mapping) {  in flush_dcache_folio()
    195  flush_aliases(mapping, folio);  in flush_dcache_folio()
    215  struct address_space *mapping;  in update_mmu_cache_range() local
    [all …]