/linux/include/linux/
    pagemap.h: the page-cache API around struct address_space. Matches cover
        invalidation (invalidate_mapping_pages(), invalidate_inode_pages2()
        and _range(), filemap_invalidate_pages()), writeback flushing and
        waiting (filemap_fdatawait() and the _keep_errors variants,
        filemap_write_and_wait() and _range(), __filemap_fdatawrite_range()),
        errseq_t writeback-error tracking (filemap_set_wb_err(),
        filemap_check_wb_err(), filemap_sample_wb_err(),
        mapping_set_error()), the mapping state helpers (mapping_empty(),
        mapping_shrinkable(), plus set/clear/test helpers for unevictable,
        exiting, no_writeback_tags, release_always, stable_writes, and
        inaccessible), GFP accessors (mapping_gfp_mask(),
        mapping_gfp_constraint()), folio-order limits and large-folio
        support (mapping_set_folio_order_range(),
        mapping_set_folio_min_order(), mapping_set_large_folios(),
        mapping_min/max_folio_order(), mapping_min_folio_nrpages(),
        mapping_align_index(), mapping_large_folio_support(),
        mapping_max_folio_size()), THP accounting (filemap_nr_thps() with
        _inc()/_dec()), folio and page lookup (filemap_get_folio(),
        filemap_lock_folio(), filemap_grab_folio(), find_get_page(),
        find_get_page_flags(), find_lock_page(), find_or_create_page(),
        grab_cache_page() and _nowait(), read_mapping_page(),
        read_mapping_folio()), filemap_range_needs_writeback(), and the
        readahead entry points page_cache_sync_readahead() and
        page_cache_async_readahead(). [more matches elided]
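        The errseq_t helpers listed above are the building blocks of
        writeback-error reporting. As a minimal sketch (not any particular
        filesystem's fsync implementation), a flush path can sample the
        error cursor, write back the range, and report anything recorded
        since the sample, so errors already consumed by other waiters are
        still seen:

            #include <linux/fs.h>
            #include <linux/pagemap.h>

            /* Hypothetical fsync-style helper; 'file' and the byte range
             * come from the caller. */
            static int example_flush_and_check(struct file *file,
                                               loff_t start, loff_t end)
            {
                    struct address_space *mapping = file->f_mapping;
                    errseq_t since = filemap_sample_wb_err(mapping);
                    int ret;

                    /* Start writeback for the range and wait for it. */
                    ret = filemap_write_and_wait_range(mapping, start, end);
                    if (ret)
                            return ret;

                    /* Report any writeback error raised since the sample. */
                    return filemap_check_wb_err(mapping, since);
            }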
    io-mapping.h: the io_mapping mechanism, an abstraction for mapping
        small regions of an I/O resource into the CPU (see
        Documentation/driver-api/io-mapping.rst); useful on small
        address-space machines that cannot permanently map large objects.
        Matches include io_mapping_fini(), which frees mapping->base, and
        io_mapping_map_atomic_wc()/io_mapping_map_local_wc(), which
        BUG_ON() an offset beyond mapping->size and map the page at
        mapping->base + offset with the mapping's protection.
        [more matches elided]

/linux/mm/
    truncate.c: page-cache truncation and invalidation.
        clear_shadow_entries() walks mapping->i_pages with an XA_STATE,
        skips shmem and DAX mappings, and, under mapping->host->i_lock,
        puts a shrinkable mapping's inode back on the LRU. Further matches
        in truncate_folio_batch_exceptionals(), truncate_inode_folio(),
        generic_error_remove_folio(), mapping_evict_folio(),
        truncate_inode_pages_range(), truncate_inode_pages(),
        truncate_inode_pages_final(), mapping_try_invalidate(),
        invalidate_mapping_pages(), folio_launder(),
        folio_unmap_invalidate(), invalidate_inode_pages2_range(),
        invalidate_inode_pages2(), and truncate_pagecache() and
        truncate_pagecache_range(), which start from inode->i_mapping.
        [more matches elided]
    filemap.c: the core page-cache implementation. page_cache_delete()
        positions an XA_STATE at folio->index in mapping->i_pages, clears
        folio->mapping, and subtracts the folio's pages from
        mapping->nrpages; filemap_unaccount_folio() special-cases exiting
        mappings, decrements the THP count for large folios, and charges a
        cleaned folio to inode_to_wb(mapping->host) when the mapping is
        writeback-capable. [more matches elided]
    readahead.c: file_ra_state_init() seeds ra->ra_pages from
        inode_to_bdi(mapping->host); read_pages() dispatches through
        rac->mapping->a_ops; page_cache_ra_unbounded() computes its GFP
        mask with readahead_gfp_mask() (filesystems already specify
        __GFP_NOFS in their mapping's mask), takes
        filemap_invalidate_lock_shared(), and aligns the start index with
        mapping_align_index() to mapping_min_folio_nrpages() granularity.
        Further matches in force_page_cache_ra(), page_cache_ra_order(),
        and readahead_expand(). [more matches elided]

/linux/arch/arm/mm/
    dma-mapping.c: ARM DMA uncached-mapping support.
        dma_contiguous_remap() clears the previous low-memory mapping;
        other comments cover freeing a buffer as defined by its mapping
        and the driver-side dma_sync_* support from dma-mapping.h. For
        IOMMU use, __alloc_iova() sizes the IOVA space as
        mapping->bits << PAGE_SHIFT and scans mapping->nr_bitmaps under
        mapping->lock, with extend_iommu_mapping() growing the bitmap set
        on demand. [more matches elided]

/linux/drivers/gpu/drm/tegra/
    uapi.c: Tegra DRM mapping lifetime. tegra_drm_mapping_release()
        unpins mapping->map, puts mapping->bo, and frees the mapping;
        tegra_drm_mapping_put() drops a kref against it;
        tegra_drm_channel_context_close() walks context->mappings with
        xa_for_each() and puts each entry; tegra_drm_ioctl_channel_map()
        creates new mappings. [more matches elided]

/linux/Documentation/admin-guide/mm/
    nommu-mmap.rst: "No-MMU memory mapping support". The kernel has
        limited support for memory mapping under no-MMU conditions; such
        mapping is made use of in conjunction with the mmap() and shmat()
        system calls, and when loading executables the mapping is actually
        performed by the binfmt drivers. Memory mapping behaviour also
        involves the way fork(), vfork() and clone() work. The excerpt
        enumerates the supported cases, including anonymous MAP_PRIVATE,
        anonymous MAP_SHARED, and file-backed mappings, where changes in
        the underlying file are reflected in the mapping and copied across
        fork, the kernel re-uses an existing mapping where one exists,
        and, if possible, the file mapping is placed directly on the
        backing device. [more matches elided]
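        For illustration only, a userspace probe of the first case the
        document lists (an anonymous MAP_PRIVATE mapping), which behaves
        the same on MMU and no-MMU kernels for this simple use:

            #include <stdio.h>
            #include <string.h>
            #include <sys/mman.h>

            int main(void)
            {
                    size_t len = 4096;
                    /* Anonymous private mapping: on no-MMU this is backed
                     * by plain allocated pages, not page-table tricks. */
                    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                    if (p == MAP_FAILED) {
                            perror("mmap");
                            return 1;
                    }
                    strcpy(p, "anonymous private mapping");
                    puts(p);
                    munmap(p, len);
                    return 0;
            }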

/linux/include/trace/events/
    filemap.h: page-cache tracepoints. The folio-based events record
        __entry->i_ino from folio->mapping->host and take s_dev from the
        host's superblock when one exists, falling back to i_rdev for bare
        block devices; a second event class takes (mapping, index,
        last_index) and fills the same fields from mapping->host.
        [more matches elided]

/linux/drivers/gpu/drm/etnaviv/
    etnaviv_mmu.c: etnaviv GPU MMU handling. etnaviv_context_map()
        unrolls the mapping in case something went wrong part-way;
        etnaviv_iommu_remove_mapping() unmaps mapping->object starting at
        mapping->vram_node.start and removes the drm_mm node;
        etnaviv_iommu_reap_mapping() asserts the mapping is unused
        (WARN_ON(mapping->use)) before removing it from mapping->context.
        [more matches elided]
    etnaviv_gem.c: etnaviv_gem_get_vram_mapping() walks obj->vram_list
        for the entry whose mapping->context matches;
        etnaviv_gem_mapping_unreference() decrements mapping->use, warning
        if it was already zero; etnaviv_gem_mapping_get() looks up or
        creates a mapping. [more matches elided]

/linux/Documentation/driver-api/
    io-mapping.rst: the io_mapping functions in linux/io-mapping.h
        provide an abstraction for efficiently mapping small regions of an
        I/O device to the CPU. A mapping object is created during driver
        initialization from a 'base' physical address and a 'size' saying
        how large a mapping region to make mappable; the _wc variant
        provides a write-combining mapping that may only be used with the
        matching map calls. With this mapping object, individual pages can
        be mapped temporarily with io_mapping_map_local_wc(mapping,
        offset) or io_mapping_map_atomic_wc(mapping, offset), where
        'offset' is the offset within the defined mapping region;
        temporary mappings are only valid in the context of the caller.
        [more matches elided]
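        A minimal sketch of this pattern, with invented example_ names;
        the base address and size are assumed to come from the driver's
        probe routine (for instance a PCI BAR):

            #include <linux/io-mapping.h>
            #include <linux/io.h>

            static struct io_mapping *example_region; /* hypothetical */

            static int example_init(resource_size_t base, unsigned long size)
            {
                    /* One mapping object for the whole region, at init. */
                    example_region = io_mapping_create_wc(base, size);
                    return example_region ? 0 : -ENOMEM;
            }

            static void example_write(unsigned long page_offset, u32 val)
            {
                    /* Short-lived, CPU-local mapping of one page within
                     * the region; unmap before leaving the context. */
                    void __iomem *p =
                            io_mapping_map_local_wc(example_region,
                                                    page_offset);

                    writel(val, p);
                    io_mapping_unmap_local(p);
            }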

/linux/drivers/gpu/drm/exynos/
    exynos_drm_dma.c: drm_iommu_attach_device() attaches a sub-device to
        the DRM driver's IOMMU mapping while keeping the sub-device's
        original DMA mapping, via arm_iommu_attach_device(subdrv_dev,
        priv->mapping) on ARM or iommu_attach_device(priv->mapping,
        subdrv_dev) otherwise; drm_iommu_detach_device() detaches the
        address-space mapping again; exynos_drm_register_dma() logs which
        device is used for DMA mapping operations and sets up
        priv->mapping when absent. [more matches elided]

/linux/tools/testing/selftests/arm64/mte/
    check_mmap_options.c: MTE selftest helpers parameterised by memory
        type, MTE mode, mapping flags, and tag checking:
        check_anonymous_memory_mapping() and check_file_memory_mapping()
        allocate tagged anonymous or file-backed memory via
        mte_allocate_memory() and mte_allocate_file_memory(), and
        check_clear_prot_mte_flag() exercises tagged ranges from
        mte_allocate_memory_tag_range() and
        mte_allocate_file_memory_tag_range(). The test descriptions cover
        private mappings in sync and no-error modes, mmap and
        mmap/mprotect memory, with tag checking off. [more matches elided]
    check_child_memory.c: companion MTE selftest for child processes.
        check_child_memory_mapping() and check_child_file_mapping()
        allocate tagged ranges and verify them in the child across private
        and shared mappings, precise and imprecise error modes, and mmap
        as well as mmap/mprotect memory. [more matches elided]

/linux/tools/testing/selftests/mm/
    mremap_dontunmap.c: selftest for mremap() with MREMAP_DONTUNMAP.
        kernel_support_for_mremap_dontunmap() probes whether the kernel
        supports the flag (unmapping its source and destination mappings
        afterwards); a helper validates that an entire mapping contains
        the expected pattern; mremap_dontunmap_simple() moves a whole
        mapping anywhere (not fixed) while the source stays mapped, and
        mremap_dontunmap_simple_shmem() validates that MREMAP_DONTUNMAP on
        a shared mapping works as expected. [more matches elided]
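        The core call the selftest exercises, sketched in isolation
        (hedged: MREMAP_DONTUNMAP needs Linux 5.7+, shared-mapping support
        arrived later, and a libc that defines the flag is assumed):

            #define _GNU_SOURCE
            #include <sys/mman.h>

            /* Move the pages backing 'src' to a kernel-chosen address
             * while leaving 'src' mapped (it ends up backed by fresh
             * zero pages). MREMAP_DONTUNMAP requires MREMAP_MAYMOVE. */
            static void *move_keep_source(void *src, size_t len)
            {
                    return mremap(src, len, len,
                                  MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
            }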

/linux/arch/arm64/kvm/
    pkvm.c: protected-KVM stage-2 mapping bookkeeping.
        __pkvm_pgtable_stage2_unmap() iterates every mapping in the range
        with for_each_mapping_in_range_safe(), asks the hypervisor to
        unshare each guest range via __pkvm_host_unshare_guest (passing
        mapping->gfn and mapping->nr_pages), then removes the mapping from
        pgt->pkvm_mappings and frees it; pkvm_pgtable_stage2_map() first
        looks up an existing mapping covering [addr, addr + size) with
        pkvm_mapping_iter_first() and short-circuits when one of exactly
        mapping->nr_pages * PAGE_SIZE bytes is already present.
        [more matches elided]

/linux/drivers/sh/clk/
    core.c: SuperH clock-framework MMIO mappings. In
        clk_establish_mapping(), a root clock with no specified register
        range gets the dummy mapping; a child clock that provides no
        mapping of its own inherits the mapping from its root clock
        (BUG_ON() if the root has none); the initial mapping is then
        established when mapping->phys is set but mapping->base is not.
        [more matches elided]

/linux/fs/
    dax.c: DAX folio association and sharing. A DAX folio is considered
        shared if it has no mapping set and a non-zero ->share count
        (dax_folio_is_shared() returns !folio->mapping && folio->share).
        When a folio is first associated with a mapping, its ->mapping and
        ->index fields are set; if it is already associated with one,
        dax_folio_make_shared() clears folio->mapping and the share count
        takes over, leaving it to the filesystem to recover the ->mapping
        and ->index information; dax_folio_put() clears folio->mapping on
        the final put. [more matches elided]

/linux/Documentation/mm/
    highmem.rst: the part of physical memory not covered by a permanent
        kernel mapping is highmem. kmap_local_page() mappings are
        thread-local and CPU-local: the mapping is only usable from the
        owning thread and CPU while it is active, preemption is never
        disabled by the API (though CPU-hotplug is held off until the
        mapping is disposed), and for pages already in the direct mapping
        the call simply returns the virtual address of the direct mapping,
        since only real highmem pages need a temporary slot. Code should
        be designed to avoid kmap() in favour of kmap_local_page(), and
        conversions must take care to follow the mapping's scoping rules.
        The older atomic variant likewise permits a very short-duration
        mapping of a single page; since the mapping is restricted to the
        CPU that issued it, it performs well, but constrains the caller.
        [more matches elided]
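        A minimal sketch of the recommended kmap_local_page() pattern,
        zeroing one page from a context that already holds a reference on
        it (the example_ name is invented):

            #include <linux/highmem.h>
            #include <linux/string.h>

            static void example_zero_page(struct page *page)
            {
                    /* Thread- and CPU-local; valid only until unmapped. */
                    void *addr = kmap_local_page(page);

                    memset(addr, 0, PAGE_SIZE);
                    kunmap_local(addr); /* release promptly, LIFO order */
            }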

/linux/Documentation/arch/riscv/
    boot.rst: RISC-V boot requirements around the kernel mapping. The
        firmware must correctly mark PMP-protected regions in the direct
        mapping. "Virtual mapping installation" is done in two steps:
        first, setup_vm() installs a temporary kernel mapping in
        early_pg_dir, and no allocation can be done while establishing
        it; second, setup_vm_final() creates the final kernel mapping in
        swapper_pg_dir, at which point the kernel can allocate memory but
        cannot yet access it directly, since the direct mapping is not
        present until this step completes. Helpers that convert
        direct-mapping addresses to physical addresses need to know where
        the direct mapping starts. [more matches elided]

/linux/drivers/pci/
    devres.c: managed PCI resources. The implementation is very strongly
        tied to the statically allocated mapping table in the devres
        struct; newer functions provide things like ranged mapping by
        bypassing it, but still enter their mapping addresses into the
        table for users of the old API. A devres entry may describe a
        requested region spanning an entire BAR together with a mapping
        for it, or a mapping within a BAR covering the whole BAR or just
        a range, with the mapping's address obtained directly from one of
        the pcim_* mapping functions. pcim_add_mapping_to_legacy_table()
        fills the legacy mapping table (legacy_iomap_table[bar] = mapping)
        so that drivers using the old API can still get a BAR's mapping
        address through pcim_iomap_table(). [more matches elided]
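        A hedged sketch of the managed pattern those comments describe,
        using the long-standing pcim_* API (the probe function and BAR
        number are illustrative):

            #include <linux/pci.h>

            static int example_probe(struct pci_dev *pdev,
                                     const struct pci_device_id *id)
            {
                    void __iomem *regs;
                    int ret;

                    ret = pcim_enable_device(pdev); /* managed enable */
                    if (ret)
                            return ret;

                    regs = pcim_iomap(pdev, 0, 0);  /* map all of BAR 0 */
                    if (!regs)
                            return -ENOMEM;

                    /* The same address is also reachable through
                     * pcim_iomap_table(pdev)[0] for old-API users. */
                    return 0;
            }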

/linux/Documentation/core-api/
    dma-attributes.rst: the DMA attributes defined in
        linux/dma-mapping.h. DMA_ATTR_WEAK_ORDERING specifies that reads
        and writes to the mapping may be weakly ordered;
        DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be
        buffered and combined; DMA_ATTR_NO_KERNEL_MAPPING requests that no
        kernel virtual mapping be created for the allocated buffer, which
        on some architectures is a non-trivial task that consumes very
        limited resources, and avoids having a mapping created separately
        for each device. The remaining excerpts concern leaving data in
        the device domain after releasing a mapping, the DMA-mapping
        subsystem's default freedom to assemble the buffer from several
        pages, and a hint that it is probably not worth trading allocation
        effort for TLB efficiency by trying to build the mapping out of
        larger pages. [more matches elided]
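        For instance, a driver that never touches a buffer with the CPU
        can pass DMA_ATTR_NO_KERNEL_MAPPING at allocation time; a sketch,
        with the device and size assumed to come from the caller:

            #include <linux/dma-mapping.h>

            static void *example_alloc_nomap(struct device *dev,
                                             size_t size, dma_addr_t *dma)
            {
                    /* With DMA_ATTR_NO_KERNEL_MAPPING the return value is
                     * an opaque cookie for dma_free_attrs(), not a usable
                     * kernel virtual address: no mapping was created. */
                    return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
                                           DMA_ATTR_NO_KERNEL_MAPPING);
            }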

/linux/fs/freevxfs/
    vxfs_subr.c: vxfs_get_page(mapping, n) reads page n from the mapping
        via read_mapping_page(mapping, n, NULL); vxfs_bmap() performs
        logical-to-physical block mapping for a (mapping, block) pair,
        where the block is relative to the mapping, by delegating to
        generic_block_bmap(mapping, block, vxfs_getblk).

/linux/drivers/net/wireless/marvell/mwifiex/
    util.h: mwifiex DMA-mapping helpers kept in the skb control buffer:
        mwifiex_store_mapping() copies a struct mwifiex_dma_mapping into
        cb->dma_mapping, mwifiex_get_mapping() copies it back out, and
        MWIFIEX_SKB_DMA_ADDR() fetches the stored mapping and returns
        mapping.addr.