| /linux/include/linux/ |
| pagemap.h |
    21  unsigned long invalidate_mapping_pages(struct address_space *mapping,
    30  int invalidate_inode_pages2(struct address_space *mapping);
    31  int invalidate_inode_pages2_range(struct address_space *mapping,
    35  int filemap_invalidate_pages(struct address_space *mapping,
    41  int filemap_flush_nr(struct address_space *mapping, long *nr_to_write);
    42  int filemap_fdatawait_keep_errors(struct address_space *mapping);
    44  int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
    49  static inline int filemap_fdatawait(struct address_space *mapping)  in filemap_fdatawait() argument
    51  return filemap_fdatawait_range(mapping, 0, LLONG_MAX);  in filemap_fdatawait()
    55  int filemap_write_and_wait_range(struct address_space *mapping,
  [all …]
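The write-back helpers matched above combine into the classic fsync pattern:
push dirty page-cache pages for a byte range, then wait for the I/O and pick
up any error. A minimal sketch, assuming a filesystem whose ->fsync() needs
no extra journaling work (example_fsync is a hypothetical name)::

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Write dirty pages in [start, end] and wait for completion;
     * returns 0 or a negative errno carrying the writeback error. */
    static int example_fsync(struct file *file, loff_t start, loff_t end,
                             int datasync)
    {
            return filemap_write_and_wait_range(file->f_mapping, start, end);
    }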
|
| io-mapping.h |
    17  * The io_mapping mechanism provides an abstraction for mapping
    20  * See Documentation/driver-api/io-mapping.rst
    35  * For small address space machines, mapping large objects
    58  io_mapping_fini(struct io_mapping *mapping)  in io_mapping_fini() argument
    60  iomap_free(mapping->base, mapping->size);  in io_mapping_fini()
    65  io_mapping_map_atomic_wc(struct io_mapping *mapping,  in io_mapping_map_atomic_wc() argument
    70  BUG_ON(offset >= mapping->size);  in io_mapping_map_atomic_wc()
    71  phys_addr = mapping->base + offset;  in io_mapping_map_atomic_wc()
    77  return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);  in io_mapping_map_atomic_wc()
    92  io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)  in io_mapping_map_local_wc() argument
  [all …]
|
| /linux/mm/ |
| truncate.c |
    26  static void clear_shadow_entries(struct address_space *mapping,  in clear_shadow_entries() argument
    29  XA_STATE(xas, &mapping->i_pages, start);  in clear_shadow_entries()
    33  if (shmem_mapping(mapping) || dax_mapping(mapping))  in clear_shadow_entries()
    38  spin_lock(&mapping->host->i_lock);  in clear_shadow_entries()
    48  if (mapping_shrinkable(mapping))  in clear_shadow_entries()
    49  inode_lru_list_add(mapping->host);  in clear_shadow_entries()
    50  spin_unlock(&mapping->host->i_lock);  in clear_shadow_entries()
    60  static void truncate_folio_batch_exceptionals(struct address_space *mapping,  in truncate_folio_batch_exceptionals() argument
    63  XA_STATE(xas, &mapping->i_pages, indices[0]);  in truncate_folio_batch_exceptionals()
    69  if (shmem_mapping(mapping))  in truncate_folio_batch_exceptionals()
  [all …]
|
| filemap.c |
   129  static void page_cache_delete(struct address_space *mapping,  in page_cache_delete() argument
   132  XA_STATE(xas, &mapping->i_pages, folio->index);  in page_cache_delete()
   135  mapping_set_update(&xas, mapping);  in page_cache_delete()
   145  folio->mapping = NULL;  in page_cache_delete()
   147  mapping->nrpages -= nr;  in page_cache_delete()
   150  static void filemap_unaccount_folio(struct address_space *mapping,  in filemap_unaccount_folio() argument
   163  if (mapping_exiting(mapping) && !folio_test_large(folio)) {  in filemap_unaccount_folio()
   192  filemap_nr_thps_dec(mapping);  in filemap_unaccount_folio()
   194  if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))  in filemap_unaccount_folio()
   213  mapping_can_writeback(mapping)))  in filemap_unaccount_folio()
  [all …]
|
| readahead.c |
   142  file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)  in file_ra_state_init() argument
   144  ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;  in file_ra_state_init()
   151  const struct address_space_operations *aops = rac->mapping->a_ops;  in read_pages()
   213  struct address_space *mapping = ractl->mapping;  in page_cache_ra_unbounded() local
   215  gfp_t gfp_mask = readahead_gfp_mask(mapping);  in page_cache_ra_unbounded()
   217  unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);  in page_cache_ra_unbounded()
   226  * filesystems already specify __GFP_NOFS in their mapping's  in page_cache_ra_unbounded()
   231  trace_page_cache_ra_unbounded(mapping->host, index, nr_to_read,  in page_cache_ra_unbounded()
   233  filemap_invalidate_lock_shared(mapping);  in page_cache_ra_unbounded()
   234  index = mapping_align_index(mapping, index);  in page_cache_ra_unbounded()
  [all …]
|
| /linux/tools/testing/selftests/arm64/mte/ |
| check_mmap_options.c |
    47  int mapping;  member
   113  static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping,  in check_anonymous_memory_mapping() argument
   126  map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);  in check_anonymous_memory_mapping()
   148  static int check_file_memory_mapping(int mem_type, int mode, int mapping,  in check_file_memory_mapping() argument
   166  map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);  in check_file_memory_mapping()
   191  static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping, int atag_check)  in check_clear_prot_mte_flag() argument
   201  ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,  in check_clear_prot_mte_flag()
   222  ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping,  in check_clear_prot_mte_flag()
   300  switch (tc->mapping) {  in format_test_name()
   337  "Check %s with %s mapping, %s mode, %s memory and %s (%s)\n",  in format_test_name()
  [all …]
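The options exercised here all funnel into mmap()/mprotect() with PROT_MTE.
A hedged userspace sketch of the core idea (arm64 only; error handling and
the tag-check-mode setup via prctl() are elided)::

    #include <sys/mman.h>

    #ifndef PROT_MTE
    #define PROT_MTE 0x20  /* arm64 uapi value */
    #endif

    /* Anonymous private mapping whose pages carry MTE allocation tags. */
    char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);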
|
| /linux/arch/arm/mm/ |
| dma-mapping.c |
     3  * linux/arch/arm/mm/dma-mapping.c
     7  * DMA uncached mapping support.
   290  * Clear previous low-memory mapping to ensure that the  in dma_contiguous_remap()
   603  * Free a buffer as defined by the above mapping.
   677  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
   752  static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
   754  static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,  in __alloc_iova() argument
   760  size_t mapping_size = mapping->bits << PAGE_SHIFT;  in __alloc_iova()
   771  spin_lock_irqsave(&mapping->lock, flags);  in __alloc_iova()
   772  for (i = 0; i < mapping->nr_bitmaps; i++) {  in __alloc_iova()
  [all …]
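For orientation, the dma_sync_* interface referenced at line 677 belongs to
the streaming DMA API. A sketch of its usual shape, assuming dev/buf/len are
set up by enclosing driver code (a fragment, not a complete function)::

    #include <linux/dma-mapping.h>

    /* Map a CPU buffer for a device-bound transfer, then unmap; the
     * dma_sync_single_for_cpu()/_for_device() pair is used instead
     * when the same buffer is handed back and forth repeatedly. */
    dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, dma))
            return -ENOMEM;
    /* ... start the transfer and wait for its completion ... */
    dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);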
|
| flush.c |
   199  void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)  in __flush_dcache_folio() argument
   202  * Writeback any data associated with the kernel mapping of this  in __flush_dcache_folio()
   204  * coherent with the kernels mapping.  in __flush_dcache_folio()
   234  if (mapping && cache_is_vipt_aliasing())  in __flush_dcache_folio()
   238  static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)  in __flush_dcache_aliases() argument
   248  * - aliasing VIPT: we only need to find one mapping of this page.  in __flush_dcache_aliases()
   253  flush_dcache_mmap_lock(mapping);  in __flush_dcache_aliases()
   254  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {  in __flush_dcache_aliases()
   281  flush_dcache_mmap_unlock(mapping);  in __flush_dcache_aliases()
   289  struct address_space *mapping;  in __sync_icache_dcache() local
  [all …]
|
| /linux/drivers/gpu/drm/tegra/ |
| uapi.c |
    17  struct tegra_drm_mapping *mapping =  in tegra_drm_mapping_release() local
    20  host1x_bo_unpin(mapping->map);  in tegra_drm_mapping_release()
    21  host1x_bo_put(mapping->bo);  in tegra_drm_mapping_release()
    23  kfree(mapping);  in tegra_drm_mapping_release()
    26  void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)  in tegra_drm_mapping_put() argument
    28  kref_put(&mapping->ref, tegra_drm_mapping_release);  in tegra_drm_mapping_put()
    33  struct tegra_drm_mapping *mapping;  in tegra_drm_channel_context_close() local
    39  xa_for_each(&context->mappings, id, mapping)  in tegra_drm_channel_context_close()
    40  tegra_drm_mapping_put(mapping);  in tegra_drm_channel_context_close()
   192  struct tegra_drm_mapping *mapping;  in tegra_drm_ioctl_channel_map() local
  [all …]
|
| /linux/Documentation/admin-guide/mm/ |
| nommu-mmap.rst |
     2  No-MMU memory mapping support
     5  The kernel has limited support for memory mapping under no-MMU conditions, such
     7  mapping is made use of in conjunction with the mmap() system call, the shmat()
     9  mapping is actually performed by the binfmt drivers, which call back into the
    12  Memory mapping behaviour also involves the way fork(), vfork(), clone() and
    19  (#) Anonymous mapping, MAP_PRIVATE
    27  (#) Anonymous mapping, MAP_SHARED
    37  the underlying file are reflected in the mapping; copied across fork.
    41  - If one exists, the kernel will re-use an existing mapping to the
    45  - If possible, the file mapping will be directly on the backing device
  [all …]
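The mapping classes above correspond to ordinary mmap() invocations; for
example, a private anonymous mapping, which a no-MMU kernel backs with plain
memory much as malloc() would (userspace sketch, error handling elided)::

    #include <sys/mman.h>

    /* MAP_PRIVATE | MAP_ANONYMOUS: available even without an MMU. */
    void *buf = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);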
|
| /linux/include/trace/events/ |
| filemap.h |
    32  __entry->i_ino = folio->mapping->host->i_ino;
    34  if (folio->mapping->host->i_sb)
    35  __entry->s_dev = folio->mapping->host->i_sb->s_dev;
    37  __entry->s_dev = folio->mapping->host->i_rdev;
    62  struct address_space *mapping,
    67  TP_ARGS(mapping, index, last_index),
    77  __entry->i_ino = mapping->host->i_ino;
    78  if (mapping->host->i_sb)
    80  mapping->host->i_sb->s_dev;
    82  __entry->s_dev = mapping->host->i_rdev;
  [all …]
|
| /linux/drivers/gpu/drm/etnaviv/ |
| etnaviv_mmu.c |
     6  #include <linux/dma-mapping.h>
    55  /* unroll mapping in case something went wrong */  in etnaviv_context_map()
   115  struct etnaviv_vram_mapping *mapping)  in etnaviv_iommu_remove_mapping()
   117  struct etnaviv_gem_object *etnaviv_obj = mapping->object;  in etnaviv_iommu_remove_mapping()
   121  etnaviv_iommu_unmap(context, mapping->vram_node.start,  in etnaviv_iommu_remove_mapping()
   123  drm_mm_remove_node(&mapping->vram_node);
   126  void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)  in etnaviv_iommu_reap_mapping()
   128  struct etnaviv_iommu_context *context = mapping->context;  in etnaviv_iommu_reap_mapping()
   131  WARN_ON(mapping->use);  in etnaviv_iommu_reap_mapping()
   133  etnaviv_iommu_remove_mapping(context, mapping);  in etnaviv_iommu_reap_mapping()
   113  etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context, struct etnaviv_vram_mapping *mapping)  in etnaviv_iommu_remove_mapping() argument
   266  etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context, struct etnaviv_gem_object *etnaviv_obj, u32 memory_base, struct etnaviv_vram_mapping *mapping, u64 va)  in etnaviv_iommu_map_gem() argument
   318  etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context, struct etnaviv_vram_mapping *mapping)  in etnaviv_iommu_unmap_gem() argument
   396  etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context, struct etnaviv_vram_mapping *mapping, u32 memory_base, dma_addr_t paddr, size_t size)  in etnaviv_iommu_get_suballoc_va() argument
   447  etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context, struct etnaviv_vram_mapping *mapping)  in etnaviv_iommu_put_suballoc_va() argument
  [all …]
| etnaviv_gem.c |
     8  #include <linux/dma-mapping.h>
   219  struct etnaviv_vram_mapping *mapping;  in etnaviv_gem_get_vram_mapping()
   221  list_for_each_entry(mapping, &obj->vram_list, obj_node) {  in etnaviv_gem_get_vram_mapping()
   222  if (mapping->context == context)  in etnaviv_gem_get_vram_mapping()
   223  return mapping;  in etnaviv_gem_get_vram_mapping()
   229  void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)  in etnaviv_gem_mapping_unreference()
   231  struct etnaviv_gem_object *etnaviv_obj = mapping->object;  in etnaviv_gem_mapping_unreference()
   234  WARN_ON(mapping->use == 0);  in etnaviv_gem_mapping_unreference()
   235  mapping->use -= 1;  in etnaviv_gem_mapping_unreference()
   246  struct etnaviv_vram_mapping *mapping;  in etnaviv_gem_mapping_get()
   504  struct etnaviv_vram_mapping *mapping, *tmp;  in etnaviv_gem_free_object() local
  [all …]
| /linux/Documentation/driver-api/ |
| io-mapping.rst |
     8  The io_mapping functions in linux/io-mapping.h provide an abstraction for
     9  efficiently mapping small regions of an I/O device to the CPU. The initial
    14  A mapping object is created during driver initialization using::
    20  mappable, while 'size' indicates how large a mapping region to
    23  This _wc variant provides a mapping which may only be used with
    27  With this mapping object, individual pages can be mapped either temporarily
    31  void *io_mapping_map_local_wc(struct io_mapping *mapping,
    34  void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
    37  'offset' is the offset within the defined mapping region. Accessing
    46  Temporary mappings are only valid in the context of the caller. The mapping
  [all …]
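Putting the calls from this document together, a minimal driver-side sketch
(only the io_mapping_* and memcpy_fromio() calls are real API; the function
and its parameters are hypothetical)::

    #include <linux/io-mapping.h>
    #include <linux/io.h>

    /* Create one WC mapping object for a device aperture, map a small
     * window locally, copy out of it, then tear everything down. */
    static int example_peek_aperture(resource_size_t base, unsigned long size,
                                     unsigned long offset, void *dst, size_t len)
    {
            struct io_mapping *map;
            void __iomem *vaddr;

            map = io_mapping_create_wc(base, size);
            if (!map)
                    return -ENOMEM;

            vaddr = io_mapping_map_local_wc(map, offset);
            memcpy_fromio(dst, vaddr, len);
            io_mapping_unmap_local(vaddr);

            io_mapping_free(map);
            return 0;
    }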
|
| /linux/drivers/gpu/drm/panfrost/ |
| panfrost_gem.c |
     8  #include <linux/dma-mapping.h>
    95  struct panfrost_gem_mapping *iter, *mapping = NULL;  in panfrost_gem_mapping_get()
   101  mapping = iter;  in panfrost_gem_mapping_get()
   107  return mapping;  in panfrost_gem_mapping_get()
   111  panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)  in panfrost_gem_teardown_mapping()
   113  if (mapping->active)  in panfrost_gem_teardown_mapping()
   114  panfrost_mmu_unmap(mapping);  in panfrost_gem_teardown_mapping()
   116  spin_lock(&mapping->mmu->mm_lock);  in panfrost_gem_teardown_mapping()
   117  if (drm_mm_node_allocated(&mapping->mmnode))  in panfrost_gem_teardown_mapping()
   118  drm_mm_remove_node(&mapping->mmnode);  in panfrost_gem_teardown_mapping()
   123  struct panfrost_gem_mapping *mapping;  in panfrost_gem_mapping_release() local
   133  panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)  in panfrost_gem_mapping_put() argument
   143  struct panfrost_gem_mapping *mapping;  in panfrost_gem_teardown_mappings_locked() local
   157  struct panfrost_gem_mapping *mapping;  in panfrost_gem_open() local
   208  struct panfrost_gem_mapping *mapping = NULL, *iter;  in panfrost_gem_close() local
  [all …]
| /linux/drivers/gpu/drm/exynos/ |
| exynos_drm_dma.c |
    34  * drm_iommu_attach_device- attach device to iommu mapping
    40  * mapping.
    57  * Keep the original DMA mapping of the sub-device and  in drm_iommu_attach_device()
    66  ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);  in drm_iommu_attach_device()
    68  ret = iommu_attach_device(priv->mapping, subdrv_dev);  in drm_iommu_attach_device()
    75  * drm_iommu_detach_device -detach device address space mapping from device
    81  * mapping
    92  iommu_detach_device(priv->mapping, subdrv_dev);  in drm_iommu_detach_device()
   102  DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",  in exynos_drm_register_dma()
   109  if (!priv->mapping) {  in exynos_drm_register_dma()
  [all …]
|
| /linux/Documentation/filesystems/iomap/ |
| design.rst |
    70  of mapping function calls into the filesystem across a larger amount of
    78  1. Obtain a space mapping via ``->iomap_begin``
    82  1. Revalidate the mapping and go back to (1) above, if necessary.
    89  4. Release the mapping via ``->iomap_end``, if necessary
   130  * **filesystem mapping lock**: This synchronization primitive is
   131  internal to the filesystem and must protect the file mapping data
   132  from updates while a mapping is being sampled.
   138  mapping.
   154  The filesystem communicates to the iomap iterator the mapping of
   176  bytes, covered by this mapping.
  [all …]
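A heavily simplified sketch of that obtain/process/release loop, as a caller
of the iterator might drive it (process_one_mapping is hypothetical; the real
loop lives in fs/iomap/iter.c)::

    struct iomap_iter iter = {
            .inode = inode,
            .pos   = pos,
            .len   = len,
    };
    int ret;

    /* Each pass obtains one mapping via ->iomap_begin, lets the
     * operation consume part of it (recording progress in the iter),
     * and releases it via ->iomap_end before advancing or, if the
     * mapping became stale, revalidating. */
    while ((ret = iomap_iter(&iter, ops)) > 0)
            process_one_mapping(&iter);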
|
| /linux/fs/gfs2/ |
| aops.c |
    74  struct inode * const inode = folio->mapping->host;  in gfs2_write_jdata_folio()
   104  struct inode *inode = folio->mapping->host;  in __gfs2_jdata_write_folio()
   121  * @mapping: The mapping to write
   126  int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc)  in gfs2_jdata_writeback() argument
   128  struct inode *inode = mapping->host;  in gfs2_jdata_writeback()
   130  struct gfs2_sbd *sdp = GFS2_SB(mapping->host);  in gfs2_jdata_writeback()
   138  while ((folio = writeback_iter(mapping, wbc, folio, &error))) {  in gfs2_jdata_writeback()
   152  * @mapping: The mapping to write
   157  gfs2_writepages(struct address_space *mapping, struct writeback_control *wbc)  in gfs2_writepages() argument
   190  gfs2_write_jdata_batch(struct address_space *mapping, struct writeback_control *wbc, struct folio_batch *fbatch, pgoff_t *done_index)  in gfs2_write_jdata_batch() argument
   282  gfs2_write_cache_jdata(struct address_space *mapping, struct writeback_control *wbc)  in gfs2_write_cache_jdata() argument
   361  gfs2_jdata_writepages(struct address_space *mapping, struct writeback_control *wbc)  in gfs2_jdata_writepages() argument
   452  struct address_space *mapping = ip->i_inode.i_mapping;  in gfs2_internal_read() local
   545  gfs2_jdata_dirty_folio(struct address_space *mapping, struct folio *folio)  in gfs2_jdata_dirty_folio() argument
   561  gfs2_bmap(struct address_space *mapping, sector_t lblock)  in gfs2_bmap() argument
   649  struct address_space *mapping = folio->mapping;  in gfs2_release_folio() local
  [all …]
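The gfs2_jdata_writeback() loop matched above is an instance of the generic
writeback_iter() pattern. A bare-bones sketch, with a hypothetical per-folio
writer standing in for the filesystem-specific work::

    static int example_writepages(struct address_space *mapping,
                                  struct writeback_control *wbc)
    {
            struct folio *folio = NULL;
            int error = 0;

            /* writeback_iter() hands back one locked dirty folio per
             * pass and ends the walk once the range or budget is done. */
            while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                    error = example_write_folio(folio, wbc);

            return error;
    }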
| /linux/tools/testing/selftests/mm/ |
| mremap_dontunmap.c |
    59  "unable to unmap destination mapping");  in kernel_support_for_mremap_dontunmap()
    63  "unable to unmap source mapping");  in kernel_support_for_mremap_dontunmap()
    67  // This helper will just validate that an entire mapping contains the expected
    94  // the source mapping mapped.
   106  // Try to just move the whole mapping anywhere (not fixed).  in mremap_dontunmap_simple()
   122  "unable to unmap destination mapping");  in mremap_dontunmap_simple()
   124  "unable to unmap source mapping");  in mremap_dontunmap_simple()
   128  // This test validates that MREMAP_DONTUNMAP on a shared mapping works as expected.
   148  // Try to just move the whole mapping anywhere (not fixed).  in mremap_dontunmap_simple_shmem()
   155  "unable to unmap source mapping");  in mremap_dontunmap_simple_shmem()
  [all …]
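The behaviour under test reduces to one mremap() call. A userspace sketch
(src and len assumed to describe an existing private anonymous mapping)::

    #define _GNU_SOURCE
    #include <sys/mman.h>

    /* Move the pages to a new address; the source range stays mapped
     * but empty. Requires old_size == new_size and Linux 5.7+. */
    void *dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);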
|
| /linux/tools/testing/selftests/vfio/ |
| vfio_dma_mapping_test.c |
    40  struct iommu_mapping *mapping)  in intel_iommu_mapping_get() argument
    68  memset(mapping, 0, sizeof(*mapping));  in intel_iommu_mapping_get()
    69  parse_next_value(&rest, &mapping->pgd);  in intel_iommu_mapping_get()
    70  parse_next_value(&rest, &mapping->p4d);  in intel_iommu_mapping_get()
    71  parse_next_value(&rest, &mapping->pud);  in intel_iommu_mapping_get()
    72  parse_next_value(&rest, &mapping->pmd);  in intel_iommu_mapping_get()
    73  parse_next_value(&rest, &mapping->pte);  in intel_iommu_mapping_get()
    88  struct iommu_mapping *mapping)  in iommu_mapping_get() argument
    91  return intel_iommu_mapping_get(bdf, iova, mapping);  in iommu_mapping_get()
   140  struct iommu_mapping mapping;  in TEST_F() local
  [all …]
|
| /linux/arch/arm64/kvm/ |
| pkvm.c |
   329  struct pkvm_mapping *mapping;  in __pkvm_pgtable_stage2_unmap() local
   335  for_each_mapping_in_range_safe(pgt, start, end, mapping) {  in __pkvm_pgtable_stage2_unmap()
   336  ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn,  in __pkvm_pgtable_stage2_unmap()
   337  mapping->nr_pages);  in __pkvm_pgtable_stage2_unmap()
   340  pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);  in __pkvm_pgtable_stage2_unmap()
   341  kfree(mapping);  in __pkvm_pgtable_stage2_unmap()
   364  struct pkvm_mapping *mapping = NULL;  in pkvm_pgtable_stage2_map() local
   380  mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1);  in pkvm_pgtable_stage2_map()
   381  if (mapping) {  in pkvm_pgtable_stage2_map()
   382  if (size == (mapping->nr_pages * PAGE_SIZE))  in pkvm_pgtable_stage2_map()
  [all …]
|
| /linux/drivers/sh/clk/ |
| core.c |
   340  struct clk_mapping *mapping = clk->mapping;  in clk_establish_mapping() local
   345  if (!mapping) {  in clk_establish_mapping()
   349  * dummy mapping for root clocks with no specified ranges  in clk_establish_mapping()
   352  clk->mapping = &dummy_mapping;  in clk_establish_mapping()
   357  * If we're on a child clock and it provides no mapping of its  in clk_establish_mapping()
   358  * own, inherit the mapping from its root clock.  in clk_establish_mapping()
   361  mapping = clkp->mapping;  in clk_establish_mapping()
   362  BUG_ON(!mapping);  in clk_establish_mapping()
   366  * Establish initial mapping.  in clk_establish_mapping()
   368  if (!mapping->base && mapping->phys) {  in clk_establish_mapping()
  [all …]
|
| /linux/fs/ |
| dax.c |
   181  * @entry may no longer be the entry at the index in the mapping.
   346  * A DAX folio is considered shared if it has no mapping set and ->share (which
   353  return !folio->mapping && folio->share;  in dax_folio_is_shared()
   359  * previously been associated with any mappings the ->mapping and ->index
   360  * fields will be set. If it has already been associated with a mapping
   361  * the mapping will be cleared and the share count set. It's then up to
   363  * recover ->mapping and ->index information. For example by implementing
   370  * folio->mapping.  in dax_folio_make_shared()
   372  folio->mapping = NULL;  in dax_folio_make_shared()
   394  folio->mapping = NULL;  in dax_folio_put()
  [all …]
|
| /linux/tools/testing/selftests/namespaces/ |
| file_handle_test.c |
   491  /* Disable setgroups to allow gid mapping */  in TEST()
   496  char mapping[64];  in TEST() local
   497  snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
   498  write(uid_map_fd, mapping, strlen(mapping));
   501  snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
   502  write(gid_map_fd, mapping, strlen(mapping));
   620  /* Disable setgroups to allow gid mapping */  in TEST()
   625  char mapping[64];  in TEST() local
   626  snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
   627  write(uid_map_fd, mapping, strlen(mapping));
  [all …]
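The "0 %d 1" strings written above follow the uid_map/gid_map line format
"inside-id outside-id count". A condensed sketch of the same sequence after
unshare(CLONE_NEWUSER), error handling elided (map_root_to_self is a
hypothetical helper)::

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Deny setgroups (required before writing gid_map), then map id 0
     * in the new user namespace onto the caller's uid and gid. */
    static void map_root_to_self(uid_t uid, gid_t gid)
    {
            char buf[64];
            int fd;

            fd = open("/proc/self/setgroups", O_WRONLY);
            write(fd, "deny", 4);
            close(fd);

            fd = open("/proc/self/uid_map", O_WRONLY);
            snprintf(buf, sizeof(buf), "0 %d 1", uid);
            write(fd, buf, strlen(buf));
            close(fd);

            fd = open("/proc/self/gid_map", O_WRONLY);
            snprintf(buf, sizeof(buf), "0 %d 1", gid);
            write(fd, buf, strlen(buf));
            close(fd);
    }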
|
| /linux/Documentation/mm/ |
| highmem.rst |
    18  The part of (physical) memory not covered by a permanent mapping is what we
    64  These mappings are thread-local and CPU-local, meaning that the mapping
    66  CPU while the mapping is active. Although preemption is never disabled by
    68  CPU-hotplug until the mapping is disposed.
    71  in which the local mapping is acquired does not allow it for other reasons.
    82  virtual address of the direct mapping. Only real highmem pages are
    95  therefore try to design their code to avoid the use of kmap() by mapping
   107  NOTE: Conversions to kmap_local_page() must take care to follow the mapping
   116  This permits a very short duration mapping of a single page. Since the
   117  mapping is restricted to the CPU that issued it, it performs well, but
  [all …]
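In practice the short-lived local mapping described here is a three-line
pattern (kernel-side fragment; page and buf assumed to exist)::

    #include <linux/highmem.h>

    /* Map one page into a CPU-local slot, copy it out, unmap. The
     * pointer is only valid on this CPU, and nested mappings must be
     * released in reverse order of acquisition. */
    void *vaddr = kmap_local_page(page);

    memcpy(buf, vaddr, PAGE_SIZE);
    kunmap_local(vaddr);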
|