/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  Copyright (C) 1999-2002 Russell King
 */

#include <asm/glue-cache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
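/*
 * A standalone sketch (not part of this header) of what CACHE_COLOUR()
 * computes: with ARM's usual SHMLBA of four pages and PAGE_SHIFT = 12,
 * the colour is a value 0..3, and two shared mappings can only alias
 * safely in a VIPT cache when their colours match.  The DEMO_* names
 * and addresses below are illustrative assumptions.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_SHMLBA	(4UL << DEMO_PAGE_SHIFT)
#define DEMO_CACHE_COLOUR(vaddr) \
	(((vaddr) & (DEMO_SHMLBA - 1)) >> DEMO_PAGE_SHIFT)

int main(void)
{
	unsigned long a = 0x40003000UL;	/* colour 3 */
	unsigned long b = 0x7f5fb000UL;	/* colour 3: safe to share with a */
	unsigned long c = 0x7f5fc000UL;	/* colour 0: would alias with a */

	printf("colour(a)=%lu colour(b)=%lu colour(c)=%lu\n",
	       DEMO_CACHE_COLOUR(a), DEMO_CACHE_COLOUR(b),
	       DEMO_CACHE_COLOUR(c));
	return 0;
}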
/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/core-api/cachetlb.rst for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic one.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards, falls back to flush_cache_all()
 *		for all other processor versions.
 */
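/*
 * A minimal standalone sketch (illustrative SKETCH_* names, not part
 * of this header) of the rounding contract stated above: callers pass
 * a half-open [start, end) range, rounding start down and end up to
 * page boundaries.
 */
#define SKETCH_PAGE_SIZE	4096UL
#define SKETCH_PAGE_MASK	(~(SKETCH_PAGE_SIZE - 1))

static inline void sketch_align_range(unsigned long addr, unsigned long len,
				      unsigned long *start, unsigned long *end)
{
	*start = addr & SKETCH_PAGE_MASK;		/* round down */
	*end = (addr + len + SKETCH_PAGE_SIZE - 1)
			& SKETCH_PAGE_MASK;		/* round up */
}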
/*
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 */
/*
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 */
/*
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 */
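/*
 * Hedged usage sketch: after the kernel writes instructions into user
 * memory (e.g. installing a breakpoint), the I-cache and D-cache must
 * be made coherent over exactly that range.  `uaddr' and `len' are
 * illustrative; __cpuc_coherent_user_range is the glue name this
 * header routes to the per-CPU implementation.
 */
static inline void sketch_sync_user_icache(unsigned long uaddr,
					   unsigned long len)
{
	unsigned long start = uaddr & ~1UL;	/* drop a possible Thumb bit */

	__cpuc_coherent_user_range(start, start + len);
}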
/*
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr - page address
 *		- size  - region size
 */
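/*
 * Hedged kernel-side sketch: writing back a kernel buffer through the
 * __cpuc_ glue that resolves to the flush_kern_dcache_area method.
 * `buf' and `len' are illustrative.
 */
static inline void sketch_writeback_kernel_buf(void *buf, size_t len)
{
	/* clean + invalidate the kernel mapping of [buf, buf + len) */
	__cpuc_flush_dcache_area(buf, len);
}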
/*
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */
/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
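/*
 * Hedged driver-side sketch: the cache maintenance described above is
 * reached through the DMA API rather than by calling dmac_* helpers
 * directly.  `dev', `buf' and `len' are illustrative.
 */
#include <linux/dma-mapping.h>

static int sketch_dma_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* cleans the CPU cache so the device sees the buffer contents */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand `handle' to the hardware and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}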
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
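/*
 * For reference, a sketch (from memory, treat as illustrative) of the
 * pattern the generic fallback in include/asm-generic/cacheflush.h
 * follows; ARM's own copy_to_user_page() is out of line in
 * arch/arm/mm/flush.c.
 */
#define sketch_copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {								\
		memcpy(dst, src, len);					\
		flush_icache_user_page(vma, page, vaddr, len);		\
	} while (0)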
/* Invalidate I-cache */

/* Invalidate I-cache inner shareable */
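/*
 * Sketch of the helpers the two comments above annotate (reconstructed,
 * not verbatim): the first issues the ICIALLU CP15 operation, the
 * second the v7 SMP inner-shareable variant ICIALLUIS.
 */
#define sketch_flush_icache_all_generic()			\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

#define sketch_flush_icache_all_v7_smp()			\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));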
	struct mm_struct *mm = vma->vm_mm;	/* in vivt_flush_cache_range() */
				vma->vm_flags);	/* in vivt_flush_cache_range() */

	struct mm_struct *mm = vma->vm_mm;	/* in vivt_flush_cache_pages() */
				vma->vm_flags);	/* in vivt_flush_cache_pages() */
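/*
 * Hedged sketch of the surrounding VIVT helper (reconstructed, not
 * verbatim): with virtually indexed, virtually tagged caches the flush
 * is only needed if this CPU has actually run the mm in question,
 * hence the mm_cpumask() test before calling the flush_user_range
 * method under the contract documented earlier in this file.
 */
static inline void sketch_vivt_flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}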
/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
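/*
 * Userspace-side sketch: the compiler builtin below ends up in the ARM
 * private cacheflush system call mentioned above.  The buffer handling
 * and the cast to a function pointer are illustrative (a Thumb entry
 * point would additionally need its low bit set).
 */
#include <stdint.h>
#include <string.h>

void sketch_run_jitted(uint32_t *buf, const uint32_t *insns, size_t n)
{
	memcpy(buf, insns, n * sizeof(*buf));
	/* synchronise D-cache and I-cache for the freshly written code */
	__builtin___clear_cache((char *)buf, (char *)(buf + n));
	((void (*)(void))buf)();
}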
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
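/*
 * Hedged driver-side sketch: after writing to a page-cache page
 * through its kernel mapping, call flush_dcache_page() so that user
 * mappings observe the new data.  Names are illustrative.
 */
#include <linux/highmem.h>

static void sketch_fill_page(struct page *page, const void *src, size_t len)
{
	void *va = kmap_local_page(page);

	memcpy(va, src, len);
	kunmap_local(va);
	flush_dcache_page(page);	/* make the write user-visible */
}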
#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
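/*
 * Sketch of the implementation this comment describes (reconstructed
 * from memory, treat as illustrative): only aliasing cache types pay
 * for the full flush; non-aliasing VIPT needs just a barrier.
 */
static inline void sketch_flush_cache_vmap(unsigned long start,
					   unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/* set_pte_at() in the vmap path lacks a trailing DSB */
		dsb(ishst);
}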
/*
 * Where a cached writer sits adjacent to a non-cached writer, each state
 * variable must be located on a separate cache line.
 */
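/*
 * Hedged sketch: one way to give each such state variable its own
 * cache line, via the kernel's ____cacheline_aligned annotation.
 * Struct and field names are illustrative.
 */
#include <linux/cache.h>

struct sketch_handshake {
	int cached_flag ____cacheline_aligned;		/* written with caches on */
	int noncached_flag ____cacheline_aligned;	/* written with caches off */
};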
/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
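/*
 * Hedged usage sketch, modeled on CPU bring-up code (e.g. MCPM):
 * sync_cache_w()/sync_cache_r() are this header's wrappers around the
 * two helpers documented above.  `sketch_pen_release' is illustrative.
 */
static int sketch_pen_release = -1;

static void sketch_publish(int cpu)
{
	sketch_pen_release = cpu;
	sync_cache_w(&sketch_pen_release);	/* push the write to RAM */
}

static int sketch_observe(void)
{
	sync_cache_r(&sketch_pen_release);	/* pick up a remote write */
	return sketch_pen_release;
}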
/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and this must be done without any intervening memory access,
 * not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - ldrex/strex (and similar) instructions must no longer be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 */
452 ".arch armv7-a \n\t" \