arch/arm/include/asm/cacheflush.h: old revision 4b4193256c8d3bc3a5397b5cd9494c2ad386317d vs. new revision f358afc52c3066f4e8cd7b3a2d75b31e822519e9. Lines prefixed with '+' below exist only in the new revision, lines prefixed with '-' only in the old one; everything else is common to both.
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

--- 277 unchanged lines hidden ---

 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space. This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

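For context on the ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE convention, here is a minimal sketch of the generic-side pattern it participates in (my paraphrase of the include/asm-generic/cacheflush.h approach, not part of this diff): an architecture that supplies a real flush_dcache_page() sets the macro to 1, as ARM does above; everyone else gets a no-op fallback, since only aliasing D-caches need maintenance here.

/* Sketch of the generic fallback, modeled on asm-generic/cacheflush.h. */
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif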
+#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
        if ((cache_is_vivt() || cache_is_vipt_aliasing()))
                __cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
        if ((cache_is_vivt() || cache_is_vipt_aliasing()))

--- 5 unchanged lines hidden ---

                struct page *page, unsigned long vmaddr)
{
        extern void __flush_anon_page(struct vm_area_struct *vma,
                        struct page *, unsigned long);
        if (PageAnon(page))
                __flush_anon_page(vma, page, vmaddr);
}

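A hedged usage sketch for the two vmap helpers above. The mydev_dma_* functions are hypothetical, synchronous driver hooks standing in for real I/O submission; the ordering follows the documented pattern for I/O on a vmap()/vmalloc() alias of pages (as used, for example, by XFS buffer I/O): write back dirty kernel-alias lines before the device reads memory, and invalidate the alias after the device has written memory so stale lines are not served from the cache.

#include <asm/cacheflush.h>

/* Sketch: I/O on a buffer mapped with vmap(), on an aliasing D-cache. */
static void mydev_write_out(void *vaddr, int size)
{
        flush_kernel_vmap_range(vaddr, size);   /* push dirty lines to RAM */
        mydev_dma_to_device(vaddr, size);       /* device reads the data */
}

static void mydev_read_in(void *vaddr, int size)
{
        mydev_dma_from_device(vaddr, size);     /* device writes the data */
        invalidate_kernel_vmap_range(vaddr, size); /* drop stale lines */
}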
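flush_anon_page() covers the case where an anonymous user page is about to be accessed through a kernel mapping. Its canonical caller is the get_user_pages() core in mm/gup.c, which flushes each pinned page before handing it to the kernel; a condensed sketch of that pattern follows (prepare_pinned_page() is a hypothetical name for illustration):

#include <linux/mm.h>
#include <asm/cacheflush.h>

/* Sketch, modeled on how the get_user_pages() core prepares pinned
 * pages: flush the page's user-space alias, then the kernel-space
 * alias, so kernel reads observe the user's latest stores.
 */
static void prepare_pinned_page(struct vm_area_struct *vma,
                                struct page *page, unsigned long uaddr)
{
        flush_anon_page(vma, page, uaddr);      /* user-space alias */
        flush_dcache_page(page);                /* kernel-space alias */
}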
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-extern void flush_kernel_dcache_page(struct page *);
-
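The two '-' lines are the substance of this diff: the new revision drops flush_kernel_dcache_page() from the ARM API. A hedged migration sketch (my reading of the removal, not stated in the hunk; zero_partial_page() is hypothetical): since flush_kernel_dcache_page() provided a subset of what flush_dcache_page() does, a caller that modified a page through a temporary kernel mapping can switch to the more general call.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller updated for the removal above. */
static void zero_partial_page(struct page *page, unsigned int from)
{
        u8 *kaddr = kmap_local_page(page);

        memset(kaddr + from, 0, PAGE_SIZE - from);
        kunmap_local(kaddr);
        /* was: flush_kernel_dcache_page(page); */
        flush_dcache_page(page);
}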
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)

/*
 * We don't appear to need to do anything here. In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page) do { } while (0)

--- 158 unchanged lines hidden ---
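The flush_dcache_mmap_lock()/unlock() macros above guard walks of a file's user mappings. A condensed sketch of that usage, loosely modeled on __flush_dcache_aliases() in arch/arm/mm/flush.c (simplified; the real function also filters mappings, e.g. on VM_MAYSHARE):

#include <linux/mm.h>
#include <asm/cacheflush.h>

/* Sketch: flush every user-space alias of one page-cache page while
 * holding flush_dcache_mmap_lock() over the i_mmap interval tree.
 */
static void flush_user_aliases(struct address_space *mapping,
                               struct page *page)
{
        struct vm_area_struct *vma;
        pgoff_t pgoff = page->index;

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long addr = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

                flush_cache_page(vma, addr, page_to_pfn(page));
        }
        flush_dcache_mmap_unlock(mapping);
}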