cacheflush.h (old: 597473720f4dc69749542bfcfed4a927a43d935e) vs. cacheflush.h (new: f358afc52c3066f4e8cd7b3a2d75b31e822519e9)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */

--- 111 unchanged lines hidden ---

extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}
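The kunmap_coherent()/kmap_noncoherent()/kunmap_noncoherent() declarations above are a MIPS-internal temporary-mapping interface and are identical in both versions. As a rough illustration only (not part of either file, and with an invented helper name), a caller might pair the two like this:

/* Hypothetical caller: copy data into a page through a short-lived
 * mapping from kmap_noncoherent(), then drop the mapping again. */
static void copy_into_page_sketch(struct page *page, unsigned long addr,
				  const void *src, size_t len)
{
	void *dst = kmap_noncoherent(page, addr);

	memcpy(dst, src, len);
	kunmap_noncoherent();
}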
Only in the old version:

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
	flush_dcache_page(page);
}
Only in the new version:

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
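For context, a minimal sketch (not taken from either file, helper name invented) of the pattern the old flush_kernel_dcache_page() interface served on kernels that still provide it: a page is filled through a kernel mapping, and the kernel alias is written back before the page is read through another mapping.

/* Hypothetical example, only meaningful on kernels that still have
 * flush_kernel_dcache_page(): write a page via kmap() and flush the
 * kernel alias so other mappings of the page see the new data. */
static void fill_page_sketch(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap(page);

	memcpy(kaddr, src, len);
	flush_kernel_dcache_page(page);
	kunmap(page);
}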
Common to both versions:

/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

#endif /* _ASM_CACHEFLUSH_H */
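Both inline helpers above dispatch to the same writeback-and-invalidate routine and only do work when the CPU can have D-cache aliases. A rough usage sketch, assuming the vmap I/O pattern described in Documentation/core-api/cachetlb.rst (the wrapper names below are placeholders):

/* Hypothetical wrappers around a buffer that is accessed both through a
 * vmap() alias and through the underlying pages' own kernel mappings. */
static void vmap_buffer_before_io_sketch(void *vmap_addr, int size)
{
	/* CPU stores went through the vmap alias; write them back so the
	 * I/O path, which uses the pages directly, sees the data. */
	flush_kernel_vmap_range(vmap_addr, size);
}

static void vmap_buffer_after_io_sketch(void *vmap_addr, int size)
{
	/* The pages were filled behind the vmap alias (e.g. by DMA);
	 * discard stale cache lines before reading through vmap_addr. */
	invalidate_kernel_vmap_range(vmap_addr, size);
}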