/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cpu-info.h>
#include <asm/cacheops.h>

cache_present(struct cache_desc * cdesc)12 static inline bool cache_present(struct cache_desc *cdesc)
13 {
14 return cdesc->flags & CACHE_PRESENT;
15 }
16
cache_private(struct cache_desc * cdesc)17 static inline bool cache_private(struct cache_desc *cdesc)
18 {
19 return cdesc->flags & CACHE_PRIVATE;
20 }
21
cache_inclusive(struct cache_desc * cdesc)22 static inline bool cache_inclusive(struct cache_desc *cdesc)
23 {
24 return cdesc->flags & CACHE_INCLUSIVE;
25 }
26
cpu_last_level_cache_line_size(void)27 static inline unsigned int cpu_last_level_cache_line_size(void)
28 {
29 int cache_present = boot_cpu_data.cache_leaves_present;
30
31 return boot_cpu_data.cache_leaves[cache_present - 1].linesz;
32 }
33
asmlinkage void __flush_cache_all(void);

36 /*
37 * LoongArch maintains ICache/DCache coherency by hardware,
38 * we just need "ibar" to avoid instruction hazard here.
39 */
local_flush_icache_all(void)40 static inline void local_flush_icache_all(void)
41 {
42 asm volatile ("ibar\t0\n"::);
43 }
44
local_flush_icache_range(unsigned long start,unsigned long end)45 static inline void local_flush_icache_range(unsigned long start, unsigned long end)
46 {
47 asm volatile ("ibar\t0\n"::);
48 }
49
/* All icache flushes reduce to the local "ibar" variants above. */
#define flush_icache_all	local_flush_icache_all
#define flush_icache_range	local_flush_icache_range
#define flush_icache_user_range	local_flush_icache_range

/* Hardware cache coherency makes all of these architectural no-ops. */
#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)		do { } while (0)
#define flush_cache_vmap(start, end)			do { } while (0)
#define flush_cache_vunmap(start, end)			do { } while (0)
#define flush_icache_user_page(vma, page, addr, len)	do { } while (0)
#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)

/* Issue a single CACOP cache maintenance instruction with opcode @op on @addr. */
#define cache_op(op, addr)						\
	__asm__ __volatile__(						\
	"	cacop	%0, %1					\n"	\
	:								\
	: "i" (op), "ZC" (*(unsigned char *)(addr)))

flush_cache_line(int leaf,unsigned long addr)71 static inline void flush_cache_line(int leaf, unsigned long addr)
72 {
73 switch (leaf) {
74 case Cache_LEAF0:
75 cache_op(Index_Writeback_Inv_LEAF0, addr);
76 break;
77 case Cache_LEAF1:
78 cache_op(Index_Writeback_Inv_LEAF1, addr);
79 break;
80 case Cache_LEAF2:
81 cache_op(Index_Writeback_Inv_LEAF2, addr);
82 break;
83 case Cache_LEAF3:
84 cache_op(Index_Writeback_Inv_LEAF3, addr);
85 break;
86 case Cache_LEAF4:
87 cache_op(Index_Writeback_Inv_LEAF4, addr);
88 break;
89 case Cache_LEAF5:
90 cache_op(Index_Writeback_Inv_LEAF5, addr);
91 break;
92 default:
93 break;
94 }
95 }
96
#include <asm-generic/cacheflush.h>

#endif /* _ASM_CACHEFLUSH_H */