// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Heiko Stuebner <heiko@sntech.de>
 */
5
6 #include <linux/bug.h>
7 #include <linux/kernel.h>
8 #include <linux/memory.h>
9 #include <linux/module.h>
10 #include <linux/string.h>
11 #include <linux/uaccess.h>
12 #include <asm/alternative.h>
13 #include <asm/cacheflush.h>
14 #include <asm/cpufeature.h>
15 #include <asm/dma-noncoherent.h>
16 #include <asm/errata_list.h>
17 #include <asm/hwprobe.h>
18 #include <asm/io.h>
19 #include <asm/patch.h>
20 #include <asm/vendorid_list.h>
21 #include <asm/vendor_extensions.h>
22
/*
 * T-Head vendor status CSR; the MAEE bit reports whether the memory
 * attribute extension is enabled (checked by errata_probe_mae() below).
 */
#define CSR_TH_SXSTATUS 0x5c0
#define SXSTATUS_MAEE _AC(0x200000, UL)
25
errata_probe_mae(unsigned int stage,unsigned long arch_id,unsigned long impid)26 static bool errata_probe_mae(unsigned int stage,
27 unsigned long arch_id, unsigned long impid)
28 {
29 if (!IS_ENABLED(CONFIG_ERRATA_THEAD_MAE))
30 return false;
31
32 if (arch_id != 0 || impid != 0)
33 return false;
34
35 if (stage != RISCV_ALTERNATIVES_EARLY_BOOT &&
36 stage != RISCV_ALTERNATIVES_MODULE)
37 return false;
38
39 if (!(csr_read(CSR_TH_SXSTATUS) & SXSTATUS_MAEE))
40 return false;
41
42 return true;
43 }
44
/*
 * th.dcache.ipa rs1 (invalidate, physical address)
 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
 *   0000001    01010      rs1       000      00000  0001011
 * th.dcache.iva rs1 (invalidate, virtual address)
 *   0000001    00110      rs1       000      00000  0001011
 *
 * th.dcache.cpa rs1 (clean, physical address)
 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
 *   0000001    01001      rs1       000      00000  0001011
 * th.dcache.cva rs1 (clean, virtual address)
 *   0000001    00101      rs1       000      00000  0001011
 *
 * th.dcache.cipa rs1 (clean then invalidate, physical address)
 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
 *   0000001    01011      rs1       000      00000  0001011
 * th.dcache.civa rs1 (clean then invalidate, virtual address)
 *   0000001    00111      rs1       000      00000  0001011
 *
 * th.sync.s (make sure all cache operations finished)
 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
 *   0000000    11001     00000      000      00000  0001011
 */
/* Raw encodings of the vendor cache instructions above, all taking a0 */
#define THEAD_INVAL_A0 ".long 0x02a5000b"
#define THEAD_CLEAN_A0 ".long 0x0295000b"
#define THEAD_FLUSH_A0 ".long 0x02b5000b"
#define THEAD_SYNC_S ".long 0x0190000b"

/*
 * Run the cache operation _op (INVAL/CLEAN/FLUSH) over the address range
 * [_start, _start + _size): a0 walks the range in _cachesize steps,
 * starting from _start rounded down to a _cachesize boundary, and
 * th.sync.s waits for all the cache operations to complete.  a0 is the
 * only clobbered register.  (Comments cannot appear inside the macro
 * body because of the line continuations.)
 */
#define THEAD_CMO_OP(_op, _start, _size, _cachesize)			\
asm volatile("mv a0, %1\n\t"						\
	     "j 2f\n\t"							\
	     "3:\n\t"							\
	     THEAD_##_op##_A0 "\n\t"					\
	     "add a0, a0, %0\n\t"					\
	     "2:\n\t"							\
	     "bltu a0, %2, 3b\n\t"					\
	     THEAD_SYNC_S						\
	     : : "r"(_cachesize),					\
	     "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)),	\
	     "r"((unsigned long)(_start) + (_size))			\
	     : "a0")
86
/* Invalidate the cache lines covering [paddr, paddr + size) */
static void thead_errata_cache_inv(phys_addr_t paddr, size_t size)
{
	THEAD_CMO_OP(INVAL, paddr, size, riscv_cbom_block_size);
}
91
/* Write back (clean) the cache lines covering [paddr, paddr + size) */
static void thead_errata_cache_wback(phys_addr_t paddr, size_t size)
{
	THEAD_CMO_OP(CLEAN, paddr, size, riscv_cbom_block_size);
}
96
/* Write back then invalidate the cache lines covering [paddr, paddr + size) */
static void thead_errata_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	THEAD_CMO_OP(FLUSH, paddr, size, riscv_cbom_block_size);
}
101
/* Non-standard cache ops registered by errata_probe_cmo() at boot stage */
static const struct riscv_nonstd_cache_ops thead_errata_cmo_ops = {
	.wback = &thead_errata_cache_wback,
	.inv = &thead_errata_cache_inv,
	.wback_inv = &thead_errata_cache_wback_inv,
};
107
errata_probe_cmo(unsigned int stage,unsigned long arch_id,unsigned long impid)108 static bool errata_probe_cmo(unsigned int stage,
109 unsigned long arch_id, unsigned long impid)
110 {
111 if (!IS_ENABLED(CONFIG_ERRATA_THEAD_CMO))
112 return false;
113
114 if (arch_id != 0 || impid != 0)
115 return false;
116
117 if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
118 return false;
119
120 if (stage == RISCV_ALTERNATIVES_BOOT) {
121 riscv_cbom_block_size = L1_CACHE_BYTES;
122 riscv_noncoherent_supported();
123 riscv_noncoherent_register_cache_ops(&thead_errata_cmo_ops);
124 }
125
126 return true;
127 }
128
errata_probe_pmu(unsigned int stage,unsigned long arch_id,unsigned long impid)129 static bool errata_probe_pmu(unsigned int stage,
130 unsigned long arch_id, unsigned long impid)
131 {
132 if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PMU))
133 return false;
134
135 /* target-c9xx cores report arch_id and impid as 0 */
136 if (arch_id != 0 || impid != 0)
137 return false;
138
139 if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
140 return false;
141
142 return true;
143 }
144
thead_errata_probe(unsigned int stage,unsigned long archid,unsigned long impid)145 static u32 thead_errata_probe(unsigned int stage,
146 unsigned long archid, unsigned long impid)
147 {
148 u32 cpu_req_errata = 0;
149
150 if (errata_probe_mae(stage, archid, impid))
151 cpu_req_errata |= BIT(ERRATA_THEAD_MAE);
152
153 errata_probe_cmo(stage, archid, impid);
154
155 if (errata_probe_pmu(stage, archid, impid))
156 cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
157
158 return cpu_req_errata;
159 }
160
/*
 * Patch the T-Head alternative entries in [begin, end).
 *
 * Entries are skipped unless they carry THEAD_VENDOR_ID and a patch_id
 * inside the errata range; an entry is applied only when its bit was set
 * by thead_errata_probe() for this stage.
 */
void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
			     unsigned long archid, unsigned long impid,
			     unsigned int stage)
{
	struct alt_entry *alt;
	u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);
	u32 tmp;
	void *oldptr, *altptr;

	/* errata patch_ids must not collide with vendor-extension ones */
	BUILD_BUG_ON(ERRATA_THEAD_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE);

	for (alt = begin; alt < end; alt++) {
		if (alt->vendor_id != THEAD_VENDOR_ID)
			continue;
		if (alt->patch_id >= ERRATA_THEAD_NUMBER)
			continue;

		tmp = (1U << alt->patch_id);
		if (cpu_req_errata & tmp) {
			oldptr = ALT_OLD_PTR(alt);
			altptr = ALT_ALT_PTR(alt);

			/* On vm-alternatives, the mmu isn't running yet */
			if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) {
				memcpy(oldptr, altptr, alt->alt_len);
			} else {
				mutex_lock(&text_mutex);
				patch_text_nosync(oldptr, altptr, alt->alt_len);
				mutex_unlock(&text_mutex);
			}
		}
	}

	/* early-boot patches used plain memcpy; flush them into the icache */
	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		local_flush_icache_all();
}
197