/* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/disassem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>

#include <machine/cpufunc.h>

/* PRIMARY CACHE VARIABLES */

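/*
 * Line size of the primary data cache in bytes, and the corresponding
 * alignment mask (line size - 1), as discovered by get_cachetype_cp15().
 */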
unsigned int arm_dcache_align;
unsigned int arm_dcache_align_mask;

#ifdef CPU_MV_PJ4B
static void pj4bv7_setup(void);
#endif
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void cortexa_setup(void);
#endif

#ifdef CPU_MV_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* Cache operations */
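	/*
	 * cpufunc_nullop stands in as a no-op for each of the L2 cache
	 * hooks; the (void *) casts paper over the differing member
	 * prototypes.
	 */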
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */
	.cf_setup = pj4bv7_setup
};
#endif /* CPU_MV_PJ4B */


#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
struct cpu_functions cortexa_cpufuncs = {
	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */
	.cf_setup = cortexa_setup
};
#endif /* CPU_CORTEXA || CPU_KRAIT */

/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;

static void get_cachetype_cp15(void);

static void
get_cachetype_cp15(void)
{
	u_int ctype, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_char type;

	ctype = cp15_ctr_get();
	cpuid = cp15_midr_get();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
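		/*
		 * Read CLIDR (MRC p15, 1, <Rt>, c0, c0, 1) and walk the
		 * cache levels it describes, three type bits per level.
		 */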
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		i = 0;
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
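				/*
				 * Select the data/unified cache at this
				 * level via CSSELR, then read its geometry
				 * from CCSIDR to derive the line size.
				 */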
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_dcache_align = 1U <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
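				/*
				 * Select the instruction cache at this level
				 * (CSSELR with the InD bit set); the CCSIDR
				 * value is read but not recorded here.
				 */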
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
			}
			i++;
			clevel >>= 3;
		}
	} else {
		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

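		/*
		 * Pre-ARMv7 cache type register: the D-cache line size is
		 * 1 << (len field + 3) bytes; an associativity field of 0
		 * together with the M bit set means the cache is absent.
		 */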
		dsize = CPU_CT_DSIZE(ctype);
		arm_dcache_align = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_dcache_align = 0; /* not present */
		}
	}

out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
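	/* Identify the CPU from the main ID register (MIDR). */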
	cputype = cp15_midr_get();
	cputype &= CPU_ID_CPU_MASK;

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return (ARCHITECTURE_NOT_PRESENT);
out:
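	/*
	 * Tell UMA the discovered cache line mask so that zones requesting
	 * cache-line alignment can honour it.
	 */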
	uma_set_cache_align_mask(arm_dcache_align_mask);
	return (0);
}

/*
 * CPU Setup code
 */


#if defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
	/* Set up the PMCCNTR register as a cycle counter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
}
#endif


#ifdef CPU_MV_PJ4B
static void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif /* CPU_CORTEXA || CPU_KRAIT */
