/*	$NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <machine/armreg.h>

static __inline void
breakpoint(void)
{
	__asm("udf        0xffff");
}

struct cpu_functions {

	/* CPU functions */
#if __ARM_ARCH < 6
	void	(*cf_cpwait)		(void);

	/* MMU functions */

	u_int	(*cf_control)		(u_int bic, u_int eor);
	void	(*cf_setttb)		(u_int ttb);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);
	void	(*cf_tlb_flushID_SE)	(u_int va);
	void	(*cf_tlb_flushD)	(void);
	void	(*cf_tlb_flushD_SE)	(u_int va);

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	ID-cache Invalidate All:
	 *		Unlike other functions, this one must never write back.
	 *		It is used to initialize the MMU when it is in an unknown
	 *		state (such as when it may have lines tagged as valid
	 *		that belong to a previous set of mappings).
	 *
	 *	I-cache Sync range:
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-Back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 *
	 * (A usage sketch follows the cpu_*() wrapper macros below.)
	 */
	void	(*cf_icache_sync_range)	(vm_offset_t, vm_size_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range) (vm_offset_t, vm_size_t);
	void	(*cf_dcache_inv_range)	(vm_offset_t, vm_size_t);
	void	(*cf_dcache_wb_range)	(vm_offset_t, vm_size_t);

	void	(*cf_idcache_inv_all)	(void);
	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range) (vm_offset_t, vm_size_t);
#endif
	void	(*cf_l2cache_wbinv_all) (void);
	void	(*cf_l2cache_wbinv_range) (vm_offset_t, vm_size_t);
	void	(*cf_l2cache_inv_range)	  (vm_offset_t, vm_size_t);
	void	(*cf_l2cache_wb_range)	  (vm_offset_t, vm_size_t);
	void	(*cf_l2cache_drain_writebuf)	(void);

	/* Other functions */

#if __ARM_ARCH < 6
	void	(*cf_drain_writebuf)	(void);
#endif

	void	(*cf_sleep)		(int mode);

#if __ARM_ARCH < 6
	/* Soft functions */

	void	(*cf_context_switch)	(void);
#endif

	void	(*cf_setup)		(void);
};
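
/*
 * The structure above is a per-CPU-family dispatch table: the cpu_*()
 * wrapper macros below simply indirect through the global "cpufuncs"
 * instance that set_cpufuncs() fills in at boot.  As an illustrative
 * sketch (not code from this file), a call such as
 *
 *	cpu_dcache_wb_range(va, len);
 *
 * expands to
 *
 *	cpufuncs.cf_dcache_wb_range((va), (len));
 *
 * and therefore runs whatever implementation (arm9_*, sheeva_*,
 * xscale_*, ...) was selected for the CPU identified at boot.
 */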

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#if __ARM_ARCH < 6
#define	cpu_cpwait()		cpufuncs.cf_cpwait()

#define	cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define	cpu_setttb(t)		cpufuncs.cf_setttb(t)

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_idcache_inv_all()	cpufuncs.cf_idcache_inv_all()
#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
#endif

#define	cpu_l2cache_wbinv_all()	cpufuncs.cf_l2cache_wbinv_all()
#define	cpu_l2cache_wb_range(a, s) cpufuncs.cf_l2cache_wb_range((a), (s))
#define	cpu_l2cache_inv_range(a, s) cpufuncs.cf_l2cache_inv_range((a), (s))
#define	cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s))
#define	cpu_l2cache_drain_writebuf() cpufuncs.cf_l2cache_drain_writebuf()

#if __ARM_ARCH < 6
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#endif
#define	cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define	cpu_setup()		cpufuncs.cf_setup()
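
/*
 * Usage sketch (illustrative only; "va" and "len" are hypothetical
 * names): after storing instructions into memory, the new data must be
 * pushed past the D-cache and any stale instructions discarded before
 * the code is executed.  One plausible sequence on pre-v6 CPUs:
 *
 *	cpu_dcache_wb_range(va, len);	(write data back toward memory)
 *	cpu_l2cache_wb_range(va, len);	(likewise for the outer cache)
 *	cpu_icache_sync_range(va, len);	(resynchronize the I-cache)
 *	cpu_cpwait();			(wait for the cp15 ops to land)
 */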

int	set_cpufuncs		(void);
#define	ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define	ARCHITECTURE_NOT_SUPPORTED	2	/* not known */
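
/*
 * Illustrative sketch: machine-dependent startup is expected to select
 * the dispatch table before any cpu_*() wrapper is used (the error
 * handling here is a hypothetical example, assuming a zero return on
 * success):
 *
 *	if (set_cpufuncs() != 0)
 *		panic("cpufunc: CPU type not recognized");
 *	cpu_setup();
 */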

void	cpufunc_nullop		(void);
u_int	cpu_ident		(void);
u_int	cpufunc_control		(u_int bic, u_int eor);
void	cpu_domains		(u_int domains);
u_int	cpu_faultstatus		(void);
u_int	cpu_faultaddress	(void);
u_int	cpu_get_control		(void);
u_int	cpu_pfr			(int);

#if defined(CPU_FA526)
void	fa526_setup		(void);
void	fa526_setttb		(u_int ttb);
void	fa526_context_switch	(void);
void	fa526_cpu_sleep		(int);
void	fa526_tlb_flushID_SE	(u_int);

void	fa526_icache_sync_range(vm_offset_t start, vm_size_t size);
void	fa526_dcache_wbinv_all	(void);
void	fa526_dcache_wbinv_range(vm_offset_t start, vm_size_t size);
void	fa526_dcache_inv_range	(vm_offset_t start, vm_size_t size);
void	fa526_dcache_wb_range	(vm_offset_t start, vm_size_t size);
void	fa526_idcache_wbinv_all(void);
void	fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t size);
#endif

#if defined(CPU_ARM9) || defined(CPU_ARM9E)
void	arm9_setttb		(u_int);
void	arm9_tlb_flushID_SE	(u_int va);
void	arm9_context_switch	(void);
#endif

#if defined(CPU_ARM9)
void	arm9_icache_sync_range	(vm_offset_t, vm_size_t);

void	arm9_dcache_wbinv_all	(void);
void	arm9_dcache_wbinv_range (vm_offset_t, vm_size_t);
void	arm9_dcache_inv_range	(vm_offset_t, vm_size_t);
void	arm9_dcache_wb_range	(vm_offset_t, vm_size_t);

void	arm9_idcache_wbinv_all	(void);
void	arm9_idcache_wbinv_range (vm_offset_t, vm_size_t);

void	arm9_setup		(void);

extern unsigned arm9_dcache_sets_max;
extern unsigned arm9_dcache_sets_inc;
extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif

#if defined(CPU_ARM9E)
void	arm10_setup		(void);

u_int	sheeva_control_ext		(u_int, u_int);
void	sheeva_cpu_sleep		(int);
void	sheeva_setttb			(u_int);
void	sheeva_dcache_wbinv_range	(vm_offset_t, vm_size_t);
void	sheeva_dcache_inv_range		(vm_offset_t, vm_size_t);
void	sheeva_dcache_wb_range		(vm_offset_t, vm_size_t);
void	sheeva_idcache_wbinv_range	(vm_offset_t, vm_size_t);

void	sheeva_l2cache_wbinv_range	(vm_offset_t, vm_size_t);
void	sheeva_l2cache_inv_range	(vm_offset_t, vm_size_t);
void	sheeva_l2cache_wb_range		(vm_offset_t, vm_size_t);
void	sheeva_l2cache_wbinv_all	(void);
#endif

#if defined(CPU_MV_PJ4B)
void	armv6_idcache_wbinv_all		(void);
#endif
#if defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
void	armv7_idcache_wbinv_all		(void);
void	armv7_cpu_sleep			(int);
void	armv7_setup			(void);
void	armv7_drain_writebuf		(void);

void	cortexa_setup			(void);
#endif
#if defined(CPU_MV_PJ4B)
void	pj4b_config			(void);
void	pj4bv7_setup			(void);
#endif

#if defined(CPU_ARM1176)
void	arm11_drain_writebuf	(void);

void	arm11x6_setup		(void);
void	arm11x6_sleep		(int);	/* no ref. for errata */
#endif

#if defined(CPU_ARM9E)
void	armv5_ec_setttb(u_int);

void	armv5_ec_icache_sync_range(vm_offset_t, vm_size_t);

void	armv5_ec_dcache_wbinv_all(void);
void	armv5_ec_dcache_wbinv_range(vm_offset_t, vm_size_t);
void	armv5_ec_dcache_inv_range(vm_offset_t, vm_size_t);
void	armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t);

void	armv5_ec_idcache_wbinv_all(void);
void	armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);
#endif

#if defined(CPU_ARM9) || defined(CPU_ARM9E) ||				\
  defined(CPU_FA526) ||							\
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342)

void	armv4_tlb_flushID	(void);
void	armv4_tlb_flushD	(void);
void	armv4_tlb_flushD_SE	(u_int va);

void	armv4_drain_writebuf	(void);
void	armv4_idcache_inv_all	(void);
#endif

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342)
void	xscale_cpwait		(void);

void	xscale_cpu_sleep	(int mode);

u_int	xscale_control		(u_int bic, u_int eor);

void	xscale_setttb		(u_int ttb);

void	xscale_tlb_flushID_SE	(u_int va);

void	xscale_cache_flushID	(void);
void	xscale_cache_flushI	(void);
void	xscale_cache_flushD	(void);
void	xscale_cache_flushD_SE	(u_int entry);

void	xscale_cache_cleanID	(void);
void	xscale_cache_cleanD	(void);
void	xscale_cache_cleanD_E	(u_int entry);

void	xscale_cache_clean_minidata (void);

void	xscale_cache_purgeID	(void);
void	xscale_cache_purgeID_E	(u_int entry);
void	xscale_cache_purgeD	(void);
void	xscale_cache_purgeD_E	(u_int entry);

void	xscale_cache_syncI	(void);
void	xscale_cache_cleanID_rng (vm_offset_t start, vm_size_t size);
void	xscale_cache_cleanD_rng	(vm_offset_t start, vm_size_t size);
void	xscale_cache_purgeID_rng (vm_offset_t start, vm_size_t size);
void	xscale_cache_purgeD_rng	(vm_offset_t start, vm_size_t size);
void	xscale_cache_syncI_rng	(vm_offset_t start, vm_size_t size);
void	xscale_cache_flushD_rng	(vm_offset_t start, vm_size_t size);

void	xscale_context_switch	(void);

void	xscale_setup		(void);
#endif	/* CPU_XSCALE_PXA2X0 || CPU_XSCALE_81342 */

#ifdef	CPU_XSCALE_81342

void	xscalec3_l2cache_purge	(void);
void	xscalec3_cache_purgeID	(void);
void	xscalec3_cache_purgeD	(void);
void	xscalec3_cache_cleanID	(void);
void	xscalec3_cache_cleanD	(void);
void	xscalec3_cache_syncI	(void);

void	xscalec3_cache_purgeID_rng	(vm_offset_t start, vm_size_t size);
void	xscalec3_cache_purgeD_rng	(vm_offset_t start, vm_size_t size);
void	xscalec3_cache_cleanID_rng	(vm_offset_t start, vm_size_t size);
void	xscalec3_cache_cleanD_rng	(vm_offset_t start, vm_size_t size);
void	xscalec3_cache_syncI_rng	(vm_offset_t start, vm_size_t size);

void	xscalec3_l2cache_flush_rng	(vm_offset_t, vm_size_t);
void	xscalec3_l2cache_clean_rng	(vm_offset_t start, vm_size_t size);
void	xscalec3_l2cache_purge_rng	(vm_offset_t start, vm_size_t size);

void	xscalec3_setttb		(u_int ttb);
void	xscalec3_context_switch	(void);

#endif /* CPU_XSCALE_81342 */

/*
 * Macros for manipulating CPU interrupts
 */
#if __ARM_ARCH < 6
#define	__ARM_INTR_BITS		(PSR_I | PSR_F)
#else
#define	__ARM_INTR_BITS		(PSR_I | PSR_F | PSR_A)
#endif

static __inline uint32_t
__set_cpsr(uint32_t bic, uint32_t eor)
{
	uint32_t	tmp, ret;

	__asm __volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic	 %1, %0, %2\n"	/* Clear bits */
		"eor	 %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_xc, %1\n"	/* Set the CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return (ret);
}
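
/*
 * Worked example (sketch): __set_cpsr(PSR_I | PSR_F, PSR_I) computes
 *
 *	new_cpsr = (old_cpsr & ~(PSR_I | PSR_F)) ^ PSR_I;
 *
 * i.e. it first clears both interrupt-mask bits and then toggles PSR_I
 * back on, leaving IRQs masked and FIQs enabled.  The pre-modification
 * CPSR is returned so the caller can restore it later.
 */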

static __inline uint32_t
disable_interrupts(uint32_t mask)
{

	return (__set_cpsr(mask & __ARM_INTR_BITS, mask & __ARM_INTR_BITS));
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{

	return (__set_cpsr(mask & __ARM_INTR_BITS, 0));
}

static __inline uint32_t
restore_interrupts(uint32_t old_cpsr)
{

	return (__set_cpsr(__ARM_INTR_BITS, old_cpsr & __ARM_INTR_BITS));
}

static __inline register_t
intr_disable(void)
{

	return (disable_interrupts(PSR_I | PSR_F));
}

static __inline void
intr_restore(register_t s)
{

	restore_interrupts(s);
}
#undef __ARM_INTR_BITS
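
/*
 * Typical usage (illustrative sketch): bracket a short critical
 * section with intr_disable()/intr_restore() so that whatever
 * interrupt mask was in effect on entry is re-established on exit:
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(s);
 */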

/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void	set_stackptr	(u_int mode, u_int address);
u_int	get_stackptr	(u_int mode);
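
/*
 * Illustrative sketch: each privileged ARM mode banks its own r13, so
 * early boot installs a separate stack per exception mode.  Here
 * "irqstack_top" is a hypothetical address; PSR_IRQ32_MODE comes from
 * machine/armreg.h:
 *
 *	set_stackptr(PSR_IRQ32_MODE, irqstack_top);
 */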

/*
 * Miscellany
 */

int	get_pc_str_offset	(void);

/*
 * CPU functions from locore.S
 */

void	cpu_reset	(void) __attribute__((__noreturn__));

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
extern int	arm_picache_size;
extern int	arm_picache_line_size;
extern int	arm_picache_ways;

extern int	arm_pdcache_size;	/* and unified */
extern int	arm_pdcache_line_size;
extern int	arm_pdcache_ways;

extern int	arm_pcache_type;
extern int	arm_pcache_unified;

extern int	arm_dcache_align;
extern int	arm_dcache_align_mask;

extern u_int	arm_cache_level;
extern u_int	arm_cache_loc;
extern u_int	arm_cache_type[14];

#else	/* !_KERNEL */

static __inline void
breakpoint(void)
{

	/*
	 * This matches the instruction used by GDB for software
	 * breakpoints.
	 */
	__asm("udf        0xfdee");
}

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */

/* End of cpufunc.h */