/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* $NetBSD: cpu.h,v 1.2 2001/02/23 21:23:52 reinoud Exp $ */

#ifndef MACHINE_CPU_H
#define MACHINE_CPU_H

#include <machine/armreg.h>
#include <machine/frame.h>

void	cpu_halt(void);

#ifdef _KERNEL
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>

/*
 * Some kernel modules (dtraceall, for example) are compiled unconditionally
 * with -DSMP. Although it looks like a bug, handle this case here and in
 * the #elif condition of the ARM_SMP_UP macro.
 */

#if !defined(SMP) && defined(SMP_ON_UP)
#error SMP option must be defined for SMP_ON_UP option
#endif

#define CPU_ASID_KERNEL 0

#if defined(SMP_ON_UP)
#define ARM_SMP_UP(smp_code, up_code)				\
do {								\
	if (cpuinfo.mp_ext != 0) {				\
		smp_code;					\
	} else {						\
		up_code;					\
	}							\
} while (0)
#elif defined(SMP)
#define ARM_SMP_UP(smp_code, up_code)				\
do {								\
	smp_code;						\
} while (0)
#else
#define ARM_SMP_UP(smp_code, up_code)				\
do {								\
	up_code;						\
} while (0)
#endif
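
/*
 * A typical caller passes the broadcasting (inner-shareable) form of an
 * operation as smp_code and the strictly local form as up_code; on
 * SMP_ON_UP kernels the macro selects between them at run time based on
 * cpuinfo.mp_ext. A minimal sketch, matching the pattern used by
 * tlb_flush_all() and friends below:
 *
 *	ARM_SMP_UP(
 *	    _CP15_TLBIALLIS(),
 *	    _CP15_TLBIALL()
 *	);
 */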

void	dcache_wbinv_poc_all(void);	/* !!! NOT SMP coherent function !!! */
vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

#ifdef DEV_PMU
#include <sys/pcpu.h>
#define	PMU_OVSR_C	0x80000000	/* Cycle Counter */
extern uint32_t	ccnt_hi[MAXCPU];
extern int pmu_attched;
#endif /* DEV_PMU */

#define sev()	__asm __volatile("sev" : : : "memory")
#define wfe()	__asm __volatile("wfe" : : : "memory")

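/*
 * wfe() parks the CPU in a low-power state until an event or interrupt
 * arrives; sev() signals an event to all CPUs. A minimal, hypothetical
 * wait/wake sketch, with 'flag' standing in for whatever shared word the
 * caller actually polls:
 *
 *	while (atomic_load_acq_32(&flag) == 0)
 *		wfe();
 *
 * and, on the side publishing the update:
 *
 *	atomic_store_rel_32(&flag, 1);
 *	dsb();
 *	sev();
 */
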
/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

#define _RF0(fname, aname...)					\
static __inline uint32_t					\
fname(void)							\
{								\
	uint32_t reg;						\
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg));	\
	return (reg);						\
}

#define _R64F0(fname, aname)					\
static __inline uint64_t					\
fname(void)							\
{								\
	uint64_t reg;						\
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg));	\
	return (reg);						\
}

#define _WF0(fname, aname...)					\
static __inline void						\
fname(void)							\
{								\
	__asm __volatile("mcr\t" _FX(aname));			\
}

#define _WF1(fname, aname...)					\
static __inline void						\
fname(uint32_t reg)						\
{								\
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg));	\
}

#define _W64F1(fname, aname...)					\
static __inline void						\
fname(uint64_t reg)						\
{								\
	__asm __volatile("mcrr\t" _FX(aname):: "r" (reg));	\
}
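
/*
 * For example, the _RF0(cp15_midr_get, CP15_MIDR(%0)) line below expands
 * (roughly) to
 *
 *	static __inline uint32_t
 *	cp15_midr_get(void)
 *	{
 *		uint32_t reg;
 *		__asm __volatile("mrc\t" "p15, 0, %0, c0, c0, 0" : "=r" (reg));
 *		return (reg);
 *	}
 *
 * with the coprocessor operands supplied by the CP15_MIDR() macro from
 * <machine/sysreg.h>.
 */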

/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)		/* Invalidate entire unified TLB */
#if defined(SMP)
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)		/* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))		/* Invalidate unified TLB by ASID */
#if defined(SMP)
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))	/* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))		/* Invalidate unified TLB by MVA, all ASID */
#if defined(SMP)
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))	/* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))		/* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)			/* Branch predictor invalidate all */
#if defined(SMP)
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)		/* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))		/* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))		/* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))		/* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))		/* Data cache clean by MVA PoC */
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))		/* Data cache clean by MVA PoU */
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))		/* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))		/* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))		/* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)		/* Instruction cache invalidate all PoU */
#if defined(SMP)
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)		/* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))		/* Instruction cache invalidate */

/*
 * Publicly accessible functions
 */

/* CP14 Debug Registers */
_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0))
_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0))
_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0))
_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0))
_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0))

_WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0))
_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0))
_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0))
_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0))

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
_WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0))
_WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))

/* CPU ID registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))

_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))

#undef _FX
#undef _RF0
#undef _WF0
#undef _WF1

/*
 * TLB maintenance operations.
 */

/* Local (i.e. not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

	dsb();
	_CP15_TLBIALL();
	dsb();
}

/* Flush all not global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

	dsb();
	_CP15_TLBIASID(CPU_ASID_KERNEL);
	dsb();
}

/* Flush single TLB entry (even global). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Flush range of TLB entries (even global). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Broadcasting operations. */
#if defined(SMP)

static __inline void
tlb_flush_all(void)
{

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIALLIS(),
	    _CP15_TLBIALL()
	);
	dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIASIDIS(CPU_ASID_KERNEL),
	    _CP15_TLBIASID(CPU_ASID_KERNEL)
	);
	dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIMVAAIS(va),
	    _CP15_TLBIMVA(va | CPU_ASID_KERNEL)
	);
	dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	ARM_SMP_UP(
	    {
		for (; va < eva; va += PAGE_SIZE)
			_CP15_TLBIMVAAIS(va);
	    },
	    {
		for (; va < eva; va += PAGE_SIZE)
			_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	    }
	);
	dsb();
}
#else /* !SMP */

#define tlb_flush_all()			tlb_flush_all_local()
#define tlb_flush_all_ng()		tlb_flush_all_ng_local()
#define tlb_flush(va)			tlb_flush_local(va)
#define tlb_flush_range(va, size)	tlb_flush_range_local(va, size)

#endif /* !SMP */

/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;

	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAU(va);
	}
	dsb();
	ARM_SMP_UP(
	    _CP15_ICIALLUIS(),
	    _CP15_ICIALLU()
	);
	dsb();
	isb();
}

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{

	ARM_SMP_UP(
	    _CP15_ICIALLUIS(),
	    _CP15_ICIALLU()
	);
	dsb();
	isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{

	ARM_SMP_UP(
	    _CP15_BPIALLIS(),
	    _CP15_BPIALL()
	);
	dsb();
	isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAU(va);
	}
	dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost as fresh cachelines
 * flow in this direction. In the given range, if there was no dirty
 * cacheline in any cache before, no stale cacheline should remain in them
 * after this operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	/* invalidate L2 first */
	cpu_l2cache_inv_range(pa, size);

	/* then L1 */
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/*
 * Discard D-cache lines to PoC, prior to overwrite by DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2
 * doesn't flow into L1 while invalidating. This routine is intended to be
 * used only when invalidating a buffer before a DMA operation loads new
 * data into memory. The concern in this case is that dirty lines are not
 * evicted to main memory, overwriting the DMA data. For that reason, the
 * L1 is done first to ensure that an evicted L1 line doesn't flow to L2
 * after the L2 has been cleaned.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	/* invalidate L1 first */
	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();

	/* then L2 */
	cpu_l2cache_inv_range(pa, size);
}

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction. In the given range, no dirty cacheline should
 * remain in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	cpu_l2cache_wb_range(pa, size);
}

/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	vm_offset_t eva = sva + size;

	dsb();
	/* write back L1 first */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	/* then write back and invalidate L2 */
	cpu_l2cache_wbinv_range(pa, size);

	/* then invalidate L1 */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
	dsb();
	_CP15_TTB_SET(reg);
	dsb();
	_CP15_BPIALL();
	dsb();
	isb();
	tlb_flush_all_ng_local();
}

/*
 * Functions for address checking:
 *
 *  cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access
 *  cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access
 *  cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access
 *  cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access
 *
 * They must be called while interrupts are disabled to get a consistent result.
 */
static __inline int
cp15_ats1cpr_check(vm_offset_t addr)
{

	cp15_ats1cpr_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cpw_check(vm_offset_t addr)
{

	cp15_ats1cpw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cur_check(vm_offset_t addr)
{

	cp15_ats1cur_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cuw_check(vm_offset_t addr)
{

	cp15_ats1cuw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}
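
/*
 * A minimal sketch of probing a user address before touching it; the
 * interrupt disable/restore is what makes the PAR read consistent, and
 * 'uva' is a placeholder for the user virtual address being checked:
 *
 *	register_t s;
 *	int error;
 *
 *	s = intr_disable();
 *	error = cp15_ats1cur_check(uva);
 *	intr_restore(s);
 *	if (error != 0)
 *		return (EFAULT);
 */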

static __inline uint64_t
get_cyclecount(void)
{
#if defined(DEV_PMU)
	if (pmu_attched) {
		u_int cpu;
		uint64_t h, h2;
		uint32_t l, r;

		cpu = PCPU_GET(cpuid);
		h = (uint64_t)atomic_load_acq_32(&ccnt_hi[cpu]);
		l = cp15_pmccntr_get();
		/* In case interrupts are disabled we need to check for overflow. */
		r = cp15_pmovsr_get();
		if (r & PMU_OVSR_C) {
			atomic_add_32(&ccnt_hi[cpu], 1);
			/* Clear the event. */
			cp15_pmovsr_set(PMU_OVSR_C);
		}
		/* Make sure there was no wrap-around while we read the lo half. */
		h2 = (uint64_t)atomic_load_acq_32(&ccnt_hi[cpu]);
		if (h != h2)
			l = cp15_pmccntr_get();
		return (h2 << 32 | l);
	} else
#endif
		return cp15_pmccntr_get();
}
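
/*
 * A minimal sketch of using get_cyclecount() to time a code section; the
 * values are raw CPU cycles and only meaningful as a delta taken on the
 * same CPU ('do_work' is a placeholder for the code being measured):
 *
 *	uint64_t c0, c1;
 *
 *	c0 = get_cyclecount();
 *	do_work();
 *	c1 = get_cyclecount();
 *	printf("%ju cycles\n", (uintmax_t)(c1 - c0));
 */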
#endif

#define TRAPF_USERMODE(frame)	((frame->tf_spsr & PSR_MODE) == PSR_USR32_MODE)

#define TRAPF_PC(tfp)		((tfp)->tf_pc)

#define cpu_getstack(td)	((td)->td_frame->tf_usr_sp)
#define cpu_setstack(td, sp)	((td)->td_frame->tf_usr_sp = (sp))
#define cpu_spinwait()		/* nothing */
#define cpu_lock_delay()	DELAY(1)

#define ARM_NVEC		7
#define ARM_VEC_ALL		0xffffffff

extern vm_offset_t vector_page;

/*
 * Params passed into initarm. If you change the size of this you will
 * need to update locore.S to allocate more memory on the stack before
 * it calls initarm.
 */
struct arm_boot_params {
	register_t	abp_size;	/* Size of this structure */
	register_t	abp_r0;		/* r0 from the boot loader */
	register_t	abp_r1;		/* r1 from the boot loader */
	register_t	abp_r2;		/* r2 from the boot loader */
	register_t	abp_r3;		/* r3 from the boot loader */
	vm_offset_t	abp_physaddr;	/* The kernel physical address */
	vm_offset_t	abp_pagetable;	/* The early page table */
};

void	arm_vector_init(vm_offset_t, int);
void	fork_trampoline(void);
void	identify_arm_cpu(void);
void	*initarm(struct arm_boot_params *);

extern char btext[];
extern char etext[];
int	badaddr_read(void *, size_t, void *);
#endif /* !MACHINE_CPU_H */