xref: /linux/arch/arm/include/asm/arch_timer.h (revision 38fd2c202a3d82bc12430bce5789fa2c2a406f71)
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>

#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_arch_init(void);

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTP_CTL: physical timer control register */
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTP_TVAL: physical timer value register */
			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
			break;
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTV_CTL: virtual timer control register */
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTV_TVAL: virtual timer value register */
			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
			break;
		}
	}

	/* Ensure the register write has taken effect before continuing */
	isb();
}

static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTP_CTL */
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTP_TVAL */
			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
			break;
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTV_CTL */
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTV_TVAL */
			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
			break;
		}
	}

	return val;
}

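/*
 * Illustrative sketch (not part of the original header): a clockevent
 * driver built on these accessors would typically program the next
 * event on the virtual timer roughly like this, where "evt" is the
 * number of timer ticks until the event:
 *
 *	u32 ctrl = arch_timer_reg_read_cp15(ARCH_TIMER_VIRT_ACCESS,
 *					    ARCH_TIMER_REG_CTRL);
 *	ctrl |= ARCH_TIMER_CTRL_ENABLE;
 *	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
 *	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
 *				  ARCH_TIMER_REG_TVAL, evt);
 *	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
 *				  ARCH_TIMER_REG_CTRL, ctrl);
 *
 * ARCH_TIMER_CTRL_ENABLE and ARCH_TIMER_CTRL_IT_MASK come from
 * <clocksource/arm_arch_timer.h>.
 */
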
static inline u32 arch_timer_get_cntfrq(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
	return val;
}

static inline u64 arch_counter_get_cntvct(void)
{
	u64 cval;

	/* Ensure the counter is read after preceding instructions complete */
	isb();
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}

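/*
 * Worked example (for illustration only): a raw delta of CNTVCT ticks
 * can be converted to time using the frequency reported by
 * arch_timer_get_cntfrq(); with a 24 MHz counter, a delta of 24000
 * ticks corresponds to 1 ms, i.e. roughly:
 *
 *	u64 ns = delta * NSEC_PER_SEC / arch_timer_get_cntfrq();
 *
 * The clocksource core normally does this scaling with a precomputed
 * mult/shift pair rather than a 64-bit division.
 */
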
/*
 * CNTKCTL controls PL0 (user) access to the counters/timers and to
 * the virtual counter event stream.
 */
static inline u32 arch_timer_get_cntkctl(void)
{
	u32 cntkctl;
	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
	return cntkctl;
}

static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}

static inline void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/*
	 * Disable user access to both the physical and virtual
	 * counters/timers, and disable the virtual event stream.
	 */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);
	arch_timer_set_cntkctl(cntkctl);
}

static inline void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable the virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	/* Advertise the event stream to userspace */
	elf_hwcap |= HWCAP_EVTSTRM;
}
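
/*
 * Note (general property of the architected timer event stream, not
 * stated in this file): the divider written above selects which bit
 * of the virtual counter acts as the event trigger, so with the
 * default 0-to-1 trigger direction the stream generates wake-up
 * events at roughly cntfrq / 2^(divider + 1) per second.
 */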

#endif /* CONFIG_ARM_ARCH_TIMER */

#endif /* __ASMARM_ARCH_TIMER_H */