/* arch/arm/include/asm/arch_timer.h */
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>

#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_arch_init(void);

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTP_CTL: physical timer control register */
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTP_TVAL: physical timer down-counter value */
			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTV_CTL: virtual timer control register */
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTV_TVAL: virtual timer down-counter value */
			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
			break;
		}
	}

	/* Make sure the write has taken effect before any following instruction. */
	isb();
}

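/*
 * Illustrative sketch, not part of the original header: one way a
 * caller could arm the physical timer to fire in 'cycles' ticks using
 * the accessor above. The helper name is made up for the example;
 * ARCH_TIMER_CTRL_ENABLE comes from <clocksource/arm_arch_timer.h>.
 */
static inline void arch_timer_example_start_phys(u32 cycles)
{
	/* Program the down-counter first... */
	arch_timer_reg_write(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_TVAL, cycles);
	/* ...then enable the timer, leaving its interrupt unmasked. */
	arch_timer_reg_write(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_CTRL,
			     ARCH_TIMER_CTRL_ENABLE);
}
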
static inline u32 arch_timer_reg_read(const int access, const int reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTP_CTL */
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTP_TVAL */
			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTV_CTL */
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTV_TVAL */
			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
			break;
		}
	}

	return val;
}

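/*
 * Illustrative sketch, not part of the original header: polling for
 * physical timer expiry. ARCH_TIMER_CTRL_IT_STAT (the ISTATUS bit)
 * comes from <clocksource/arm_arch_timer.h>; the helper name is made
 * up for the example.
 */
static inline bool arch_timer_example_phys_fired(void)
{
	u32 ctrl = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_CTRL);

	/* ISTATUS is set once the timer condition is met (down-counter <= 0). */
	return !!(ctrl & ARCH_TIMER_CTRL_IT_STAT);
}
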
static inline u32 arch_timer_get_cntfrq(void)
{
	u32 val;
	/* CNTFRQ: counter frequency in Hz, programmed by the boot firmware. */
	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
	return val;
}

static inline u64 arch_counter_get_cntpct(void)
{
	u64 cval;

	/* The isb() keeps the read from being speculated ahead of earlier instructions. */
	isb();
	/* CNTPCT: 64-bit physical counter */
	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}

static inline u64 arch_counter_get_cntvct(void)
{
	u64 cval;

	/* As above, order the read against preceding instructions. */
	isb();
	/* CNTVCT: 64-bit virtual counter (CNTPCT minus CNTVOFF) */
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}

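/*
 * Illustrative sketch, not part of the original header: turning a
 * virtual-counter delta into nanoseconds using CNTFRQ. The multiply
 * overflows for long intervals, and do_div() lives in <asm/div64.h>
 * (not included here); real users go through the clocksource
 * mult/shift machinery instead. The helper name is made up.
 */
static inline u64 arch_timer_example_delta_ns(u64 start, u64 end)
{
	u64 ns = (end - start) * 1000000000ULL;

	/* 64-by-32 divide; a plain u64 '/' would pull in libgcc helpers. */
	do_div(ns, arch_timer_get_cntfrq());
	return ns;
}
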
static inline void __cpuinit arch_counter_set_user_access(void)
{
	u32 cntkctl;

	/* CNTKCTL controls PL0 (user) access to the timers and counters. */
	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));

	/*
	 * Disable user access to everything: clear PL0PTEN/PL0VTEN
	 * (bits 9:8, timer register access) and EVNTEN/PL0VCTEN/PL0PCTEN
	 * (bits 2:0, event stream and counter access).
	 */
	cntkctl &= ~((3 << 8) | (7 << 0));

	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
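
/*
 * Illustrative sketch, not part of the original header: granting
 * userspace read access to the virtual counter (as later kernels do
 * for the VDSO) by setting CNTKCTL.PL0VCTEN. The helper name is made
 * up for the example.
 */
static inline void arch_counter_example_enable_user_vct(void)
{
	u32 cntkctl;

	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
	cntkctl |= 1 << 1;	/* CNTKCTL.PL0VCTEN: PL0 CNTVCT/CNTFRQ reads */
	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}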
#endif /* CONFIG_ARM_ARCH_TIMER */

#endif /* __ASMARM_ARCH_TIMER_H */