/* arch/arm/include/asm/arch_timer.h */
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>

#ifdef CONFIG_ARM_ARCH_TIMER
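/*
 * Registration hooks called by platform code: arch_timer_of_register()
 * probes the timer from the device tree, arch_timer_sched_clock_init()
 * wires the counter up as the sched_clock source. The stubs at the
 * bottom of this file return -ENXIO when the timer is not configured.
 */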
int arch_timer_of_register(void);
int arch_timer_sched_clock_init(void);

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
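/*
 * CP15 encodings used below (ARMv7 Generic Timer):
 *   CNTP_CTL  = p15, 0, c14, c2, 1	CNTV_CTL  = p15, 0, c14, c3, 1
 *   CNTP_TVAL = p15, 0, c14, c2, 0	CNTV_TVAL = p15, 0, c14, c3, 0
 */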
static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
			break;
		}
	}

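	/*
	 * Ensure the register write above has taken effect before any
	 * following instruction executes.
	 */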
	isb();
}

static inline u32 arch_timer_reg_read(const int access, const int reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
			break;
		}
	}

	return val;
}

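/*
 * Illustrative sketch only (not part of this header): programming a
 * one-shot event on the virtual timer with the accessors above, where
 * evt is the desired delta in counter ticks and ARCH_TIMER_CTRL_ENABLE
 * comes from <clocksource/arm_arch_timer.h>.
 *
 *	u32 ctrl = arch_timer_reg_read(ARCH_TIMER_VIRT_ACCESS,
 *				       ARCH_TIMER_REG_CTRL);
 *	arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS,
 *			     ARCH_TIMER_REG_TVAL, evt);
 *	arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS,
 *			     ARCH_TIMER_REG_CTRL,
 *			     ctrl | ARCH_TIMER_CTRL_ENABLE);
 */

/* CNTFRQ: the counter frequency in Hz, as programmed by firmware. */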
static inline u32 arch_timer_get_cntfrq(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
	return val;
}

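/*
 * CNTPCT: the 64-bit physical counter. The ISB keeps the read from
 * being speculated ahead of preceding instructions and returning a
 * stale value.
 */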
static inline u64 arch_counter_get_cntpct(void)
{
	u64 cval;

	isb();
	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}

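/*
 * CNTVCT: the 64-bit virtual counter, i.e. the physical counter minus
 * the hypervisor-controlled offset (CNTVOFF).
 */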
static inline u64 arch_counter_get_cntvct(void)
{
	u64 cval;

	isb();
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}

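/*
 * CNTKCTL controls userspace (PL0) access to the timers: bits [9:8]
 * (PL0PTEN/PL0VTEN) gate the timer registers, bits [2:0]
 * (EVNTEN/PL0VCTEN/PL0PCTEN) gate the event stream and the counters.
 */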
static inline void __cpuinit arch_counter_set_user_access(void)
{
	u32 cntkctl;

	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));

	/* disable user access to everything */
	cntkctl &= ~((3 << 8) | (7 << 0));

	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
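
/*
 * Illustrative sketch only (not part of this header): busy-waiting
 * roughly one millisecond on the virtual counter, assuming firmware
 * has programmed CNTFRQ and the counter is running.
 *
 *	u64 end = arch_counter_get_cntvct() + arch_timer_get_cntfrq() / 1000;
 *
 *	while (arch_counter_get_cntvct() < end)
 *		cpu_relax();
 */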
#else
static inline int arch_timer_of_register(void)
{
	return -ENXIO;
}

static inline int arch_timer_sched_clock_init(void)
{
	return -ENXIO;
}
#endif

#endif