#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>

#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_arch_init(void);

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:	/* CNTP_CTL */
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:	/* CNTP_TVAL */
			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:	/* CNTV_CTL */
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:	/* CNTV_TVAL */
			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
			break;
		}
	}

	/* Make sure the register write has taken effect before we carry on. */
	isb();
}

static inline u32 arch_timer_reg_read(const int access, const int reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:	/* CNTP_CTL */
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:	/* CNTP_TVAL */
			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:	/* CNTV_CTL */
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:	/* CNTV_TVAL */
			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
			break;
		}
	}

	return val;
}

/* Read CNTFRQ, the counter frequency in Hz. */
static inline u32 arch_timer_get_cntfrq(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
	return val;
}

/* Read the 64-bit virtual counter, CNTVCT. */
static inline u64 arch_counter_get_cntvct(void)
{
	u64 cval;

	/* Make sure the counter is read after preceding instructions. */
	isb();
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}

static inline void __cpuinit arch_counter_set_user_access(void)
{
	u32 cntkctl;

	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl)); /* CNTKCTL */

	/*
	 * Disable user access to everything: the PL0 timer register
	 * enables (PL0PTEN, PL0VTEN), the event stream (EVNTEN) and
	 * the PL0 counter enables (PL0VCTEN, PL0PCTEN).
	 */
	cntkctl &= ~((3 << 8) | (7 << 0));

	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
#endif	/* CONFIG_ARM_ARCH_TIMER */

#endif	/* __ASMARM_ARCH_TIMER_H */
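
/*
 * Usage sketch (illustrative only, not part of this header): a clock
 * event driver built on the accessors above would typically program a
 * one-shot expiry by loading the downcounter (TVAL) and then enabling
 * the timer with its interrupt unmasked. The function name and the
 * `evt' parameter are hypothetical; the ARCH_TIMER_CTRL_* flags come
 * from the included <clocksource/arm_arch_timer.h>.
 *
 *	static void example_set_next_event(unsigned long evt)
 *	{
 *		u32 ctrl = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS,
 *					       ARCH_TIMER_REG_CTRL);
 *
 *		ctrl |= ARCH_TIMER_CTRL_ENABLE;		// start the timer
 *		ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;	// unmask its interrupt
 *
 *		arch_timer_reg_write(ARCH_TIMER_PHYS_ACCESS,
 *				     ARCH_TIMER_REG_TVAL, evt);
 *		arch_timer_reg_write(ARCH_TIMER_PHYS_ACCESS,
 *				     ARCH_TIMER_REG_CTRL, ctrl);
 *	}
 */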
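
/*
 * Usage sketch (illustrative only, not part of this header): the
 * virtual counter and its frequency can be combined to measure an
 * elapsed interval. The helper name is hypothetical; in the real
 * kernel this conversion is done with clocksource mult/shift scaling
 * rather than a straight division.
 *
 *	#include <linux/math64.h>
 *	#include <linux/time.h>
 *
 *	static u64 example_elapsed_ns(u64 start)
 *	{
 *		u64 cycles = arch_counter_get_cntvct() - start;
 *
 *		// cycles -> ns; may overflow for very long intervals
 *		return div64_u64(cycles * NSEC_PER_SEC,
 *				 arch_timer_get_cntfrq());
 *	}
 */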