/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996, 1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *             fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

/* xtime and wall_jiffies keep wall-clock time */
extern unsigned long wall_jiffies;

static long clocktick __read_mostly;	/* timer cycles per tick */
static long halftick __read_mostly;

#ifdef CONFIG_SMP
extern void smp_do_timer(struct pt_regs *regs);
#endif

irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now;
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	profile_tick(CPU_PROFILING, regs);

	now = mfctl(16);
	/* initialize next_tick to time at last clocktick */
	next_tick = cpu_data[cpu].it_value;

	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while ((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#else
		update_process_times(user_mode(regs));
#endif
		if (cpu == 0) {
			write_seqlock(&xtime_lock);
			do_timer(regs);
			write_sequnlock(&xtime_lock);
		}
	}

	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);

	return IRQ_HANDLED;
}


unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (regs->gr[0] & PSW_N)
		pc -= 4;

#ifdef CONFIG_SMP
	if (in_lock_functions(pc))
		pc = regs->gr[2];
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);


/*** converted from ia64 ***/
/*
 * Return the number of micro-seconds that elapsed since the last
 * update to wall time (aka xtime aka wall_jiffies).  The xtime_lock
 * must be at least read-locked when calling this routine.
 */
static inline unsigned long
gettimeoffset (void)
{
#ifndef CONFIG_SMP
	/*
	 * FIXME: This won't work on smp because jiffies are updated by cpu 0.
	 *    Once parisc-linux learns the cr16 difference between processors,
	 *    this could be made to work.
	 */
	long last_tick;
	long elapsed_cycles;

	/* it_value is the intended time of the next tick */
	last_tick = cpu_data[smp_processor_id()].it_value;

	/* Subtract one tick and account for possible difference between
	 * when we expected the tick and when it actually arrived.
	 * (aka wall vs real)
	 */
	last_tick -= clocktick * (jiffies - wall_jiffies + 1);
	elapsed_cycles = mfctl(16) - last_tick;

	/* the precision of this math could be improved */
	return elapsed_cycles / (PAGE0->mem_10msec / 10000);
#else
	return 0;
#endif
}

void
do_gettimeofday (struct timeval *tv)
{
	unsigned long flags, seq, usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = gettimeoffset();
		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / 1000);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	if (unlikely(usec > LONG_MAX)) {
		/* This can happen if the gettimeoffset adjustment is
		 * negative and xtime.tv_nsec is smaller than the
		 * adjustment */
		printk(KERN_ERR "do_gettimeofday() spurious xtime.tv_nsec of %ld\n", usec);
		usec += USEC_PER_SEC;
		--sec;
		/* This should never happen, it means the negative
		 * time adjustment was more than a second, so there's
		 * something seriously wrong */
		BUG_ON(usec > LONG_MAX);
	}

	while (usec >= USEC_PER_SEC) {
		usec -= USEC_PER_SEC;
		++sec;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
EXPORT_SYMBOL(do_gettimeofday);

int
do_settimeofday (struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	{
		/*
		 * This is revolting. We need to set "xtime"
		 * correctly. However, the value in this location is
		 * the value at the most recent update of wall time.
		 * Discover what correction gettimeofday would have
		 * done, and then undo it!
		 */
		nsec -= gettimeoffset() * 1000;

		wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		ntp_clear();
	}
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}
EXPORT_SYMBOL(do_settimeofday);

/*
 * XXX: We can do better than this.
 * Returns nanoseconds.
 */
unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}


void __init time_init(void)
{
	unsigned long next_tick;
	static struct pdc_tod tod_data;

	clocktick = (100 * PAGE0->mem_10msec) / HZ;
	halftick = clocktick / 2;

	/* Setup clock interrupt timing */

	next_tick = mfctl(16);
	next_tick += clocktick;
	cpu_data[smp_processor_id()].it_value = next_tick;

	/* kick off Itimer (CR16) */
	mtctl(next_tick, 16);

	if (pdc_tod_read(&tod_data) == 0) {
		write_seqlock_irq(&xtime_lock);
		xtime.tv_sec = tod_data.tod_sec;
		xtime.tv_nsec = tod_data.tod_usec * 1000;
		set_normalized_timespec(&wall_to_monotonic,
					-xtime.tv_sec, -xtime.tv_nsec);
		write_sequnlock_irq(&xtime_lock);
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		xtime.tv_sec = 0;
		xtime.tv_nsec = 0;
	}
}
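
/*
 * Illustrative sketch (not part of the original file): the
 * cycles-to-microseconds conversion that gettimeoffset() performs.
 * PAGE0->mem_10msec is the number of cr16 cycles in 10 ms, so
 * mem_10msec / 10000 is cycles per microsecond.  The helper below and
 * its 250 MHz timer frequency (mem_10msec == 2500000) are assumptions
 * made up purely for the worked example; it is wrapped in #if 0 so it
 * is never compiled.
 */
#if 0
static unsigned long example_cycles_to_usec(long elapsed_cycles)
{
	const unsigned long mem_10msec = 2500000;	/* assumed 250 MHz cr16 */
	unsigned long cycles_per_usec = mem_10msec / 10000;	/* 250 */

	/* e.g. 1250000 elapsed cycles / 250 = 5000 us (5 ms) */
	return elapsed_cycles / cycles_per_usec;
}
#endif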