/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996, 1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *             fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

/* xtime and wall_jiffies keep wall-clock time */
extern unsigned long wall_jiffies;

static long clocktick __read_mostly;	/* timer cycles per tick */
static long halftick __read_mostly;

#ifdef CONFIG_SMP
extern void smp_do_timer(struct pt_regs *regs);
#endif

irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now;
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	profile_tick(CPU_PROFILING, regs);

	now = mfctl(16);
	/* initialize next_tick to time at last clocktick */
	next_tick = cpu_data[cpu].it_value;

	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while ((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#else
		update_process_times(user_mode(regs));
#endif
		if (cpu == 0) {
			write_seqlock(&xtime_lock);
			do_timer(regs);
			write_sequnlock(&xtime_lock);
		}
	}

	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);

	return IRQ_HANDLED;
}


unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (regs->gr[0] & PSW_N)
		pc -= 4;

#ifdef CONFIG_SMP
	if (in_lock_functions(pc))
		pc = regs->gr[2];
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);
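/*
 * Illustrative sketch (not kernel code): the catch-up loop in
 * timer_interrupt() above depends on the *signed* difference
 * (next_tick - now), which stays meaningful across CR16 wraparound.
 * Tracing it with hypothetical values for an interrupt that arrives
 * two ticks late:
 *
 *	long clocktick = 2500000, halftick = 1250000;
 *	long next_tick = 1000000;	// intended time of the tick we missed
 *	long now       = 6000000;	// value just read from CR16
 *	int nticks = 0;
 *	while ((next_tick - now) < halftick) {
 *		next_tick += clocktick;	// 3500000, 6000000, 8500000
 *		nticks++;
 *	}
 *	// nticks == 3: the current tick plus the two missed ones, and
 *	// next_tick ends up more than half a tick in the future.
 */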
/*** converted from ia64 ***/
/*
 * Return the number of micro-seconds that elapsed since the last
 * update to wall time (aka xtime aka wall_jiffies).  The xtime_lock
 * must be at least read-locked when calling this routine.
 */
static inline unsigned long
gettimeoffset (void)
{
#ifndef CONFIG_SMP
	/*
	 * FIXME: This won't work on smp because jiffies are updated by cpu 0.
	 *    Once parisc-linux learns the cr16 difference between processors,
	 *    this could be made to work.
	 */
	long last_tick;
	long elapsed_cycles;

	/* it_value is the intended time of the next tick */
	last_tick = cpu_data[smp_processor_id()].it_value;

	/* Subtract one tick and account for possible difference between
	 * when we expected the tick and when it actually arrived.
	 * (aka wall vs real)
	 */
	last_tick -= clocktick * (jiffies - wall_jiffies + 1);
	elapsed_cycles = mfctl(16) - last_tick;

	/* the precision of this math could be improved */
	return elapsed_cycles / (PAGE0->mem_10msec / 10000);
#else
	return 0;
#endif
}

void
do_gettimeofday (struct timeval *tv)
{
	unsigned long flags, seq, usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = gettimeoffset();
		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / 1000);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	if (unlikely(usec > LONG_MAX)) {
		/* This can happen if the gettimeoffset adjustment is
		 * negative and xtime.tv_nsec is smaller than the
		 * adjustment */
		printk(KERN_ERR "do_gettimeofday() spurious xtime.tv_nsec of %ld\n", usec);
		usec += USEC_PER_SEC;
		--sec;
		/* This should never happen, it means the negative
		 * time adjustment was more than a second, so there's
		 * something seriously wrong */
		BUG_ON(usec > LONG_MAX);
	}

	while (usec >= USEC_PER_SEC) {
		usec -= USEC_PER_SEC;
		++sec;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

int
do_settimeofday (struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	{
		/*
		 * This is revolting. We need to set "xtime"
		 * correctly. However, the value in this location is
		 * the value at the most recent update of wall time.
		 * Discover what correction gettimeofday would have
		 * done, and then undo it!
		 */
		nsec -= gettimeoffset() * 1000;

		wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		ntp_clear();
	}
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}
EXPORT_SYMBOL(do_settimeofday);

/*
 * XXX: We can do better than this.
 * Returns nanoseconds
 */

unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}


void __init time_init(void)
{
	unsigned long next_tick;
	static struct pdc_tod tod_data;

	clocktick = (100 * PAGE0->mem_10msec) / HZ;
	halftick = clocktick / 2;

	/* Setup clock interrupt timing */

	next_tick = mfctl(16);
	next_tick += clocktick;
	cpu_data[smp_processor_id()].it_value = next_tick;

	/* kick off Itimer (CR16) */
	mtctl(next_tick, 16);

	if (pdc_tod_read(&tod_data) == 0) {
		write_seqlock_irq(&xtime_lock);
		xtime.tv_sec = tod_data.tod_sec;
		xtime.tv_nsec = tod_data.tod_usec * 1000;
		set_normalized_timespec(&wall_to_monotonic,
					-xtime.tv_sec, -xtime.tv_nsec);
		write_sequnlock_irq(&xtime_lock);
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		xtime.tv_sec = 0;
		xtime.tv_nsec = 0;
	}
}
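/*
 * Worked example (hypothetical numbers, for illustration only): on a
 * machine whose CR16 interval timer runs at 250 MHz, firmware reports
 * PAGE0->mem_10msec == 2500000 cycles per 10 ms.  With HZ == 100,
 * time_init() above computes:
 *
 *	clocktick = (100 * 2500000) / 100 = 2500000 cycles per tick
 *	halftick  = clocktick / 2         = 1250000 cycles
 *
 * and the divisor used by gettimeoffset(),
 * PAGE0->mem_10msec / 10000 == 250, is simply cycles per microsecond,
 * so elapsed_cycles / 250 converts a CR16 delta into microseconds.
 */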