/* linux/arch/sparc/kernel/time.c
 *
 * Copyright (C) 1995 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
 *
 * Chris Davis (cdavis@cois.on.ca) 03/27/1998
 * Added support for the intersil on the sun4/4200
 *
 * Gleb Raiko (rajko@mech.math.msu.su) 08/18/1998
 * Support for MicroSPARC-IIep, PCI CPU.
 *
 * This file handles the Sparc specific time handling details.
 *
 * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/rtc/m48t59.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/profile.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <asm/oplib.h>
#include <asm/timex.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/idprom.h>
#include <asm/page.h>
#include <asm/pcic.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>

#include "irq.h"

static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock);
static __volatile__ u64 timer_cs_internal_counter = 0;
static char timer_cs_enabled = 0;

static struct clock_event_device timer_ce;
static char timer_ce_enabled = 0;

#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct clock_event_device, sparc32_clockevent);
#endif

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

static int set_rtc_mmss(unsigned long);

unsigned long profile_pc(struct pt_regs *regs)
{
	extern char __copy_user_begin[], __copy_user_end[];
	extern char __bzero_begin[], __bzero_end[];

	unsigned long pc = regs->pc;

	if (in_lock_functions(pc) ||
	    (pc >= (unsigned long) __copy_user_begin &&
	     pc < (unsigned long) __copy_user_end) ||
	    (pc >= (unsigned long) __bzero_begin &&
	     pc < (unsigned long) __bzero_end))
		pc = regs->u_regs[UREG_RETPC];
	return pc;
}

EXPORT_SYMBOL(profile_pc);

__volatile__ unsigned int *master_l10_counter;

int update_persistent_clock(struct timespec now)
{
	return set_rtc_mmss(now.tv_sec);
}

irqreturn_t notrace timer_interrupt(int dummy, void *dev_id)
{
	if (timer_cs_enabled) {
		write_seqlock(&timer_cs_lock);
		timer_cs_internal_counter++;
		sparc_config.clear_clock_irq();
		write_sequnlock(&timer_cs_lock);
	} else {
		sparc_config.clear_clock_irq();
	}

	if (timer_ce_enabled)
		timer_ce.event_handler(&timer_ce);

	return IRQ_HANDLED;
}
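/*
 * The boot CPU's level-10 timer is exposed to the generic timekeeping
 * code in two roles: as a periodic clock_event_device ("timer_ce") and,
 * when the platform advertises FEAT_L10_CLOCKSOURCE, as a clocksource
 * ("timer_cs") driven by the software counter that timer_interrupt()
 * increments above.  The helpers below wire up the clockevent side.
 */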
static void timer_ce_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_RESUME:
		timer_ce_enabled = 1;
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
		timer_ce_enabled = 0;
		break;
	default:
		break;
	}
	smp_mb();
}

static __init void setup_timer_ce(void)
{
	struct clock_event_device *ce = &timer_ce;

	BUG_ON(smp_processor_id() != boot_cpu_id);

	ce->name = "timer_ce";
	ce->rating = 100;
	ce->features = CLOCK_EVT_FEAT_PERIODIC;
	ce->set_mode = timer_ce_set_mode;
	ce->cpumask = cpu_possible_mask;
	ce->shift = 32;
	ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
			  ce->shift);
	clockevents_register_device(ce);
}

static unsigned int sbus_cycles_offset(void)
{
	unsigned int val, offset;

	val = *master_l10_counter;
	offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;

	/* Limit hit? */
	if (val & TIMER_LIMIT_BIT)
		offset += sparc_config.cs_period;

	return offset;
}

static cycle_t timer_cs_read(struct clocksource *cs)
{
	unsigned int seq, offset;
	u64 cycles;

	do {
		seq = read_seqbegin(&timer_cs_lock);

		cycles = timer_cs_internal_counter;
		offset = sparc_config.get_cycles_offset();
	} while (read_seqretry(&timer_cs_lock, seq));

	/* Count absolute cycles */
	cycles *= sparc_config.cs_period;
	cycles += offset;

	return cycles;
}

static struct clocksource timer_cs = {
	.name	= "timer_cs",
	.rating	= 100,
	.read	= timer_cs_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.shift	= 2,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static __init int setup_timer_cs(void)
{
	timer_cs_enabled = 1;
	timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
					    timer_cs.shift);

	return clocksource_register(&timer_cs);
}

#ifdef CONFIG_SMP
static void percpu_ce_setup(enum clock_event_mode mode,
			    struct clock_event_device *evt)
{
	int cpu = __first_cpu(evt->cpumask);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		sparc_config.load_profile_irq(cpu,
					      SBUS_CLOCK_RATE / HZ);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		sparc_config.load_profile_irq(cpu, 0);
		break;
	default:
		break;
	}
}

static int percpu_ce_set_next_event(unsigned long delta,
				    struct clock_event_device *evt)
{
	int cpu = __first_cpu(evt->cpumask);
	unsigned int next = (unsigned int)delta;

	sparc_config.load_profile_irq(cpu, next);
	return 0;
}

void register_percpu_ce(int cpu)
{
	struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu);
	unsigned int features = CLOCK_EVT_FEAT_PERIODIC;

	if (sparc_config.features & FEAT_L14_ONESHOT)
		features |= CLOCK_EVT_FEAT_ONESHOT;

	ce->name = "percpu_ce";
	ce->rating = 200;
	ce->features = features;
	ce->set_mode = percpu_ce_setup;
	ce->set_next_event = percpu_ce_set_next_event;
	ce->cpumask = cpumask_of(cpu);
	ce->shift = 32;
	ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
			  ce->shift);
	ce->max_delta_ns = clockevent_delta2ns(sparc_config.clock_rate, ce);
	ce->min_delta_ns = clockevent_delta2ns(100, ce);

	clockevents_register_device(ce);
}
#endif

static unsigned char mostek_read_byte(struct device *dev, u32 ofs)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t59_plat_data *pdata = pdev->dev.platform_data;

	return readb(pdata->ioaddr + ofs);
}

static void mostek_write_byte(struct device *dev, u32 ofs, u8 val)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t59_plat_data *pdata = pdev->dev.platform_data;

	writeb(val, pdata->ioaddr + ofs);
}
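/*
 * Glue for the Mostek M48T02/M48T08 TOD chips: the generic rtc-m48t59
 * driver handles the register layout and only needs these byte
 * accessors, the chip type and a mapped register resource, all of
 * which clock_probe() fills in before registering the platform device.
 */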
static struct m48t59_plat_data m48t59_data = {
	.read_byte = mostek_read_byte,
	.write_byte = mostek_write_byte,
};

/* resource is set at runtime */
static struct platform_device m48t59_rtc = {
	.name		= "rtc-m48t59",
	.id		= 0,
	.num_resources	= 1,
	.dev = {
		.platform_data = &m48t59_data,
	},
};

static int __devinit clock_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	const char *model = of_get_property(dp, "model", NULL);

	if (!model)
		return -ENODEV;

	/* Only the primary RTC has an address property */
	if (!of_find_property(dp, "address", NULL))
		return -ENODEV;

	m48t59_rtc.resource = &op->resource[0];
	if (!strcmp(model, "mk48t02")) {
		/* Map the clock register io area read-only */
		m48t59_data.ioaddr = of_ioremap(&op->resource[0], 0,
						2048, "rtc-m48t59");
		m48t59_data.type = M48T59RTC_TYPE_M48T02;
	} else if (!strcmp(model, "mk48t08")) {
		m48t59_data.ioaddr = of_ioremap(&op->resource[0], 0,
						8192, "rtc-m48t59");
		m48t59_data.type = M48T59RTC_TYPE_M48T08;
	} else
		return -ENODEV;

	if (platform_device_register(&m48t59_rtc) < 0)
		printk(KERN_ERR "Registering RTC device failed\n");

	return 0;
}

static struct of_device_id clock_match[] = {
	{
		.name = "eeprom",
	},
	{},
};

static struct platform_driver clock_driver = {
	.probe		= clock_probe,
	.driver = {
		.name = "rtc",
		.owner = THIS_MODULE,
		.of_match_table = clock_match,
	},
};

/* Probe for the mostek real time clock chip. */
static int __init clock_init(void)
{
	return platform_driver_register(&clock_driver);
}

/* Must be after subsys_initcall() so that busses are probed. Must
 * be before device_initcall() because things like the RTC driver
 * need to see the clock registers.
 */
fs_initcall(clock_init);

static void __init sparc32_late_time_init(void)
{
	if (sparc_config.features & FEAT_L10_CLOCKEVENT)
		setup_timer_ce();
	if (sparc_config.features & FEAT_L10_CLOCKSOURCE)
		setup_timer_cs();
#ifdef CONFIG_SMP
	register_percpu_ce(smp_processor_id());
#endif
}

static void __init sbus_time_init(void)
{
	sparc_config.get_cycles_offset = sbus_cycles_offset;
	sparc_config.init_timers();
}

void __init time_init(void)
{
	sparc_config.features = 0;
	late_time_init = sparc32_late_time_init;

	if (pcic_present())
		pci_time_init();
	else
		sbus_time_init();
}

static int set_rtc_mmss(unsigned long secs)
{
	struct rtc_device *rtc = rtc_class_open("rtc0");
	int err = -1;

	if (rtc) {
		err = rtc_set_mmss(rtc, secs);
		rtc_class_close(rtc);
	}

	return err;
}