/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 *   o get rid of clocksource_jiffies extern
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>

/* XXX - Would like a better way for initializing curr_clocksource */
extern struct clocksource clocksource_jiffies;

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource. Initialized to clocksource_jiffies.
 * next_clocksource:
 *	pending next selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_lock:
 *	protects manipulations to curr_clocksource and next_clocksource
 *	and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource = &clocksource_jiffies;
static struct clocksource *next_clocksource;
static struct clocksource *clocksource_override;
static LIST_HEAD(clocksource_list);
static DEFINE_SPINLOCK(clocksource_lock);
static char override_name[32];
static int finished_booting;

/* clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	finished_booting = 1;
	return 0;
}
fs_initcall(clocksource_done_booting);

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
/*
 * Interval: 0.5sec, threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_TRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_ratewd(struct clocksource *cs, int64_t delta)
{
	if (delta > -WATCHDOG_TRESHOLD && delta < WATCHDOG_TRESHOLD)
		return;

	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	clocksource_change_rating(cs, 0);
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	list_del(&cs->wd_list);
}
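/*
 * Worked example (illustrative, based on the defines above): a clocksource
 * that runs 15% too fast accumulates roughly 0.15 * 500ms = 75ms of extra
 * time over one WATCHDOG_INTERVAL, so cs_nsec - wd_nsec exceeds the 62.5ms
 * threshold and clocksource_ratewd() demotes it to rating 0 and drops it
 * from the watchdog list.
 */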
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs, *tmp;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;

	spin_lock(&watchdog_lock);

	wdnow = watchdog->read();
	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
	watchdog_last = wdnow;

	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		csnow = cs->read();
		/* Initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
			    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
				cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
				/*
				 * We just marked the clocksource as
				 * highres-capable, notify the rest of the
				 * system as well so that we transition
				 * into high-res mode:
				 */
				tick_clock_notify();
			}
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
		} else {
			cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask);
			cs->wd_last = csnow;
			/* Check the delta. Might remove from the list! */
			clocksource_ratewd(cs, cs_nsec - wd_nsec);
		}
	}

	if (!list_empty(&watchdog_list)) {
		__mod_timer(&watchdog_timer,
			    watchdog_timer.expires + WATCHDOG_INTERVAL);
	}
	spin_unlock(&watchdog_lock);
}

static void clocksource_check_watchdog(struct clocksource *cs)
{
	struct clocksource *cse;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		int started = !list_empty(&watchdog_list);

		list_add(&cs->wd_list, &watchdog_list);
		if (!started && watchdog) {
			watchdog_last = watchdog->read();
			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
			add_timer(&watchdog_timer);
		}
	} else if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) {
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

		if (!watchdog || cs->rating > watchdog->rating) {
			if (watchdog)
				del_timer(&watchdog_timer);
			watchdog = cs;
			init_timer(&watchdog_timer);
			watchdog_timer.function = clocksource_watchdog;

			/* Reset watchdog cycles */
			list_for_each_entry(cse, &watchdog_list, wd_list)
				cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
			/* Start if list is not empty */
			if (!list_empty(&watchdog_list)) {
				watchdog_last = watchdog->read();
				watchdog_timer.expires =
					jiffies + WATCHDOG_INTERVAL;
				add_timer(&watchdog_timer);
			}
		}
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
#else
static void clocksource_check_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}
#endif

/**
 * clocksource_get_next - Returns the selected clocksource
 *
 */
struct clocksource *clocksource_get_next(void)
{
	unsigned long flags;

	spin_lock_irqsave(&clocksource_lock, flags);
	if (next_clocksource && finished_booting) {
		curr_clocksource = next_clocksource;
		next_clocksource = NULL;
	}
	spin_unlock_irqrestore(&clocksource_lock, flags);

	return curr_clocksource;
}
/**
 * select_clocksource - Selects the best registered clocksource.
 *
 * Private function. Must hold clocksource_lock when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by the userspace override.
 */
static struct clocksource *select_clocksource(void)
{
	struct clocksource *next;

	if (list_empty(&clocksource_list))
		return NULL;

	if (clocksource_override)
		next = clocksource_override;
	else
		next = list_entry(clocksource_list.next, struct clocksource,
				  list);

	if (next == curr_clocksource)
		return NULL;

	return next;
}

/*
 * Enqueue the clocksource sorted by rating
 */
static int clocksource_enqueue(struct clocksource *c)
{
	struct list_head *tmp, *entry = &clocksource_list;

	list_for_each(tmp, &clocksource_list) {
		struct clocksource *cs;

		cs = list_entry(tmp, struct clocksource, list);
		if (cs == c)
			return -EBUSY;
		/* Keep track of the place where to insert */
		if (cs->rating >= c->rating)
			entry = tmp;
	}
	list_add(&c->list, entry);

	if (strlen(c->name) == strlen(override_name) &&
	    !strcmp(c->name, override_name))
		clocksource_override = c;

	return 0;
}

/**
 * clocksource_register - Used to install new clocksources
 * @c: clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *c)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&clocksource_lock, flags);
	ret = clocksource_enqueue(c);
	if (!ret)
		next_clocksource = select_clocksource();
	spin_unlock_irqrestore(&clocksource_lock, flags);
	if (!ret)
		clocksource_check_watchdog(c);
	return ret;
}
EXPORT_SYMBOL(clocksource_register);

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 *
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	spin_lock_irqsave(&clocksource_lock, flags);
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	next_clocksource = select_clocksource();
	spin_unlock_irqrestore(&clocksource_lock, flags);
}
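/*
 * Illustrative sketch (not part of this file): how a clocksource driver
 * might fill in a struct clocksource and register it.  The counter access
 * helper read_hw_counter() and the EXAMPLE_COUNTER_HZ rate are hypothetical
 * placeholders; a real driver uses its own hardware accessors and rate.
 */
#if 0
static cycle_t example_read(void)
{
	/* Read the free-running hardware counter of this hypothetical clock */
	return (cycle_t)read_hw_counter();
}

static struct clocksource clocksource_example = {
	.name	= "example",
	.rating	= 200,			/* good enough, but not the best */
	.read	= example_read,
	.mask	= CLOCKSOURCE_MASK(32),	/* 32 bit wide counter */
	.shift	= 20,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_clocksource_init(void)
{
	/* Derive the cycles-to-nanoseconds multiplier from the counter rate */
	clocksource_example.mult =
		clocksource_hz2mult(EXAMPLE_COUNTER_HZ,
				    clocksource_example.shift);
	return clocksource_register(&clocksource_example);
}
#endif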
#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
{
	char *curr = buf;

	spin_lock_irq(&clocksource_lock);
	curr += sprintf(curr, "%s ", curr_clocksource->name);
	spin_unlock_irq(&clocksource_lock);

	curr += sprintf(curr, "\n");

	return curr - buf;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev: unused
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
					  const char *buf, size_t count)
{
	struct clocksource *ovr = NULL;
	struct list_head *tmp;
	size_t ret = count;
	int len;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;

	spin_lock_irq(&clocksource_lock);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;

	len = strlen(override_name);
	if (len) {
		ovr = clocksource_override;
		/* try to select it: */
		list_for_each(tmp, &clocksource_list) {
			struct clocksource *cs;

			cs = list_entry(tmp, struct clocksource, list);
			if (strlen(cs->name) == len &&
			    !strcmp(cs->name, override_name))
				ovr = cs;
		}
	}

	/* Reselect when the override name has changed */
	if (ovr != clocksource_override) {
		clocksource_override = ovr;
		next_clocksource = select_clocksource();
	}

	spin_unlock_irq(&clocksource_lock);

	return ret;
}

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev, char *buf)
{
	struct list_head *tmp;
	char *curr = buf;

	spin_lock_irq(&clocksource_lock);
	list_for_each(tmp, &clocksource_list) {
		struct clocksource *src;

		src = list_entry(tmp, struct clocksource, list);
		curr += sprintf(curr, "%s ", src->name);
	}
	spin_unlock_irq(&clocksource_lock);

	curr += sprintf(curr, "\n");

	return curr - buf;
}

/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(current_clocksource, 0600, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0600,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	set_kset_name("clocksource"),
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};

static int __init init_clocksource_sysfs(void)
{
	int error = sysdev_class_register(&clocksource_sysclass);

	if (!error)
		error = sysdev_register(&device_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_current_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */
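/*
 * Illustrative usage (assuming the usual sysdev layout, where the
 * "clocksource" class above appears as clocksource0 under
 * /sys/devices/system/clocksource/); the names shown are examples only:
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	tsc acpi_pm jiffies
 *	# echo acpi_pm > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * The write is handled by sysfs_override_clocksource(), which updates
 * override_name and lets select_clocksource() pick the new source.
 */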
" 468 "Use clocksource=acpi_pm.\n"); 469 return boot_override_clocksource("acpi_pm"); 470 } 471 printk("Warning! clock= boot option is deprecated. " 472 "Use clocksource=xyz\n"); 473 return boot_override_clocksource(str); 474 } 475 476 __setup("clock=", boot_override_clock); 477