1 /* 2 * linux/kernel/time/clockevents.c 3 * 4 * This file contains functions which manage clock event devices. 5 * 6 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> 7 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar 8 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner 9 * 10 * This code is licenced under the GPL version 2. For details see 11 * kernel-base/COPYING. 12 */ 13 14 #include <linux/clockchips.h> 15 #include <linux/hrtimer.h> 16 #include <linux/init.h> 17 #include <linux/module.h> 18 #include <linux/smp.h> 19 #include <linux/device.h> 20 21 #include "tick-internal.h" 22 23 /* The registered clock event devices */ 24 static LIST_HEAD(clockevent_devices); 25 static LIST_HEAD(clockevents_released); 26 /* Protection for the above */ 27 static DEFINE_RAW_SPINLOCK(clockevents_lock); 28 /* Protection for unbind operations */ 29 static DEFINE_MUTEX(clockevents_mutex); 30 31 struct ce_unbind { 32 struct clock_event_device *ce; 33 int res; 34 }; 35 36 static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt, 37 bool ismax) 38 { 39 u64 clc = (u64) latch << evt->shift; 40 u64 rnd; 41 42 if (unlikely(!evt->mult)) { 43 evt->mult = 1; 44 WARN_ON(1); 45 } 46 rnd = (u64) evt->mult - 1; 47 48 /* 49 * Upper bound sanity check. If the backwards conversion is 50 * not equal latch, we know that the above shift overflowed. 51 */ 52 if ((clc >> evt->shift) != (u64)latch) 53 clc = ~0ULL; 54 55 /* 56 * Scaled math oddities: 57 * 58 * For mult <= (1 << shift) we can safely add mult - 1 to 59 * prevent integer rounding loss. So the backwards conversion 60 * from nsec to device ticks will be correct. 61 * 62 * For mult > (1 << shift), i.e. device frequency is > 1GHz we 63 * need to be careful. Adding mult - 1 will result in a value 64 * which when converted back to device ticks can be larger 65 * than latch by up to (mult - 1) >> shift. For the min_delta 66 * calculation we still want to apply this in order to stay 67 * above the minimum device ticks limit. For the upper limit 68 * we would end up with a latch value larger than the upper 69 * limit of the device, so we omit the add to stay below the 70 * device upper boundary. 71 * 72 * Also omit the add if it would overflow the u64 boundary. 73 */ 74 if ((~0ULL - clc > rnd) && 75 (!ismax || evt->mult <= (1ULL << evt->shift))) 76 clc += rnd; 77 78 do_div(clc, evt->mult); 79 80 /* Deltas less than 1usec are pointless noise */ 81 return clc > 1000 ? clc : 1000; 82 } 83 84 /** 85 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds 86 * @latch: value to convert 87 * @evt: pointer to clock event device descriptor 88 * 89 * Math helper, returns latch value converted to nanoseconds (bound checked) 90 */ 91 u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) 92 { 93 return cev_delta2ns(latch, evt, false); 94 } 95 EXPORT_SYMBOL_GPL(clockevent_delta2ns); 96 97 static int __clockevents_switch_state(struct clock_event_device *dev, 98 enum clock_event_state state) 99 { 100 /* Transition with legacy set_mode() callback */ 101 if (dev->set_mode) { 102 /* Legacy callback doesn't support new modes */ 103 if (state > CLOCK_EVT_STATE_ONESHOT) 104 return -ENOSYS; 105 /* 106 * 'clock_event_state' and 'clock_event_mode' have 1-to-1 107 * mapping until *_ONESHOT, and so a simple cast will work. 108 */ 109 dev->set_mode((enum clock_event_mode)state, dev); 110 dev->mode = (enum clock_event_mode)state; 111 return 0; 112 } 113 114 if (dev->features & CLOCK_EVT_FEAT_DUMMY) 115 return 0; 116 117 /* Transition with new state-specific callbacks */ 118 switch (state) { 119 case CLOCK_EVT_STATE_DETACHED: 120 /* The clockevent device is getting replaced. Shut it down. */ 121 122 case CLOCK_EVT_STATE_SHUTDOWN: 123 if (dev->set_state_shutdown) 124 return dev->set_state_shutdown(dev); 125 return 0; 126 127 case CLOCK_EVT_STATE_PERIODIC: 128 /* Core internal bug */ 129 if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC)) 130 return -ENOSYS; 131 if (dev->set_state_periodic) 132 return dev->set_state_periodic(dev); 133 return 0; 134 135 case CLOCK_EVT_STATE_ONESHOT: 136 /* Core internal bug */ 137 if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) 138 return -ENOSYS; 139 if (dev->set_state_oneshot) 140 return dev->set_state_oneshot(dev); 141 return 0; 142 143 case CLOCK_EVT_STATE_ONESHOT_STOPPED: 144 /* Core internal bug */ 145 if (WARN_ONCE(!clockevent_state_oneshot(dev), 146 "Current state: %d\n", 147 clockevent_get_state(dev))) 148 return -EINVAL; 149 150 if (dev->set_state_oneshot_stopped) 151 return dev->set_state_oneshot_stopped(dev); 152 else 153 return -ENOSYS; 154 155 default: 156 return -ENOSYS; 157 } 158 } 159 160 /** 161 * clockevents_switch_state - set the operating state of a clock event device 162 * @dev: device to modify 163 * @state: new state 164 * 165 * Must be called with interrupts disabled ! 166 */ 167 void clockevents_switch_state(struct clock_event_device *dev, 168 enum clock_event_state state) 169 { 170 if (clockevent_get_state(dev) != state) { 171 if (__clockevents_switch_state(dev, state)) 172 return; 173 174 clockevent_set_state(dev, state); 175 176 /* 177 * A nsec2cyc multiplicator of 0 is invalid and we'd crash 178 * on it, so fix it up and emit a warning: 179 */ 180 if (clockevent_state_oneshot(dev)) { 181 if (unlikely(!dev->mult)) { 182 dev->mult = 1; 183 WARN_ON(1); 184 } 185 } 186 } 187 } 188 189 /** 190 * clockevents_shutdown - shutdown the device and clear next_event 191 * @dev: device to shutdown 192 */ 193 void clockevents_shutdown(struct clock_event_device *dev) 194 { 195 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); 196 dev->next_event.tv64 = KTIME_MAX; 197 } 198 199 /** 200 * clockevents_tick_resume - Resume the tick device before using it again 201 * @dev: device to resume 202 */ 203 int clockevents_tick_resume(struct clock_event_device *dev) 204 { 205 int ret = 0; 206 207 if (dev->set_mode) { 208 dev->set_mode(CLOCK_EVT_MODE_RESUME, dev); 209 dev->mode = CLOCK_EVT_MODE_RESUME; 210 } else if (dev->tick_resume) { 211 ret = dev->tick_resume(dev); 212 } 213 214 return ret; 215 } 216 217 #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST 218 219 /* Limit min_delta to a jiffie */ 220 #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ) 221 222 /** 223 * clockevents_increase_min_delta - raise minimum delta of a clock event device 224 * @dev: device to increase the minimum delta 225 * 226 * Returns 0 on success, -ETIME when the minimum delta reached the limit. 227 */ 228 static int clockevents_increase_min_delta(struct clock_event_device *dev) 229 { 230 /* Nothing to do if we already reached the limit */ 231 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { 232 printk_deferred(KERN_WARNING 233 "CE: Reprogramming failure. Giving up\n"); 234 dev->next_event.tv64 = KTIME_MAX; 235 return -ETIME; 236 } 237 238 if (dev->min_delta_ns < 5000) 239 dev->min_delta_ns = 5000; 240 else 241 dev->min_delta_ns += dev->min_delta_ns >> 1; 242 243 if (dev->min_delta_ns > MIN_DELTA_LIMIT) 244 dev->min_delta_ns = MIN_DELTA_LIMIT; 245 246 printk_deferred(KERN_WARNING 247 "CE: %s increased min_delta_ns to %llu nsec\n", 248 dev->name ? dev->name : "?", 249 (unsigned long long) dev->min_delta_ns); 250 return 0; 251 } 252 253 /** 254 * clockevents_program_min_delta - Set clock event device to the minimum delay. 255 * @dev: device to program 256 * 257 * Returns 0 on success, -ETIME when the retry loop failed. 258 */ 259 static int clockevents_program_min_delta(struct clock_event_device *dev) 260 { 261 unsigned long long clc; 262 int64_t delta; 263 int i; 264 265 for (i = 0;;) { 266 delta = dev->min_delta_ns; 267 dev->next_event = ktime_add_ns(ktime_get(), delta); 268 269 if (clockevent_state_shutdown(dev)) 270 return 0; 271 272 dev->retries++; 273 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 274 if (dev->set_next_event((unsigned long) clc, dev) == 0) 275 return 0; 276 277 if (++i > 2) { 278 /* 279 * We tried 3 times to program the device with the 280 * given min_delta_ns. Try to increase the minimum 281 * delta, if that fails as well get out of here. 282 */ 283 if (clockevents_increase_min_delta(dev)) 284 return -ETIME; 285 i = 0; 286 } 287 } 288 } 289 290 #else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ 291 292 /** 293 * clockevents_program_min_delta - Set clock event device to the minimum delay. 294 * @dev: device to program 295 * 296 * Returns 0 on success, -ETIME when the retry loop failed. 297 */ 298 static int clockevents_program_min_delta(struct clock_event_device *dev) 299 { 300 unsigned long long clc; 301 int64_t delta; 302 303 delta = dev->min_delta_ns; 304 dev->next_event = ktime_add_ns(ktime_get(), delta); 305 306 if (clockevent_state_shutdown(dev)) 307 return 0; 308 309 dev->retries++; 310 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 311 return dev->set_next_event((unsigned long) clc, dev); 312 } 313 314 #endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ 315 316 /** 317 * clockevents_program_event - Reprogram the clock event device. 318 * @dev: device to program 319 * @expires: absolute expiry time (monotonic clock) 320 * @force: program minimum delay if expires can not be set 321 * 322 * Returns 0 on success, -ETIME when the event is in the past. 323 */ 324 int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, 325 bool force) 326 { 327 unsigned long long clc; 328 int64_t delta; 329 int rc; 330 331 if (unlikely(expires.tv64 < 0)) { 332 WARN_ON_ONCE(1); 333 return -ETIME; 334 } 335 336 dev->next_event = expires; 337 338 if (clockevent_state_shutdown(dev)) 339 return 0; 340 341 /* We must be in ONESHOT state here */ 342 WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n", 343 clockevent_get_state(dev)); 344 345 /* Shortcut for clockevent devices that can deal with ktime. */ 346 if (dev->features & CLOCK_EVT_FEAT_KTIME) 347 return dev->set_next_ktime(expires, dev); 348 349 delta = ktime_to_ns(ktime_sub(expires, ktime_get())); 350 if (delta <= 0) 351 return force ? clockevents_program_min_delta(dev) : -ETIME; 352 353 delta = min(delta, (int64_t) dev->max_delta_ns); 354 delta = max(delta, (int64_t) dev->min_delta_ns); 355 356 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 357 rc = dev->set_next_event((unsigned long) clc, dev); 358 359 return (rc && force) ? clockevents_program_min_delta(dev) : rc; 360 } 361 362 /* 363 * Called after a notify add to make devices available which were 364 * released from the notifier call. 365 */ 366 static void clockevents_notify_released(void) 367 { 368 struct clock_event_device *dev; 369 370 while (!list_empty(&clockevents_released)) { 371 dev = list_entry(clockevents_released.next, 372 struct clock_event_device, list); 373 list_del(&dev->list); 374 list_add(&dev->list, &clockevent_devices); 375 tick_check_new_device(dev); 376 } 377 } 378 379 /* 380 * Try to install a replacement clock event device 381 */ 382 static int clockevents_replace(struct clock_event_device *ced) 383 { 384 struct clock_event_device *dev, *newdev = NULL; 385 386 list_for_each_entry(dev, &clockevent_devices, list) { 387 if (dev == ced || !clockevent_state_detached(dev)) 388 continue; 389 390 if (!tick_check_replacement(newdev, dev)) 391 continue; 392 393 if (!try_module_get(dev->owner)) 394 continue; 395 396 if (newdev) 397 module_put(newdev->owner); 398 newdev = dev; 399 } 400 if (newdev) { 401 tick_install_replacement(newdev); 402 list_del_init(&ced->list); 403 } 404 return newdev ? 0 : -EBUSY; 405 } 406 407 /* 408 * Called with clockevents_mutex and clockevents_lock held 409 */ 410 static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu) 411 { 412 /* Fast track. Device is unused */ 413 if (clockevent_state_detached(ced)) { 414 list_del_init(&ced->list); 415 return 0; 416 } 417 418 return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY; 419 } 420 421 /* 422 * SMP function call to unbind a device 423 */ 424 static void __clockevents_unbind(void *arg) 425 { 426 struct ce_unbind *cu = arg; 427 int res; 428 429 raw_spin_lock(&clockevents_lock); 430 res = __clockevents_try_unbind(cu->ce, smp_processor_id()); 431 if (res == -EAGAIN) 432 res = clockevents_replace(cu->ce); 433 cu->res = res; 434 raw_spin_unlock(&clockevents_lock); 435 } 436 437 /* 438 * Issues smp function call to unbind a per cpu device. Called with 439 * clockevents_mutex held. 440 */ 441 static int clockevents_unbind(struct clock_event_device *ced, int cpu) 442 { 443 struct ce_unbind cu = { .ce = ced, .res = -ENODEV }; 444 445 smp_call_function_single(cpu, __clockevents_unbind, &cu, 1); 446 return cu.res; 447 } 448 449 /* 450 * Unbind a clockevents device. 451 */ 452 int clockevents_unbind_device(struct clock_event_device *ced, int cpu) 453 { 454 int ret; 455 456 mutex_lock(&clockevents_mutex); 457 ret = clockevents_unbind(ced, cpu); 458 mutex_unlock(&clockevents_mutex); 459 return ret; 460 } 461 EXPORT_SYMBOL_GPL(clockevents_unbind_device); 462 463 /* Sanity check of state transition callbacks */ 464 static int clockevents_sanity_check(struct clock_event_device *dev) 465 { 466 /* Legacy set_mode() callback */ 467 if (dev->set_mode) { 468 /* We shouldn't be supporting new modes now */ 469 WARN_ON(dev->set_state_periodic || dev->set_state_oneshot || 470 dev->set_state_shutdown || dev->tick_resume || 471 dev->set_state_oneshot_stopped); 472 473 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 474 return 0; 475 } 476 477 if (dev->features & CLOCK_EVT_FEAT_DUMMY) 478 return 0; 479 480 return 0; 481 } 482 483 /** 484 * clockevents_register_device - register a clock event device 485 * @dev: device to register 486 */ 487 void clockevents_register_device(struct clock_event_device *dev) 488 { 489 unsigned long flags; 490 491 BUG_ON(clockevents_sanity_check(dev)); 492 493 /* Initialize state to DETACHED */ 494 clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED); 495 496 if (!dev->cpumask) { 497 WARN_ON(num_possible_cpus() > 1); 498 dev->cpumask = cpumask_of(smp_processor_id()); 499 } 500 501 raw_spin_lock_irqsave(&clockevents_lock, flags); 502 503 list_add(&dev->list, &clockevent_devices); 504 tick_check_new_device(dev); 505 clockevents_notify_released(); 506 507 raw_spin_unlock_irqrestore(&clockevents_lock, flags); 508 } 509 EXPORT_SYMBOL_GPL(clockevents_register_device); 510 511 void clockevents_config(struct clock_event_device *dev, u32 freq) 512 { 513 u64 sec; 514 515 if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) 516 return; 517 518 /* 519 * Calculate the maximum number of seconds we can sleep. Limit 520 * to 10 minutes for hardware which can program more than 521 * 32bit ticks so we still get reasonable conversion values. 522 */ 523 sec = dev->max_delta_ticks; 524 do_div(sec, freq); 525 if (!sec) 526 sec = 1; 527 else if (sec > 600 && dev->max_delta_ticks > UINT_MAX) 528 sec = 600; 529 530 clockevents_calc_mult_shift(dev, freq, sec); 531 dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false); 532 dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true); 533 } 534 535 /** 536 * clockevents_config_and_register - Configure and register a clock event device 537 * @dev: device to register 538 * @freq: The clock frequency 539 * @min_delta: The minimum clock ticks to program in oneshot mode 540 * @max_delta: The maximum clock ticks to program in oneshot mode 541 * 542 * min/max_delta can be 0 for devices which do not support oneshot mode. 543 */ 544 void clockevents_config_and_register(struct clock_event_device *dev, 545 u32 freq, unsigned long min_delta, 546 unsigned long max_delta) 547 { 548 dev->min_delta_ticks = min_delta; 549 dev->max_delta_ticks = max_delta; 550 clockevents_config(dev, freq); 551 clockevents_register_device(dev); 552 } 553 EXPORT_SYMBOL_GPL(clockevents_config_and_register); 554 555 int __clockevents_update_freq(struct clock_event_device *dev, u32 freq) 556 { 557 clockevents_config(dev, freq); 558 559 if (clockevent_state_oneshot(dev)) 560 return clockevents_program_event(dev, dev->next_event, false); 561 562 if (clockevent_state_periodic(dev)) 563 return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC); 564 565 return 0; 566 } 567 568 /** 569 * clockevents_update_freq - Update frequency and reprogram a clock event device. 570 * @dev: device to modify 571 * @freq: new device frequency 572 * 573 * Reconfigure and reprogram a clock event device in oneshot 574 * mode. Must be called on the cpu for which the device delivers per 575 * cpu timer events. If called for the broadcast device the core takes 576 * care of serialization. 577 * 578 * Returns 0 on success, -ETIME when the event is in the past. 579 */ 580 int clockevents_update_freq(struct clock_event_device *dev, u32 freq) 581 { 582 unsigned long flags; 583 int ret; 584 585 local_irq_save(flags); 586 ret = tick_broadcast_update_freq(dev, freq); 587 if (ret == -ENODEV) 588 ret = __clockevents_update_freq(dev, freq); 589 local_irq_restore(flags); 590 return ret; 591 } 592 593 /* 594 * Noop handler when we shut down an event device 595 */ 596 void clockevents_handle_noop(struct clock_event_device *dev) 597 { 598 } 599 600 /** 601 * clockevents_exchange_device - release and request clock devices 602 * @old: device to release (can be NULL) 603 * @new: device to request (can be NULL) 604 * 605 * Called from various tick functions with clockevents_lock held and 606 * interrupts disabled. 607 */ 608 void clockevents_exchange_device(struct clock_event_device *old, 609 struct clock_event_device *new) 610 { 611 /* 612 * Caller releases a clock event device. We queue it into the 613 * released list and do a notify add later. 614 */ 615 if (old) { 616 module_put(old->owner); 617 clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED); 618 list_del(&old->list); 619 list_add(&old->list, &clockevents_released); 620 } 621 622 if (new) { 623 BUG_ON(!clockevent_state_detached(new)); 624 clockevents_shutdown(new); 625 } 626 } 627 628 /** 629 * clockevents_suspend - suspend clock devices 630 */ 631 void clockevents_suspend(void) 632 { 633 struct clock_event_device *dev; 634 635 list_for_each_entry_reverse(dev, &clockevent_devices, list) 636 if (dev->suspend && !clockevent_state_detached(dev)) 637 dev->suspend(dev); 638 } 639 640 /** 641 * clockevents_resume - resume clock devices 642 */ 643 void clockevents_resume(void) 644 { 645 struct clock_event_device *dev; 646 647 list_for_each_entry(dev, &clockevent_devices, list) 648 if (dev->resume && !clockevent_state_detached(dev)) 649 dev->resume(dev); 650 } 651 652 #ifdef CONFIG_HOTPLUG_CPU 653 /** 654 * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu 655 */ 656 void tick_cleanup_dead_cpu(int cpu) 657 { 658 struct clock_event_device *dev, *tmp; 659 unsigned long flags; 660 661 raw_spin_lock_irqsave(&clockevents_lock, flags); 662 663 tick_shutdown_broadcast_oneshot(cpu); 664 tick_shutdown_broadcast(cpu); 665 tick_shutdown(cpu); 666 /* 667 * Unregister the clock event devices which were 668 * released from the users in the notify chain. 669 */ 670 list_for_each_entry_safe(dev, tmp, &clockevents_released, list) 671 list_del(&dev->list); 672 /* 673 * Now check whether the CPU has left unused per cpu devices 674 */ 675 list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { 676 if (cpumask_test_cpu(cpu, dev->cpumask) && 677 cpumask_weight(dev->cpumask) == 1 && 678 !tick_is_broadcast_device(dev)) { 679 BUG_ON(!clockevent_state_detached(dev)); 680 list_del(&dev->list); 681 } 682 } 683 raw_spin_unlock_irqrestore(&clockevents_lock, flags); 684 } 685 #endif 686 687 #ifdef CONFIG_SYSFS 688 struct bus_type clockevents_subsys = { 689 .name = "clockevents", 690 .dev_name = "clockevent", 691 }; 692 693 static DEFINE_PER_CPU(struct device, tick_percpu_dev); 694 static struct tick_device *tick_get_tick_dev(struct device *dev); 695 696 static ssize_t sysfs_show_current_tick_dev(struct device *dev, 697 struct device_attribute *attr, 698 char *buf) 699 { 700 struct tick_device *td; 701 ssize_t count = 0; 702 703 raw_spin_lock_irq(&clockevents_lock); 704 td = tick_get_tick_dev(dev); 705 if (td && td->evtdev) 706 count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name); 707 raw_spin_unlock_irq(&clockevents_lock); 708 return count; 709 } 710 static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL); 711 712 /* We don't support the abomination of removable broadcast devices */ 713 static ssize_t sysfs_unbind_tick_dev(struct device *dev, 714 struct device_attribute *attr, 715 const char *buf, size_t count) 716 { 717 char name[CS_NAME_LEN]; 718 ssize_t ret = sysfs_get_uname(buf, name, count); 719 struct clock_event_device *ce; 720 721 if (ret < 0) 722 return ret; 723 724 ret = -ENODEV; 725 mutex_lock(&clockevents_mutex); 726 raw_spin_lock_irq(&clockevents_lock); 727 list_for_each_entry(ce, &clockevent_devices, list) { 728 if (!strcmp(ce->name, name)) { 729 ret = __clockevents_try_unbind(ce, dev->id); 730 break; 731 } 732 } 733 raw_spin_unlock_irq(&clockevents_lock); 734 /* 735 * We hold clockevents_mutex, so ce can't go away 736 */ 737 if (ret == -EAGAIN) 738 ret = clockevents_unbind(ce, dev->id); 739 mutex_unlock(&clockevents_mutex); 740 return ret ? ret : count; 741 } 742 static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev); 743 744 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 745 static struct device tick_bc_dev = { 746 .init_name = "broadcast", 747 .id = 0, 748 .bus = &clockevents_subsys, 749 }; 750 751 static struct tick_device *tick_get_tick_dev(struct device *dev) 752 { 753 return dev == &tick_bc_dev ? tick_get_broadcast_device() : 754 &per_cpu(tick_cpu_device, dev->id); 755 } 756 757 static __init int tick_broadcast_init_sysfs(void) 758 { 759 int err = device_register(&tick_bc_dev); 760 761 if (!err) 762 err = device_create_file(&tick_bc_dev, &dev_attr_current_device); 763 return err; 764 } 765 #else 766 static struct tick_device *tick_get_tick_dev(struct device *dev) 767 { 768 return &per_cpu(tick_cpu_device, dev->id); 769 } 770 static inline int tick_broadcast_init_sysfs(void) { return 0; } 771 #endif 772 773 static int __init tick_init_sysfs(void) 774 { 775 int cpu; 776 777 for_each_possible_cpu(cpu) { 778 struct device *dev = &per_cpu(tick_percpu_dev, cpu); 779 int err; 780 781 dev->id = cpu; 782 dev->bus = &clockevents_subsys; 783 err = device_register(dev); 784 if (!err) 785 err = device_create_file(dev, &dev_attr_current_device); 786 if (!err) 787 err = device_create_file(dev, &dev_attr_unbind_device); 788 if (err) 789 return err; 790 } 791 return tick_broadcast_init_sysfs(); 792 } 793 794 static int __init clockevents_init_sysfs(void) 795 { 796 int err = subsys_system_register(&clockevents_subsys, NULL); 797 798 if (!err) 799 err = tick_init_sysfs(); 800 return err; 801 } 802 device_initcall(clockevents_init_sysfs); 803 #endif /* SYSFS */ 804