// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (WARN_ON(!evt->mult))
		evt->mult = 1;
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion is
	 * not equal to latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. the device frequency is > 1GHz,
	 * we need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
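	/*
	 * Worked example (hypothetical device, not taken from a real
	 * driver): a ~10MHz timer could use shift = 32 and
	 * mult = 42949673, so mult <= (1ULL << shift) and the rounding
	 * term rnd = mult - 1 is added on both the min and max path.
	 * For latch = 50 device ticks:
	 *
	 *	clc = (50 << 32) + 42949672 = 214791314472
	 *	clc / mult = 5000 ns
	 *
	 * Without rnd the division would truncate to 4999 ns, which
	 * converts back to only 49 device ticks.
	 */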
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1ULL << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

static int __clockevents_switch_state(struct clock_event_device *dev,
				      enum clock_event_state state)
{
	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* On state transitions clear the forced flag unconditionally */
	dev->next_event_forced = 0;

	/* Transition with new state-specific callbacks */
	switch (state) {
	case CLOCK_EVT_STATE_DETACHED:
		/* The clockevent device is getting replaced. Shut it down. */

	case CLOCK_EVT_STATE_SHUTDOWN:
		if (dev->set_state_shutdown)
			return dev->set_state_shutdown(dev);
		return 0;

	case CLOCK_EVT_STATE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		if (dev->set_state_periodic)
			return dev->set_state_periodic(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		if (dev->set_state_oneshot)
			return dev->set_state_oneshot(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
		/* Core internal bug */
		if (WARN_ONCE(!clockevent_state_oneshot(dev),
			      "Current state: %d\n",
			      clockevent_get_state(dev)))
			return -EINVAL;

		if (dev->set_state_oneshot_stopped)
			return dev->set_state_oneshot_stopped(dev);
		else
			return -ENOSYS;

	default:
		return -ENOSYS;
	}
}

/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev:	device to modify
 * @state:	new state
 *
 * Must be called with interrupts disabled!
 */
void clockevents_switch_state(struct clock_event_device *dev,
			      enum clock_event_state state)
{
	if (clockevent_get_state(dev) != state) {
		if (__clockevents_switch_state(dev, state))
			return;

		clockevent_set_state(dev, state);

		/*
		 * A nsec2cyc multiplier of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (clockevent_state_oneshot(dev)) {
			if (WARN_ON(!dev->mult))
				dev->mult = 1;
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event = KTIME_MAX;
	dev->next_event_forced = 0;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev:	device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
	int ret = 0;

	if (dev->tick_resume)
		ret = dev->tick_resume(dev);

	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffy */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)
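/* e.g. 1000000000 / 250 = 4000000 ns (4ms) with HZ=250, 1ms with HZ=1000 */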

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
		dev->next_event = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk_deferred(KERN_WARNING
			"CE: %s increased min_delta_ns to %llu nsec\n",
			dev->name ? dev->name : "?",
			(unsigned long long) dev->min_delta_ns);
	return 0;
}
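
/*
 * Illustrative sequence (hypothetical starting value): from
 * min_delta_ns = 1000 the successive calls yield 5000, 7500, 11250, ...
 * growing by 50% per call until the value is capped at MIN_DELTA_LIMIT;
 * the call after that gives up with -ETIME.
 */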

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta = 0;
	int i;

	for (i = 0; i < 10; i++) {
		delta += dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;
	}
	return -ETIME;
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_COUPLED
#ifdef CONFIG_GENERIC_CLOCKEVENTS_COUPLED_INLINE
#include <asm/clock_inlined.h>
#else
static __always_inline void
arch_inlined_clockevent_set_next_coupled(u64 cycles, struct clock_event_device *dev) { }
#endif

static inline bool clockevent_set_next_coupled(struct clock_event_device *dev, ktime_t expires)
{
	u64 cycles;

	if (unlikely(!(dev->features & CLOCK_EVT_FEAT_CLOCKSOURCE_COUPLED)))
		return false;

	if (unlikely(!ktime_expiry_to_cycles(dev->cs_id, expires, &cycles)))
		return false;

	if (IS_ENABLED(CONFIG_GENERIC_CLOCKEVENTS_COUPLED_INLINE))
		arch_inlined_clockevent_set_next_coupled(cycles, dev);
	else
		dev->set_next_coupled(cycles, dev);
	return true;
}

#else
static inline bool clockevent_set_next_coupled(struct clock_event_device *dev, ktime_t expires)
{
	return false;
}
#endif

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires cannot be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, bool force)
{
	int64_t delta;
	u64 cycles;

	if (WARN_ON_ONCE(expires < 0))
		return -ETIME;

	dev->next_event = expires;

	if (clockevent_state_shutdown(dev))
		return 0;

	/* We must be in ONESHOT state here */
	WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
		  clockevent_get_state(dev));

	/* ktime_t based reprogramming for the broadcast hrtimer device */
	if (unlikely(dev->features & CLOCK_EVT_FEAT_HRTIMER))
		return dev->set_next_ktime(expires, dev);

	if (likely(clockevent_set_next_coupled(dev, expires)))
		return 0;

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));

	/* Required for tick_periodic() during early boot */
	if (delta <= 0 && !force)
		return -ETIME;

	if (delta > (int64_t)dev->min_delta_ns) {
		delta = min(delta, (int64_t) dev->max_delta_ns);
		cycles = ((u64)delta * dev->mult) >> dev->shift;
		if (!dev->set_next_event((unsigned long) cycles, dev)) {
			dev->next_event_forced = 0;
			return 0;
		}
	}

	if (dev->next_event_forced)
		return 0;

	if (dev->set_next_event(dev->min_delta_ticks, dev)) {
		if (!force || clockevents_program_min_delta(dev))
			return -ETIME;
	}
	dev->next_event_forced = 1;
	return 0;
}
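
/*
 * For reference, clockevents_program_event() tries the paths above in
 * this order:
 *
 *  1. hrtimer based broadcast device: dev->set_next_ktime()
 *  2. coupled clocksource path, if configured: clockevent_set_next_coupled()
 *  3. regular path: delta clamped to max_delta_ns, scaled by mult/shift
 *     and handed to dev->set_next_event()
 *  4. forced minimum: min_delta_ticks, falling back to the retry loop
 *     in clockevents_program_min_delta() when @force is set
 */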

/*
 * Called after a clockevent has been added which might
 * have replaced a current regular or broadcast device. A
 * released normal device might be a suitable replacement
 * for the current broadcast device. Similarly a released
 * broadcast device might be a suitable replacement for a
 * normal device.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	/*
	 * Keep iterating as long as tick_check_new_device()
	 * replaces a device.
	 */
	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_move(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || !clockevent_state_detached(dev))
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (clockevent_state_detached(ced)) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	/* Initialize state to DETACHED */
	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);

	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	if (dev->cpumask == cpu_all_mask) {
		WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
		     dev->name);
		dev->cpumask = cpu_possible_mask;
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

static void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
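
/*
 * Worked example (hypothetical hardware): a 1MHz timer with a full
 * 32bit counter has max_delta_ticks = 0xffffffff, so
 * sec = 0xffffffff / 1000000 = 4294 (~71 minutes). The 600s cap does
 * not apply because max_delta_ticks does not exceed UINT_MAX; only a
 * device with a wider counter would be limited to 10 minutes to keep
 * the mult/shift conversion precise.
 */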

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);

int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (clockevent_state_oneshot(dev))
		return clockevents_program_event(dev, dev->next_event, false);

	if (clockevent_state_periodic(dev))
		return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
		list_move(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(!clockevent_state_detached(new));
		clockevents_shutdown(new);
	}
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend && !clockevent_state_detached(dev))
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume && !clockevent_state_detached(dev))
			dev->resume(dev);
}

#ifdef CONFIG_HOTPLUG_CPU

/**
 * tick_offline_cpu - Shutdown all clock events related
 *		      to this CPU and take it out of the
 *		      broadcast mechanism.
 * @cpu:	The outgoing CPU
 *
 * Called by the dying CPU during teardown.
 */
void tick_offline_cpu(unsigned int cpu)
{
	struct clock_event_device *dev, *tmp;

	raw_spin_lock(&clockevents_lock);

	tick_broadcast_offline(cpu);
	tick_shutdown();

	/*
	 * Unregister the clock event devices which were
	 * released above.
	 */
	list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
		list_del(&dev->list);

	/*
	 * Now check whether the CPU has left unused per cpu devices
	 */
	list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
		if (cpumask_test_cpu(cpu, dev->cpumask) &&
		    cpumask_weight(dev->cpumask) == 1 &&
		    !tick_is_broadcast_device(dev)) {
			BUG_ON(!clockevent_state_detached(dev));
			list_del(&dev->list);
		}
	}

	raw_spin_unlock(&clockevents_lock);
}
#endif

#ifdef CONFIG_SYSFS
static const struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};
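
/*
 * Assuming the usual system bus layout, this results in sysfs entries
 * roughly like:
 *
 *	/sys/devices/system/clockevents/clockevent<N>/current_device
 *	/sys/devices/system/clockevents/clockevent<N>/unbind_device
 *	/sys/devices/system/clockevents/broadcast/current_device
 *
 * with one clockevent<N> directory per possible CPU.
 */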

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t current_device_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = sysfs_emit(buf, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR_RO(current_device);

/* We don't support the abomination of removable broadcast devices */
static ssize_t unbind_device_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce = NULL, *iter;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(iter, &clockevent_devices, list) {
		if (!strcmp(iter->name, name)) {
			ret = __clockevents_try_unbind(iter, dev->id);
			ce = iter;
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_device);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */