xref: /linux/kernel/time/tick-common.c (revision 4407f967441aa1adfc11f739e8e9ec0f38fa839f)
/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the next tick event time
 */
ktime_t tick_next_period;
ktime_t tick_period;

/*
 * tick_do_timer_cpu is a timer core internal variable which holds the number
 * of the CPU responsible for calling do_timer(), i.e. the timekeeping stuff.
 * This variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion CPUs trying to grab the
 *    timekeeping lock all at once. Only the CPU which is assigned to do the
 *    update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non-existent CPU. So the next CPU which looks
 *    at it will take over and keep the timekeeping alive. The handover
 *    procedure also covers CPU hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
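 *
 * Return: non-zero when the current CPU's tick device can be used in
 * oneshot mode, either directly (the device keeps running in deep
 * C-states) or via the oneshot broadcast mechanism.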
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}

/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
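		/*
		 * jiffies_lock is a seqlock; lockless readers (e.g. the
		 * tick_next_period snapshot in tick_setup_periodic())
		 * retry when they race with this write section.
		 */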
		write_seqlock(&jiffies_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&jiffies_lock);
		update_wall_time();
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
	/*
	 * The CPU might have transitioned to HIGHRES or NOHZ mode via
	 * update_process_times() -> run_local_timers() ->
	 * hrtimer_run_queues().
	 */
	if (dev->event_handler != tick_handle_periodic)
		return;
#endif

	if (!clockevent_state_oneshot(dev))
		return;
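	/*
	 * clockevents_program_event() returns 0 on success and an error
	 * (-ETIME) when the requested expiry is already in the past, so
	 * the loop below keeps advancing next by one period until a
	 * future event has been armed.
	 */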
	for (;;) {
		/*
		 * Set up the next period for devices which do not have
		 * periodic mode:
		 */
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite loop,
		 * as tick_periodic() increments jiffies, which in turn
		 * advances time, possibly causing the loop to trigger
		 * again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}

/*
 * Set up the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

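		/*
		 * Snapshot tick_next_period under the jiffies_lock
		 * seqcount and retry if a concurrent tick moves it.
		 */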
		do {
			seq = read_seqbegin(&jiffies_lock);
			next = tick_next_period;
		} while (read_seqretry(&jiffies_lock, seq));

		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

/*
 * Set up the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup?
	 */
	if (!td->evtdev) {
		/*
		 * If no CPU took the do_timer update, assign it to
		 * this CPU:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			if (!tick_nohz_full_cpu(cpu))
				tick_do_timer_cpu = cpu;
			else
				tick_do_timer_cpu = TICK_DO_TIMER_NONE;
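			/*
			 * Anchor the first tick "now"; one tick period
			 * is NSEC_PER_SEC / HZ nanoseconds.
			 */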
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Start up in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

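	/*
	 * The handler and expiry time stashed above are handed to
	 * tick_setup_oneshot() below when the old device was in oneshot
	 * mode, so the active setup survives the device swap.
	 */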
	td->evtdev = newdev;

	/*
	 * When the device is not per-CPU, pin the interrupt to the
	 * current CPU:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns != 0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

void tick_install_replacement(struct clock_event_device *newdev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}

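/*
 * Check whether the new device can serve as the per-CPU tick device for
 * @cpu: it must cover @cpu at all, and a device which is not strictly
 * CPU-local is acceptable only when its interrupt affinity can be set
 * and no CPU-local device is installed already.
 */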
static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
	/* Prefer an existing CPU-local device */
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}

static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
	/* Prefer oneshot capable device */
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}

	/*
	 * Use the higher rated one, but prefer a CPU-local device with a
	 * lower rating than a non-CPU-local device
	 */
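	/*
	 * The cpumask clause is decisive regardless of rating: since
	 * tick_check_percpu() already rejected a non-local newdev when a
	 * CPU-local curdev exists, a differing cpumask here effectively
	 * prefers the more CPU-local device.
	 */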
	return !curdev ||
	       newdev->rating > curdev->rating ||
	       !cpumask_equal(curdev->cpumask, newdev->cpumask);
}

/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL!
 */
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}

/*
 * Check if the newly registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* CPU-local device? */
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;

	/* Preference decision */
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;

	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace the existing device, if any, with the new device. If
	 * the current device is the broadcast device, do not give it
	 * back to the clockevents layer!
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device?
	 */
	tick_install_broadcast_device(newdev);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Transfer the do_timer job away from a dying CPU.
 *
 * Called with interrupts disabled. No locking required. If
 * tick_do_timer_cpu is owned by this CPU, nothing can change it.
 */
void tick_handover_do_timer(void)
{
	if (tick_do_timer_cpu == smp_processor_id()) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}

/*
 * Shut down an event device on a given CPU:
 *
 * This is called on a live CPU, when another CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
void tick_shutdown(unsigned int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
#endif

/**
 * tick_suspend_local - Suspend the local tick device
 *
 * Called from the local CPU for freeze with interrupts disabled.
 *
 * No locks required. Nothing can change the per-CPU device.
 */
void tick_suspend_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	clockevents_shutdown(td->evtdev);
}

/**
 * tick_resume_local - Resume the local tick device
 *
 * Called from the local CPU for unfreeze or XEN resume magic.
 *
 * No locks required. Nothing can change the per-CPU device.
 */
void tick_resume_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool broadcast = tick_resume_check_broadcast();

	clockevents_tick_resume(td->evtdev);
	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
}

/**
 * tick_suspend - Suspend the tick and the broadcast device
 *
 * Called from syscore_suspend() via timekeeping_suspend() with only one
 * CPU online and interrupts disabled, or from tick_freeze() under
 * tick_freeze_lock.
 *
 * No locks required. Nothing can change the per-CPU device.
 */
void tick_suspend(void)
{
	tick_suspend_local();
	tick_suspend_broadcast();
}

/**
 * tick_resume - Resume the tick and the broadcast device
 *
 * Called from syscore_resume() via timekeeping_resume() with only one
 * CPU online and interrupts disabled.
 *
 * No locks required. Nothing can change the per-CPU device.
 */
void tick_resume(void)
{
	tick_resume_broadcast();
	tick_resume_local();
}

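/*
 * Serializes tick_freeze() / tick_unfreeze() and protects
 * tick_freeze_depth, the number of CPUs currently frozen.
 */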
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;

/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping.  Otherwise suspend the local tick.
 *
 * Call with interrupts disabled.  Must be balanced with tick_unfreeze().
 * Interrupts must not be enabled before the subsequent tick_unfreeze().
 */
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus())
		timekeeping_suspend();
	else
		tick_suspend_local();

	raw_spin_unlock(&tick_freeze_lock);
}

/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping.  Otherwise resume the local tick.
 *
 * Call with interrupts disabled.  Must be balanced with tick_freeze().
 * Interrupts must not be enabled after the preceding tick_freeze().
 */
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus())
		timekeeping_resume();
	else
		tick_resume_local();

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}


/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
	tick_broadcast_init();
	tick_nohz_init();
}