/*
 *  linux/kernel/time/tick-sched.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  NOHZ (tickless idle) implementation for both low and high resolution
 *  timers.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding jiffies_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta < tick_period)
		return;

	/* Reevaluate with jiffies_lock held */
	write_seqlock(&jiffies_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta >= tick_period) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta >= tick_period)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	} else {
		write_sequnlock(&jiffies_lock);
		return;
	}
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}
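
/*
 * Worked example (illustrative, assuming HZ=1000, i.e. tick_period = 1ms):
 * if now - last_jiffies_update is 5.5ms, the path above first consumes one
 * period (delta becomes 4.5ms), the slow path adds ticks = 4.5ms / 1ms = 4
 * more, and do_timer(++ticks) then advances jiffies by 5 in total, leaving
 * last_jiffies_update 0.5ms behind now.
 */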

/*
 * Initialize and return the time of the last jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&jiffies_lock);
	/* Did we start the jiffies update yet? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&jiffies_lock);
	return period;
}

static void tick_sched_do_timer(ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
	    && !tick_nohz_full_cpu(cpu))
		tick_do_timer_cpu = cpu;
#endif

	/* Check if the jiffies counter needs an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on completely idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
cpumask_var_t housekeeping_mask;
bool tick_nohz_full_running;
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	return false;
}

static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_func,
};

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by the scheduler and perf events in
 * order to manage event throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs a local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work is not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
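
/*
 * Usage sketch (hypothetical caller, not taken from this file): a subsystem
 * that must keep the tick alive on CPU 3 while it throttles events would
 * bracket that window with:
 *
 *	tick_nohz_dep_set_cpu(3, TICK_DEP_BIT_PERF_EVENTS);
 *	...
 *	tick_nohz_dep_clear_cpu(3, TICK_DEP_BIT_PERF_EVENTS);
 *
 * While the bit is set, can_stop_full_tick() sees it in ts->tick_dep_mask
 * and refuses to stop the tick on that CPU.
 */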

/*
 * Set a per-task tick dependency. POSIX CPU timers need this in order to
 * expire per-task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	/*
	 * We could optimize this with just kicking the target running the task
	 * if that noise matters for nohz full users.
	 */
	tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}

/*
 * Set a per-taskgroup tick dependency. POSIX CPU timers need this in order
 * to expire per-process timers.
 */
void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	unsigned long flags;
	struct tick_sched *ts;

	local_irq_save(flags);

	if (!tick_nohz_full_cpu(smp_processor_id()))
		goto out;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
out:
	local_irq_restore(flags);
}

/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
		pr_warn("NO_HZ: Incorrect nohz_full cpumask\n");
		free_bootmem_cpumask_var(tick_nohz_full_mask);
		return 1;
	}
	tick_nohz_full_running = true;

	return 1;
}
__setup("nohz_full=", tick_nohz_full_setup);
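
/*
 * Example (illustrative): on an 8-CPU machine, booting with "nohz_full=1-7"
 * marks CPUs 1-7 as full dynticks and leaves CPU 0 as the housekeeping CPU
 * (timekeeping, unbound timers, workqueues, ...). If the boot CPU is
 * included in the range, tick_nohz_init() below clears it from the mask.
 */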

static int tick_nohz_cpu_down(unsigned int cpu)
{
	/*
	 * The boot CPU handles housekeeping duty (unbound timers,
	 * workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return -EBUSY;
	return 0;
}

static int tick_nohz_init_all(void)
{
	int err = -1;

#ifdef CONFIG_NO_HZ_FULL_ALL
	if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
		WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n");
		return err;
	}
	err = 0;
	cpumask_setall(tick_nohz_full_mask);
	tick_nohz_full_running = true;
#endif
	return err;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running) {
		if (tick_nohz_init_all() < 0)
			return;
	}

	if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
		WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling from
	 * safe locking contexts. That requires irq work to be able to raise
	 * its own interrupts, to avoid a circular dependency on the tick.
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		cpumask_copy(housekeeping_mask, cpu_possible_mask);
		tick_nohz_full_running = false;
		return;
	}

	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
		pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
			cpu);
		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
	}

	cpumask_andnot(housekeeping_mask,
		       cpu_possible_mask, tick_nohz_full_mask);

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));

	/*
	 * We need at least one CPU to handle housekeeping work such
	 * as timekeeping, unbound timers, workqueues, ...
	 */
	WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);
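
/*
 * Example (illustrative): booting with "nohz=off" keeps the periodic tick
 * running even on idle CPUs, while "nohz=on" (the default) lets idle CPUs
 * stop it. kstrtobool() also accepts the 0/1 and y/n spellings.
 */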

int tick_nohz_tick_stopped(void)
{
	return __this_cpu_read(tick_cpu_sched.tick_stopped);
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

/*
 * Updates the per-CPU time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event(0);
}

static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
	ktime_t now = ktime_get();

	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
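
/*
 * Example (hypothetical caller, assuming NOHZ is active): estimating the
 * idle fraction of CPU 0 over a measurement window:
 *
 *	u64 t0, t1, idle0, idle1;
 *
 *	idle0 = get_cpu_idle_time_us(0, &t0);
 *	... let some time pass ...
 *	idle1 = get_cpu_idle_time_us(0, &t1);
 *
 * The idle fraction over the window is (idle1 - idle0) / (t1 - t0), with
 * both numerator and denominator in microseconds.
 */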

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, tick_period);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
	else
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
					 ktime_t now, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
	unsigned long seq, basejiff;
	ktime_t tick;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&jiffies_lock);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqretry(&jiffies_lock, seq));
	ts->last_jiffies = basejiff;

	if (rcu_needs_cpu(basemono, &next_rcu) ||
	    arch_needs_cpu() || irq_work_needs_cpu()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tmr = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tmr;
		/* Take the next rcu event into account */
		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or,
	 * if it was already stopped, prod the timer hardware.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		tick = 0;

		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped)
			goto out;

		/*
		 * If, OTOH, we did stop it, but there's a pending (expired)
		 * timer, reprogram the timer hardware to fire now.
		 *
		 * We will not restart the tick proper, just prod the timer
		 * hardware into firing an interrupt to process the pending
		 * timers. Just like tick_irq_exit() will not restart the tick
		 * for 'normal' interrupts.
		 *
		 * Only once we exit the idle loop will we re-enable the tick,
		 * see tick_nohz_idle_exit().
		 */
		if (delta == 0) {
			tick_nohz_restart(ts, now);
			goto out;
		}
	}

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here, the jiffies might be stale and
	 * do_timer() is never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last. If this CPU
	 * is the one which had the do_timer() duty last, we limit the
	 * sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		delta = KTIME_MAX;
		ts->do_timer_last = 0;
	} else if (!ts->do_timer_last) {
		delta = KTIME_MAX;
	}

#ifdef CONFIG_NO_HZ_FULL
	/* Limit the tick delta to the maximum scheduler deferment */
	if (!ts->inidle)
		delta = min(delta, scheduler_tick_max_deferment());
#endif

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	expires = min_t(u64, expires, next_tick);
	tick = expires;

	/* Skip reprogramming the event if it hasn't changed */
	if (ts->tick_stopped && (expires == dev->next_event))
		goto out;

	/*
	 * tick_nohz_stop_sched_tick() can be called several times before
	 * tick_nohz_restart_sched_tick() is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in tick_nohz_restart_sched_tick().
	 */
	if (!ts->tick_stopped) {
		nohz_balance_enter_idle(cpu);
		calc_load_enter_idle();
		cpu_load_update_nohz_start();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		goto out;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
		hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
	else
		tick_program_event(tick, 1);
out:
	/* Update the estimated sleep length */
	ts->sleep_length = ktime_sub(dev->next_event, now);
	return tick;
}
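
/*
 * Worked example (illustrative, HZ=250 so TICK_NSEC = 4ms): if the next
 * timer is 100ms away, delta = 100ms > TICK_NSEC and the tick gets stopped.
 * A CPU that does not hold the do_timer() duty may sleep the full 100ms,
 * while the CPU that held the duty drops it above and is additionally
 * capped by timekeeping_max_deferment().
 */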

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);
	cpu_load_update_nohz_stop();

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_exit_idle();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);
}

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here, the jiffies might be stale and do_timer() is
	 * never invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
		ts->sleep_length = NSEC_PER_SEC / HZ;
		return false;
	}

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10 &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ: local_softirq_pending %02x\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around.
		 */
		if (tick_do_timer_cpu == cpu)
			return false;
		/*
		 * Boot safety: make sure the timekeeping duty has been
		 * assigned before entering dyntick-idle mode.
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
			return false;
	}

	return true;
}

static void __tick_nohz_idle_enter(struct tick_sched *ts)
{
	ktime_t now, expires;
	int cpu = smp_processor_id();

	now = tick_nohz_start_idle(ts);

	if (can_stop_idle_tick(cpu, ts)) {
		int was_stopped = ts->tick_stopped;

		ts->idle_calls++;

		expires = tick_nohz_stop_sched_tick(ts, now, cpu);
		if (expires > 0LL) {
			ts->idle_sleeps++;
			ts->idle_expires = expires;
		}

		if (!was_stopped && ts->tick_stopped)
			ts->idle_jiffies = ts->last_jiffies;
	}
}

/**
 * tick_nohz_idle_enter - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle
 * tick. Called when we start the idle loop.
 *
 * The arch is responsible for calling:
 *
 * - rcu_idle_enter() after its last use of RCU before the CPU is put
 *   to sleep.
 * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	WARN_ON_ONCE(irqs_disabled());

	/*
	 * Update the idle state in the scheduler domain hierarchy
	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
	 * State will be updated to busy during the first busy tick after
	 * exiting idle.
	 */
	set_cpu_sd_state_idle();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);
	ts->inidle = 1;
	__tick_nohz_idle_enter(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		__tick_nohz_idle_enter(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->sleep_length;
}
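
/*
 * Example (illustrative): the menu cpuidle governor uses this value to
 * bound the predicted idle duration when it picks a C-state. The value is
 * normally computed as dev->next_event - now at the end of
 * tick_nohz_stop_sched_tick() above.
 */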

static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unsigned long ticks;

	if (vtime_accounting_cpu_enabled())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it only does one tick worth of accounting.
	 * Make sure the time we slept is accounted as idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);

	ts->inidle = 0;

	if (ts->idle_active || ts->tick_stopped)
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);

	if (ts->tick_stopped) {
		tick_nohz_restart_sched_tick(ts, now);
		tick_nohz_account_idle_ticks(ts);
	}

	local_irq_enable();
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(now);
	tick_sched_handle(ts, regs);

	/* No need to reprogram if we are running tickless */
	if (unlikely(ts->tick_stopped))
		return;

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_migration(true);
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}

static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(now);

	/*
	 * Do not call tick_sched_handle() when we are not in irq context
	 * and have no valid regs pointer.
	 */
	if (regs)
		tick_sched_handle(ts, regs);

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);
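
/*
 * Example (illustrative): with "skew_tick=1", HZ=1000 and four possible
 * CPUs, tick_setup_sched_timer() below staggers the per-CPU tick hrtimers
 * by (1ms / 2) / 4 = 125us times the CPU id, so the CPUs do not all
 * contend on jiffies_lock at the same instant.
 */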

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check if a change happened which makes oneshot mode possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}