Lines matching full:ts — identifier search hits for the struct tick_sched pointer 'ts'; the functions shown below belong to the kernel's NOHZ tick code, kernel/time/tick-sched.c.

184 static inline int tick_sched_flag_test(struct tick_sched *ts,  in tick_sched_flag_test()  argument
187 return !!(ts->flags & flag); in tick_sched_flag_test()
190 static inline void tick_sched_flag_set(struct tick_sched *ts, in tick_sched_flag_set() argument
194 ts->flags |= flag; in tick_sched_flag_set()
197 static inline void tick_sched_flag_clear(struct tick_sched *ts, in tick_sched_flag_clear() argument
201 ts->flags &= ~flag; in tick_sched_flag_clear()
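
The three helpers matched at lines 184-201 funnel every access to ts->flags through test/set/clear operations. Below is a minimal, self-contained reconstruction of that bit-flag accessor pattern as it appears in the fragments above; the TS_FLAG_* values, the reduced struct, and the demo main() are illustrative assumptions, not the kernel definitions.

#include <stdio.h>

/* Illustrative stand-ins for the TS_FLAG_* bits; the real values
 * and the full flag set live in the kernel headers. */
#define TS_FLAG_STOPPED	(1UL << 0)
#define TS_FLAG_INIDLE	(1UL << 1)

struct tick_sched {
	unsigned long flags;	/* only the field the helpers touch */
};

/* Return 0/1 for a single flag, as in the matched fragment. */
static inline int tick_sched_flag_test(struct tick_sched *ts,
				       unsigned long flag)
{
	return !!(ts->flags & flag);
}

static inline void tick_sched_flag_set(struct tick_sched *ts,
				       unsigned long flag)
{
	ts->flags |= flag;
}

static inline void tick_sched_flag_clear(struct tick_sched *ts,
					 unsigned long flag)
{
	ts->flags &= ~flag;
}

int main(void)
{
	struct tick_sched ts = { 0 };

	tick_sched_flag_set(&ts, TS_FLAG_INIDLE);
	printf("INIDLE=%d STOPPED=%d\n",
	       tick_sched_flag_test(&ts, TS_FLAG_INIDLE),
	       tick_sched_flag_test(&ts, TS_FLAG_STOPPED));
	tick_sched_flag_clear(&ts, TS_FLAG_INIDLE);
	return 0;
}
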
210 static bool tick_limited_update_jiffies64(struct tick_sched *ts, ktime_t now) in tick_limited_update_jiffies64() argument
219 if (ts->last_tick_jiffies == jiffies) in tick_limited_update_jiffies64()
227 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) in tick_sched_do_timer() argument
259 if (ts->last_tick_jiffies != jiffies) { in tick_sched_do_timer()
260 ts->stalled_jiffies = 0; in tick_sched_do_timer()
261 ts->last_tick_jiffies = READ_ONCE(jiffies); in tick_sched_do_timer()
263 if (++ts->stalled_jiffies >= MAX_STALLED_JIFFIES) { in tick_sched_do_timer()
264 if (tick_limited_update_jiffies64(ts, now)) { in tick_sched_do_timer()
265 ts->stalled_jiffies = 0; in tick_sched_do_timer()
266 ts->last_tick_jiffies = READ_ONCE(jiffies); in tick_sched_do_timer()
271 if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) in tick_sched_do_timer()
272 ts->got_idle_tick = 1; in tick_sched_do_timer()
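
The tick_sched_do_timer() matches at lines 259-266 show the jiffies-stall detection: if jiffies advanced since this CPU's last tick, the stall counter resets; otherwise it counts ticks, and once MAX_STALLED_JIFFIES is reached a limited jiffies64 update is attempted. The sketch below reproduces only that counter logic; the threshold value and the try_limited_jiffies_update() hook are illustrative stand-ins, not the kernel's tick_limited_update_jiffies64().

#include <stdio.h>
#include <stdbool.h>

#define MAX_STALLED_JIFFIES 5	/* illustrative threshold */

struct stall_state {
	unsigned long last_tick_jiffies;
	unsigned int stalled_jiffies;
};

/* Hypothetical hook standing in for the limited jiffies64 update;
 * returns true when it managed to push jiffies forward. */
static bool try_limited_jiffies_update(void)
{
	return true;
}

/* Per-tick check mirroring the matched fragments. */
static void tick_check_stall(struct stall_state *st, unsigned long jiffies)
{
	if (st->last_tick_jiffies != jiffies) {
		st->stalled_jiffies = 0;
		st->last_tick_jiffies = jiffies;
	} else if (++st->stalled_jiffies >= MAX_STALLED_JIFFIES) {
		if (try_limited_jiffies_update()) {
			st->stalled_jiffies = 0;
			st->last_tick_jiffies = jiffies;
		}
	}
}

int main(void)
{
	struct stall_state st = { 0 };
	int i;

	for (i = 0; i < 7; i++)	/* jiffies stuck at 100 for several ticks */
		tick_check_stall(&st, 100);
	printf("stalled ticks currently counted: %u\n", st.stalled_jiffies);
	return 0;
}
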
275 static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) in tick_sched_handle() argument
286 tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { in tick_sched_handle()
289 ts->idle_jiffies++; in tick_sched_handle()
295 ts->next_tick = 0; in tick_sched_handle()
308 struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer); in tick_nohz_handler() local
312 tick_sched_do_timer(ts, now); in tick_nohz_handler()
319 tick_sched_handle(ts, regs); in tick_nohz_handler()
321 ts->next_tick = 0; in tick_nohz_handler()
328 if (unlikely(tick_sched_flag_test(ts, TS_FLAG_STOPPED))) in tick_nohz_handler()
380 static bool can_stop_full_tick(int cpu, struct tick_sched *ts) in can_stop_full_tick() argument
390 if (check_tick_dependency(&ts->tick_dep_mask)) in can_stop_full_tick()
530 struct tick_sched *ts; in tick_nohz_dep_set_cpu() local
532 ts = per_cpu_ptr(&tick_cpu_sched, cpu); in tick_nohz_dep_set_cpu()
534 prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_set_cpu()
552 struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); in tick_nohz_dep_clear_cpu() local
554 atomic_andnot(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_clear_cpu()
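
The tick_nohz_dep_set_cpu()/tick_nohz_dep_clear_cpu() matches at lines 530-554 show the tick-dependency mask being updated with atomic OR and AND-NOT, with the OR returning the previous mask value to the caller. A minimal C11 sketch of that pattern follows; the global mask and helper names are illustrative, not the kernel's atomic_t API.

#include <stdatomic.h>
#include <stdio.h>

#define BIT(nr)	(1U << (nr))

/* Illustrative stand-in for ts->tick_dep_mask. */
static atomic_uint tick_dep_mask;

/* Set a dependency bit and return the mask as it was before,
 * mirroring the atomic_fetch_or() in the matched fragment. */
static unsigned int tick_dep_set(unsigned int bit)
{
	return atomic_fetch_or(&tick_dep_mask, BIT(bit));
}

/* Clear a dependency bit, mirroring atomic_andnot(). */
static void tick_dep_clear(unsigned int bit)
{
	atomic_fetch_and(&tick_dep_mask, ~BIT(bit));
}

int main(void)
{
	printf("prev mask: %#x\n", tick_dep_set(2));
	printf("prev mask: %#x\n", tick_dep_set(2));
	tick_dep_clear(2);
	printf("after clear: %#x\n", (unsigned int)atomic_load(&tick_dep_mask));
	return 0;
}
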
607 struct tick_sched *ts; in __tick_nohz_task_switch() local
612 ts = this_cpu_ptr(&tick_cpu_sched); in __tick_nohz_task_switch()
614 if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { in __tick_nohz_task_switch()
709 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_nohz_tick_stopped() local
711 return tick_sched_flag_test(ts, TS_FLAG_STOPPED); in tick_nohz_tick_stopped()
716 struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); in tick_nohz_tick_stopped_cpu() local
718 return tick_sched_flag_test(ts, TS_FLAG_STOPPED); in tick_nohz_tick_stopped_cpu()
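
The matches at lines 709-718 contrast the two per-CPU access styles used throughout this file: this_cpu_ptr() for the local CPU's tick_sched and per_cpu_ptr() for an arbitrary CPU. The sketch below models that with a plain array and a hypothetical this_cpu() helper; it is an analogy for the pattern, not the kernel's per-CPU machinery.

#include <stdio.h>

#define NR_CPUS 4	/* illustrative CPU count */

struct tick_sched {
	int tick_stopped;
};

/* Stand-in for the per-CPU tick_cpu_sched variable. */
static struct tick_sched tick_cpu_sched[NR_CPUS];

/* Hypothetical: the kernel derives this from the running CPU. */
static int this_cpu(void)
{
	return 0;
}

/* Query the local CPU, in the spirit of tick_nohz_tick_stopped(). */
static int tick_stopped_local(void)
{
	return tick_cpu_sched[this_cpu()].tick_stopped;
}

/* Query an arbitrary CPU, in the spirit of tick_nohz_tick_stopped_cpu(). */
static int tick_stopped_cpu(int cpu)
{
	return tick_cpu_sched[cpu].tick_stopped;
}

int main(void)
{
	tick_cpu_sched[1].tick_stopped = 1;
	printf("cpu0 stopped: %d, cpu1 stopped: %d\n",
	       tick_stopped_local(), tick_stopped_cpu(1));
	return 0;
}
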
745 static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now) in tick_nohz_stop_idle() argument
749 if (WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE))) in tick_nohz_stop_idle()
752 delta = ktime_sub(now, ts->idle_entrytime); in tick_nohz_stop_idle()
754 write_seqcount_begin(&ts->idle_sleeptime_seq); in tick_nohz_stop_idle()
756 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); in tick_nohz_stop_idle()
758 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); in tick_nohz_stop_idle()
760 ts->idle_entrytime = now; in tick_nohz_stop_idle()
761 tick_sched_flag_clear(ts, TS_FLAG_IDLE_ACTIVE); in tick_nohz_stop_idle()
762 write_seqcount_end(&ts->idle_sleeptime_seq); in tick_nohz_stop_idle()
767 static void tick_nohz_start_idle(struct tick_sched *ts) in tick_nohz_start_idle() argument
769 write_seqcount_begin(&ts->idle_sleeptime_seq); in tick_nohz_start_idle()
770 ts->idle_entrytime = ktime_get(); in tick_nohz_start_idle()
771 tick_sched_flag_set(ts, TS_FLAG_IDLE_ACTIVE); in tick_nohz_start_idle()
772 write_seqcount_end(&ts->idle_sleeptime_seq); in tick_nohz_start_idle()
777 static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime, in get_cpu_sleep_time_us() argument
791 seq = read_seqcount_begin(&ts->idle_sleeptime_seq); in get_cpu_sleep_time_us()
793 if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) { in get_cpu_sleep_time_us()
794 ktime_t delta = ktime_sub(now, ts->idle_entrytime); in get_cpu_sleep_time_us()
800 } while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq)); in get_cpu_sleep_time_us()
825 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in get_cpu_idle_time_us() local
827 return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime, in get_cpu_idle_time_us()
851 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in get_cpu_iowait_time_us() local
853 return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime, in get_cpu_iowait_time_us()
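
The matches at lines 745-853 trace the idle-time bookkeeping: tick_nohz_start_idle() records idle_entrytime, tick_nohz_stop_idle() folds the elapsed delta into idle_sleeptime (or iowait_sleeptime), and get_cpu_sleep_time_us() adds the still-running delta while idle is active. The sketch below reproduces only that accounting in plain C with a single time bucket; the kernel additionally splits idle vs iowait time and guards these fields with the idle_sleeptime_seq seqcount, both of which are omitted here, and the nanosecond counters stand in for ktime_t.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in: times are plain nanosecond counters. */
struct idle_stats {
	uint64_t idle_entrytime;
	uint64_t idle_sleeptime;
	bool idle_active;
};

static void start_idle(struct idle_stats *st, uint64_t now)
{
	st->idle_entrytime = now;
	st->idle_active = true;
}

static void stop_idle(struct idle_stats *st, uint64_t now)
{
	st->idle_sleeptime += now - st->idle_entrytime;
	st->idle_entrytime = now;
	st->idle_active = false;
}

/* Reader: accumulated idle time, plus the in-flight interval if the
 * CPU is currently idle, as in get_cpu_sleep_time_us(). */
static uint64_t get_idle_time(const struct idle_stats *st, uint64_t now)
{
	uint64_t total = st->idle_sleeptime;

	if (st->idle_active)
		total += now - st->idle_entrytime;
	return total;
}

int main(void)
{
	struct idle_stats st = { 0 };

	start_idle(&st, 1000);
	printf("at t=1500: %llu ns idle\n",
	       (unsigned long long)get_idle_time(&st, 1500));
	stop_idle(&st, 2000);
	printf("at t=2500: %llu ns idle\n",
	       (unsigned long long)get_idle_time(&st, 2500));
	return 0;
}
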
858 static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) in tick_nohz_restart() argument
860 hrtimer_cancel(&ts->sched_timer); in tick_nohz_restart()
861 hrtimer_set_expires(&ts->sched_timer, ts->last_tick); in tick_nohz_restart()
864 hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); in tick_nohz_restart()
866 if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { in tick_nohz_restart()
867 hrtimer_start_expires(&ts->sched_timer, in tick_nohz_restart()
870 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_restart()
877 ts->next_tick = 0; in tick_nohz_restart()
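
The tick_nohz_restart() matches at lines 858-877 restore the periodic tick by resetting the sched timer to last_tick and forwarding it in whole TICK_NSEC periods until it lies in the future. The sketch below shows only that forwarding arithmetic; the period value and function name are illustrative, not the hrtimer_forward() API.

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* illustrative: 1 ms tick period */

/* Advance an expiry in whole periods until it is strictly after 'now',
 * the arithmetic performed when the tick is restarted from last_tick. */
static uint64_t forward_expiry(uint64_t expires, uint64_t now, uint64_t period)
{
	if (expires > now)
		return expires;
	return expires + ((now - expires) / period + 1) * period;
}

int main(void)
{
	uint64_t last_tick = 5 * TICK_NSEC;
	uint64_t now = 42 * TICK_NSEC + 123;

	printf("next tick at %llu ns\n",
	       (unsigned long long)forward_expiry(last_tick, now, TICK_NSEC));
	return 0;
}
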
905 * @ts: pointer to tick_sched struct
913 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) in tick_nohz_next_event() argument
920 ts->last_jiffies = basejiff; in tick_nohz_next_event()
921 ts->timer_expires_base = basemono; in tick_nohz_next_event()
945 ts->next_timer = next_tick; in tick_nohz_next_event()
962 if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { in tick_nohz_next_event()
963 ts->timer_expires = 0; in tick_nohz_next_event()
976 (tick_cpu != TICK_DO_TIMER_NONE || !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST))) in tick_nohz_next_event()
985 ts->timer_expires = min_t(u64, expires, next_tick); in tick_nohz_next_event()
988 return ts->timer_expires; in tick_nohz_next_event()
991 static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) in tick_nohz_stop_tick() argument
994 unsigned long basejiff = ts->last_jiffies; in tick_nohz_stop_tick()
995 u64 basemono = ts->timer_expires_base; in tick_nohz_stop_tick()
996 bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED); in tick_nohz_stop_tick()
1001 ts->timer_expires_base = 0; in tick_nohz_stop_tick()
1008 if (expires > ts->timer_expires) { in tick_nohz_stop_tick()
1020 expires = ts->timer_expires; in tick_nohz_stop_tick()
1038 tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST); in tick_nohz_stop_tick()
1040 tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST); in tick_nohz_stop_tick()
1044 if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) { in tick_nohz_stop_tick()
1046 if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) in tick_nohz_stop_tick()
1049 WARN_ONCE(1, "basemono: %llu ts->next_tick: %llu dev->next_event: %llu " in tick_nohz_stop_tick()
1050 "timer->active: %d timer->expires: %llu\n", basemono, ts->next_tick, in tick_nohz_stop_tick()
1051 dev->next_event, hrtimer_active(&ts->sched_timer), in tick_nohz_stop_tick()
1052 hrtimer_get_expires(&ts->sched_timer)); in tick_nohz_stop_tick()
1062 if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { in tick_nohz_stop_tick()
1066 ts->last_tick = hrtimer_get_expires(&ts->sched_timer); in tick_nohz_stop_tick()
1067 tick_sched_flag_set(ts, TS_FLAG_STOPPED); in tick_nohz_stop_tick()
1071 ts->next_tick = expires; in tick_nohz_stop_tick()
1078 if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) in tick_nohz_stop_tick()
1079 hrtimer_cancel(&ts->sched_timer); in tick_nohz_stop_tick()
1085 if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { in tick_nohz_stop_tick()
1086 hrtimer_start(&ts->sched_timer, expires, in tick_nohz_stop_tick()
1089 hrtimer_set_expires(&ts->sched_timer, expires); in tick_nohz_stop_tick()
1094 static void tick_nohz_retain_tick(struct tick_sched *ts) in tick_nohz_retain_tick() argument
1096 ts->timer_expires_base = 0; in tick_nohz_retain_tick()
1100 static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu) in tick_nohz_full_stop_tick() argument
1102 if (tick_nohz_next_event(ts, cpu)) in tick_nohz_full_stop_tick()
1103 tick_nohz_stop_tick(ts, cpu); in tick_nohz_full_stop_tick()
1105 tick_nohz_retain_tick(ts); in tick_nohz_full_stop_tick()
1109 static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) in tick_nohz_restart_sched_tick() argument
1124 tick_sched_flag_clear(ts, TS_FLAG_STOPPED); in tick_nohz_restart_sched_tick()
1125 tick_nohz_restart(ts, now); in tick_nohz_restart_sched_tick()
1128 static void __tick_nohz_full_update_tick(struct tick_sched *ts, in __tick_nohz_full_update_tick() argument
1134 if (can_stop_full_tick(cpu, ts)) in __tick_nohz_full_update_tick()
1135 tick_nohz_full_stop_tick(ts, cpu); in __tick_nohz_full_update_tick()
1136 else if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) in __tick_nohz_full_update_tick()
1137 tick_nohz_restart_sched_tick(ts, now); in __tick_nohz_full_update_tick()
1141 static void tick_nohz_full_update_tick(struct tick_sched *ts) in tick_nohz_full_update_tick() argument
1146 if (!tick_sched_flag_test(ts, TS_FLAG_NOHZ)) in tick_nohz_full_update_tick()
1149 __tick_nohz_full_update_tick(ts, ktime_get()); in tick_nohz_full_update_tick()
1190 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) in can_stop_idle_tick() argument
1194 if (unlikely(!tick_sched_flag_test(ts, TS_FLAG_NOHZ))) in can_stop_idle_tick()
1228 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_nohz_idle_stop_tick() local
1236 if (ts->timer_expires_base) in tick_nohz_idle_stop_tick()
1237 expires = ts->timer_expires; in tick_nohz_idle_stop_tick()
1238 else if (can_stop_idle_tick(cpu, ts)) in tick_nohz_idle_stop_tick()
1239 expires = tick_nohz_next_event(ts, cpu); in tick_nohz_idle_stop_tick()
1243 ts->idle_calls++; in tick_nohz_idle_stop_tick()
1246 int was_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); in tick_nohz_idle_stop_tick()
1248 tick_nohz_stop_tick(ts, cpu); in tick_nohz_idle_stop_tick()
1250 ts->idle_sleeps++; in tick_nohz_idle_stop_tick()
1251 ts->idle_expires = expires; in tick_nohz_idle_stop_tick()
1253 if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { in tick_nohz_idle_stop_tick()
1254 ts->idle_jiffies = ts->last_jiffies; in tick_nohz_idle_stop_tick()
1258 tick_nohz_retain_tick(ts); in tick_nohz_idle_stop_tick()
1274 struct tick_sched *ts; in tick_nohz_idle_enter() local
1280 ts = this_cpu_ptr(&tick_cpu_sched); in tick_nohz_idle_enter()
1282 WARN_ON_ONCE(ts->timer_expires_base); in tick_nohz_idle_enter()
1284 tick_sched_flag_set(ts, TS_FLAG_INIDLE); in tick_nohz_idle_enter()
1285 tick_nohz_start_idle(ts); in tick_nohz_idle_enter()
1311 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_nohz_irq_exit() local
1313 if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) in tick_nohz_irq_exit()
1314 tick_nohz_start_idle(ts); in tick_nohz_irq_exit()
1316 tick_nohz_full_update_tick(ts); in tick_nohz_irq_exit()
1326 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_nohz_idle_got_tick() local
1328 if (ts->got_idle_tick) { in tick_nohz_idle_got_tick()
1329 ts->got_idle_tick = 0; in tick_nohz_idle_got_tick()
1364 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_nohz_get_sleep_length() local
1370 ktime_t now = ts->idle_entrytime; in tick_nohz_get_sleep_length()
1373 WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE)); in tick_nohz_get_sleep_length()
1377 if (!can_stop_idle_tick(cpu, ts)) in tick_nohz_get_sleep_length()
1380 next_event = tick_nohz_next_event(ts, cpu); in tick_nohz_get_sleep_length()
1389 hrtimer_next_event_without(&ts->sched_timer)); in tick_nohz_get_sleep_length()
1405 struct tick_sched *ts = tick_get_tick_sched(cpu); in tick_nohz_get_idle_calls_cpu() local
1407 return ts->idle_calls; in tick_nohz_get_idle_calls_cpu()
1410 static void tick_nohz_account_idle_time(struct tick_sched *ts, in tick_nohz_account_idle_time() argument
1415 ts->idle_exittime = now; in tick_nohz_account_idle_time()
1424 ticks = jiffies - ts->idle_jiffies; in tick_nohz_account_idle_time()
1434 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_nohz_idle_restart_tick() local
1436 if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { in tick_nohz_idle_restart_tick()
1438 tick_nohz_restart_sched_tick(ts, now); in tick_nohz_idle_restart_tick()
1439 tick_nohz_account_idle_time(ts, now); in tick_nohz_idle_restart_tick()
1443 static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now) in tick_nohz_idle_update_tick() argument
1446 __tick_nohz_full_update_tick(ts, now); in tick_nohz_idle_update_tick()
1448 tick_nohz_restart_sched_tick(ts, now); in tick_nohz_idle_update_tick()
1450 tick_nohz_account_idle_time(ts, now); in tick_nohz_idle_update_tick()
1471 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_nohz_idle_exit() local
1477 WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE)); in tick_nohz_idle_exit()
1478 WARN_ON_ONCE(ts->timer_expires_base); in tick_nohz_idle_exit()
1480 tick_sched_flag_clear(ts, TS_FLAG_INIDLE); in tick_nohz_idle_exit()
1481 idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE); in tick_nohz_idle_exit()
1482 tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); in tick_nohz_idle_exit()
1488 tick_nohz_stop_idle(ts, now); in tick_nohz_idle_exit()
1491 tick_nohz_idle_update_tick(ts, now); in tick_nohz_idle_exit()
1504 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_nohz_lowres_handler() local
1508 if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART)) in tick_nohz_lowres_handler()
1509 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_lowres_handler()
1512 static inline void tick_nohz_activate(struct tick_sched *ts) in tick_nohz_activate() argument
1516 tick_sched_flag_set(ts, TS_FLAG_NOHZ); in tick_nohz_activate()
1534 * Recycle the hrtimer in 'ts', so we can share the in tick_nohz_switch_to_nohz()
1542 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_nohz_irq_enter() local
1545 if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE)) in tick_nohz_irq_enter()
1548 if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)) in tick_nohz_irq_enter()
1549 tick_nohz_stop_idle(ts, now); in tick_nohz_irq_enter()
1557 if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) in tick_nohz_irq_enter()
1565 static inline void tick_nohz_activate(struct tick_sched *ts) { } in tick_nohz_activate() argument
1594 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_setup_sched_timer() local
1597 hrtimer_setup(&ts->sched_timer, tick_nohz_handler, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); in tick_setup_sched_timer()
1600 tick_sched_flag_set(ts, TS_FLAG_HIGHRES); in tick_setup_sched_timer()
1603 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); in tick_setup_sched_timer()
1610 hrtimer_add_expires_ns(&ts->sched_timer, offset); in tick_setup_sched_timer()
1613 hrtimer_forward_now(&ts->sched_timer, TICK_NSEC); in tick_setup_sched_timer()
1615 hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); in tick_setup_sched_timer()
1617 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_setup_sched_timer()
1618 tick_nohz_activate(ts); in tick_setup_sched_timer()
1627 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in tick_sched_timer_dying() local
1632 if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) in tick_sched_timer_dying()
1633 hrtimer_cancel(&ts->sched_timer); in tick_sched_timer_dying()
1635 idle_sleeptime = ts->idle_sleeptime; in tick_sched_timer_dying()
1636 iowait_sleeptime = ts->iowait_sleeptime; in tick_sched_timer_dying()
1637 idle_calls = ts->idle_calls; in tick_sched_timer_dying()
1638 idle_sleeps = ts->idle_sleeps; in tick_sched_timer_dying()
1639 memset(ts, 0, sizeof(*ts)); in tick_sched_timer_dying()
1640 ts->idle_sleeptime = idle_sleeptime; in tick_sched_timer_dying()
1641 ts->iowait_sleeptime = iowait_sleeptime; in tick_sched_timer_dying()
1642 ts->idle_calls = idle_calls; in tick_sched_timer_dying()
1643 ts->idle_sleeps = idle_sleeps; in tick_sched_timer_dying()
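
The tick_sched_timer_dying() matches at lines 1635-1643 show the CPU-hotplug teardown preserving the accumulated idle statistics across a full memset() of the per-CPU tick_sched. A minimal sketch of that save/clear/restore pattern is below, using a reduced field set; the extra other_state member only marks what gets wiped.

#include <string.h>
#include <stdio.h>

/* Reduced stand-in: only the fields the teardown path preserves,
 * plus one member to show that everything else is cleared. */
struct tick_sched {
	unsigned long long idle_sleeptime;
	unsigned long long iowait_sleeptime;
	unsigned long idle_calls;
	unsigned long idle_sleeps;
	int other_state;
};

static void reset_preserving_stats(struct tick_sched *ts)
{
	unsigned long long idle_sleeptime = ts->idle_sleeptime;
	unsigned long long iowait_sleeptime = ts->iowait_sleeptime;
	unsigned long idle_calls = ts->idle_calls;
	unsigned long idle_sleeps = ts->idle_sleeps;

	memset(ts, 0, sizeof(*ts));

	ts->idle_sleeptime = idle_sleeptime;
	ts->iowait_sleeptime = iowait_sleeptime;
	ts->idle_calls = idle_calls;
	ts->idle_sleeps = idle_sleeps;
}

int main(void)
{
	struct tick_sched ts = {
		.idle_sleeptime = 123, .idle_calls = 4, .other_state = 99,
	};

	reset_preserving_stats(&ts);
	printf("sleeptime=%llu calls=%lu other=%d\n",
	       ts.idle_sleeptime, ts.idle_calls, ts.other_state);
	return 0;
}
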
1662 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_oneshot_notify() local
1664 set_bit(0, &ts->check_clocks); in tick_oneshot_notify()
1677 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); in tick_check_oneshot_change() local
1679 if (!test_and_clear_bit(0, &ts->check_clocks)) in tick_check_oneshot_change()
1682 if (tick_sched_flag_test(ts, TS_FLAG_NOHZ)) in tick_check_oneshot_change()