/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tick related global functions
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/static_key.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU)
extern int tick_cpu_dying(unsigned int cpu);
extern void tick_assert_timekeeping_handover(void);
#else
#define tick_cpu_dying	NULL
static inline void tick_assert_timekeeping_handover(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
extern void tick_freeze(void);
extern void tick_unfreeze(void);
#else
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
#endif

#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
# ifndef arch_needs_cpu
#  define arch_needs_cpu() (0)
# endif
# else
static inline void tick_irq_enter(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
#else
static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
#endif

enum tick_broadcast_mode {
	TICK_BROADCAST_OFF,
	TICK_BROADCAST_ON,
	TICK_BROADCAST_FORCE,
};

enum tick_broadcast_state {
	TICK_BROADCAST_EXIT,
	TICK_BROADCAST_ENTER,
};

extern struct static_key_false arch_needs_tick_broadcast;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern void tick_broadcast_control(enum tick_broadcast_mode mode);
#else
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return 0;
}
#endif

static inline void tick_broadcast_enable(void)
{
	tick_broadcast_control(TICK_BROADCAST_ON);
}
static inline void tick_broadcast_disable(void)
{
	tick_broadcast_control(TICK_BROADCAST_OFF);
}
static inline void tick_broadcast_force(void)
{
	tick_broadcast_control(TICK_BROADCAST_FORCE);
}
static inline int tick_broadcast_enter(void)
{
	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}
static inline void tick_broadcast_exit(void)
{
	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}
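
/*
 * Usage sketch: roughly how a caller such as the cpuidle core brackets a
 * deep idle state whose CPU-local timer stops with the broadcast oneshot
 * hooks above.  The helpers state_stops_local_timer() and
 * enter_deep_idle_state() are hypothetical; cpuidle does the equivalent
 * for states flagged CPUIDLE_FLAG_TIMER_STOP.
 *
 *	if (state_stops_local_timer) {
 *		// Hand the wakeup over to the broadcast device.  A non-zero
 *		// return means broadcast is not usable on this CPU right
 *		// now, so fall back to a shallower state instead.
 *		if (tick_broadcast_enter())
 *			return -EBUSY;
 *	}
 *	enter_deep_idle_state();
 *	if (state_stops_local_timer)
 *		tick_broadcast_exit();
 */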

enum tick_dep_bits {
	TICK_DEP_BIT_POSIX_TIMER	= 0,
	TICK_DEP_BIT_PERF_EVENTS	= 1,
	TICK_DEP_BIT_SCHED		= 2,
	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3,
	TICK_DEP_BIT_RCU		= 4,
	TICK_DEP_BIT_RCU_EXP		= 5
};
#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP

#define TICK_DEP_MASK_NONE		0
#define TICK_DEP_MASK_POSIX_TIMER	(1 << TICK_DEP_BIT_POSIX_TIMER)
#define TICK_DEP_MASK_PERF_EVENTS	(1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED		(1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE	(1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
#define TICK_DEP_MASK_RCU		(1 << TICK_DEP_BIT_RCU)
#define TICK_DEP_MASK_RCU_EXP		(1 << TICK_DEP_BIT_RCU_EXP)

#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
extern bool tick_nohz_is_active(void);
extern bool tick_nohz_tick_stopped(void);
extern bool tick_nohz_tick_stopped_cpu(int cpu);
extern void tick_nohz_idle_stop_tick(void);
extern void tick_nohz_idle_retain_tick(void);
extern void tick_nohz_idle_restart_tick(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern bool tick_nohz_idle_got_tick(void);
extern ktime_t tick_nohz_get_next_hrtimer(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline bool tick_nohz_is_active(void) { return false; }
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
static inline void tick_nohz_idle_stop_tick(void) { }
static inline void tick_nohz_idle_retain_tick(void) { }
static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
static inline bool tick_nohz_idle_got_tick(void) { return false; }
static inline ktime_t tick_nohz_get_next_hrtimer(void)
{
	/* Next wake up is the tick period, assume it starts now */
	return ktime_add(ktime_get(), TICK_NSEC);
}
static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	*delta_next = TICK_NSEC;
	return *delta_next;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
#endif /* !CONFIG_NO_HZ_COMMON */
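
/*
 * Usage sketch: the expected call ordering of the NO_HZ idle hooks above,
 * roughly mirroring what the core idle loop (do_idle()) does.  stop_tick
 * and do_cpu_idle_work() are placeholders for the idle-state selection
 * normally made by the cpuidle governor.
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched()) {
 *		if (stop_tick)
 *			tick_nohz_idle_stop_tick();
 *		else
 *			tick_nohz_idle_retain_tick();
 *		do_cpu_idle_work();
 *	}
 *	tick_nohz_idle_exit();
 */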

/*
 * Mask of CPUs that are nohz_full.
 *
 * Users should be guarded by CONFIG_NO_HZ_FULL or a tick_nohz_full_cpu()
 * check.
 */
extern cpumask_var_t tick_nohz_full_mask;

#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;

static inline bool tick_nohz_full_enabled(void)
{
	if (!context_tracking_enabled())
		return false;

	return tick_nohz_full_running;
}

/*
 * Check if a CPU is part of the nohz_full subset. Arrange for evaluating
 * the cpu expression (typically smp_processor_id()) _after_ the static
 * key.
 */
#define tick_nohz_full_cpu(_cpu) ({					\
	bool __ret = false;						\
	if (tick_nohz_full_enabled())					\
		__ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask);	\
	__ret;								\
})

extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_set_task(struct task_struct *tsk,
				   enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit);
extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);

/*
 * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
 * on top of static keys.
 */
static inline void tick_dep_set(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set(bit);
}

static inline void tick_dep_clear(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear(bit);
}

static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_set_cpu(cpu, bit);
}

static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_clear_cpu(cpu, bit);
}

static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_task(tsk, bit);
}

static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_task(tsk, bit);
}

static inline void tick_dep_init_task(struct task_struct *tsk)
{
	atomic_set(&tsk->tick_dep_mask, 0);
}

static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_signal(tsk, bit);
}
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_signal(signal, bit);
}

extern void tick_nohz_full_kick_cpu(int cpu);
extern void __tick_nohz_task_switch(void);
extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }

static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }

static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_init_task(struct task_struct *tsk) { }
static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit) { }

static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void __tick_nohz_task_switch(void) { }
static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { }
#endif
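
/*
 * Usage sketch: a subsystem with per-tick work pending on a nohz_full CPU
 * keeps the tick alive by setting a dependency bit and drops it once the
 * work is gone; for instance, the scheduler and RCU use TICK_DEP_BIT_SCHED
 * and TICK_DEP_BIT_RCU this way.  The surrounding conditions below are
 * hypothetical.
 *
 *	// Periodic work appeared on 'cpu': the tick must keep running there.
 *	tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU);
 *	...
 *	// Last per-tick user on 'cpu' went away: allow the tick to stop again.
 *	tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU);
 *
 * The wrappers above are no-ops when nohz_full is not enabled, and the _cpu
 * variants additionally check that the CPU is in tick_nohz_full_mask.
 */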

static inline void tick_nohz_task_switch(void)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_task_switch();
}

static inline void tick_nohz_user_enter_prepare(void)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		rcu_nocb_flush_deferred_wakeup();
}

#endif
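
/*
 * Typical call sites (sketch): tick_nohz_task_switch() is expected to run
 * from the scheduler's context-switch tail so the incoming task's tick
 * dependencies are re-evaluated, and tick_nohz_user_enter_prepare() from
 * the return-to-user path so deferred RCU nocb wakeups are flushed before
 * a nohz_full CPU stops its tick.
 */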