// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/tracepoint.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rv.h>
#include <linux/sched/deadline.h>
#include <linux/sched/rt.h>
#include <rv/instrumentation.h>

#define MODULE_NAME "sleep"

#include <trace/events/syscalls.h>
#include <trace/events/sched.h>
#include <trace/events/lock.h>
#include <uapi/linux/futex.h>
#include <rv_trace.h>
#include <monitors/rtapp/rtapp.h>

#include "sleep.h"
#include <rv/ltl_monitor.h>

static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon)
{
	/*
	 * This includes "actual" real-time tasks and also PI-boosted
	 * tasks. A PI-boosted task is blocking an "actual" real-time
	 * task, so it must also obey the monitor's rules; otherwise the
	 * "actual" real-time task may be delayed.
	 */
	ltl_atom_set(mon, LTL_RT, rt_or_dl_task(task));
}

static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation)
{
	ltl_atom_set(mon, LTL_SLEEP, false);
	ltl_atom_set(mon, LTL_WAKE, false);
	ltl_atom_set(mon, LTL_ABORT_SLEEP, false);
	ltl_atom_set(mon, LTL_WOKEN_BY_HARDIRQ, false);
	ltl_atom_set(mon, LTL_WOKEN_BY_NMI, false);
	ltl_atom_set(mon, LTL_WOKEN_BY_EQUAL_OR_HIGHER_PRIO, false);

	if (task_creation) {
		ltl_atom_set(mon, LTL_KTHREAD_SHOULD_STOP, false);
		ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_MONOTONIC, false);
		ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_TAI, false);
		ltl_atom_set(mon, LTL_NANOSLEEP_TIMER_ABSTIME, false);
		ltl_atom_set(mon, LTL_CLOCK_NANOSLEEP, false);
		ltl_atom_set(mon, LTL_FUTEX_WAIT, false);
		ltl_atom_set(mon, LTL_FUTEX_LOCK_PI, false);
		ltl_atom_set(mon, LTL_BLOCK_ON_RT_MUTEX, false);
	}

	if (task->flags & PF_KTHREAD) {
		ltl_atom_set(mon, LTL_KERNEL_THREAD, true);

		/* kernel threads do not make syscalls */
		ltl_atom_set(mon, LTL_FUTEX_WAIT, false);
		ltl_atom_set(mon, LTL_FUTEX_LOCK_PI, false);
		ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_MONOTONIC, false);
		ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_TAI, false);
		ltl_atom_set(mon, LTL_NANOSLEEP_TIMER_ABSTIME, false);
		ltl_atom_set(mon, LTL_CLOCK_NANOSLEEP, false);

		if (strstarts(task->comm, "migration/"))
			ltl_atom_set(mon, LTL_TASK_IS_MIGRATION, true);
		else
			ltl_atom_set(mon, LTL_TASK_IS_MIGRATION, false);

		if (strstarts(task->comm, "rcu"))
			ltl_atom_set(mon, LTL_TASK_IS_RCU, true);
		else
			ltl_atom_set(mon, LTL_TASK_IS_RCU, false);
	} else {
		ltl_atom_set(mon, LTL_KTHREAD_SHOULD_STOP, false);
		ltl_atom_set(mon, LTL_KERNEL_THREAD, false);
		ltl_atom_set(mon, LTL_TASK_IS_RCU, false);
		ltl_atom_set(mon, LTL_TASK_IS_MIGRATION, false);
	}
}

static void handle_sched_set_state(void *data, struct task_struct *task, int state)
{
	if (state & TASK_INTERRUPTIBLE)
		ltl_atom_pulse(task, LTL_SLEEP, true);
	else if (state == TASK_RUNNING)
		/* Setting the state back to TASK_RUNNING means the sleep attempt was abandoned. */
		ltl_atom_pulse(task, LTL_ABORT_SLEEP, true);
}

static void handle_sched_wakeup(void *data, struct task_struct *task)
{
	ltl_atom_pulse(task, LTL_WAKE, true);
}

/* Record the context the wakeup comes from: hard interrupt, NMI, or another task. */
static void handle_sched_waking(void *data, struct task_struct *task)
{
	if (this_cpu_read(hardirq_context)) {
		ltl_atom_pulse(task, LTL_WOKEN_BY_HARDIRQ, true);
	} else if (in_task()) {
		/* A lower prio value means higher priority: the waker is of equal or higher priority. */
		if (current->prio <= task->prio)
			ltl_atom_pulse(task, LTL_WOKEN_BY_EQUAL_OR_HIGHER_PRIO, true);
	} else if (in_nmi()) {
		ltl_atom_pulse(task, LTL_WOKEN_BY_NMI, true);
	}
}

static void handle_contention_begin(void *data, void *lock, unsigned int flags)
{
	if (flags & LCB_F_RT)
		ltl_atom_update(current, LTL_BLOCK_ON_RT_MUTEX, true);
}

static void handle_contention_end(void *data, void *lock, int ret)
{
	ltl_atom_update(current, LTL_BLOCK_ON_RT_MUTEX, false);
}

static void handle_sys_enter(void *data, struct pt_regs *regs, long id)
{
	struct ltl_monitor *mon;
	unsigned long args[6];
	int op, cmd;

	mon = ltl_get_monitor(current);

	switch (id) {
#ifdef __NR_clock_nanosleep
	case __NR_clock_nanosleep:
#endif
#ifdef __NR_clock_nanosleep_time64
	case __NR_clock_nanosleep_time64:
#endif
		syscall_get_arguments(current, regs, args);
		ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_MONOTONIC, args[0] == CLOCK_MONOTONIC);
		ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_TAI, args[0] == CLOCK_TAI);
		ltl_atom_set(mon, LTL_NANOSLEEP_TIMER_ABSTIME, args[1] == TIMER_ABSTIME);
		ltl_atom_update(current, LTL_CLOCK_NANOSLEEP, true);
		break;

#ifdef __NR_futex
	case __NR_futex:
#endif
#ifdef __NR_futex_time64
	case __NR_futex_time64:
#endif
		syscall_get_arguments(current, regs, args);
		op = args[1];
		cmd = op & FUTEX_CMD_MASK;

		switch (cmd) {
		case FUTEX_LOCK_PI:
		case FUTEX_LOCK_PI2:
			ltl_atom_update(current, LTL_FUTEX_LOCK_PI, true);
			break;
		case FUTEX_WAIT:
		case FUTEX_WAIT_BITSET:
		case FUTEX_WAIT_REQUEUE_PI:
			ltl_atom_update(current, LTL_FUTEX_WAIT, true);
			break;
		}
		break;
	}
}

static void handle_sys_exit(void *data, struct pt_regs *regs, long ret)
{
	struct ltl_monitor *mon = ltl_get_monitor(current);

	ltl_atom_set(mon, LTL_FUTEX_LOCK_PI, false);
	ltl_atom_set(mon, LTL_FUTEX_WAIT, false);
	ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_MONOTONIC, false);
	ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_TAI, false);
	ltl_atom_set(mon, LTL_NANOSLEEP_TIMER_ABSTIME, false);
	ltl_atom_update(current, LTL_CLOCK_NANOSLEEP, false);
}

static void handle_kthread_stop(void *data, struct task_struct *task)
{
	/* FIXME: this could race with other tracepoint handlers */
	ltl_atom_update(task, LTL_KTHREAD_SHOULD_STOP, true);
}

static int enable_sleep(void)
{
	int retval;

	retval = ltl_monitor_init();
	if (retval)
		return retval;

	rv_attach_trace_probe("rtapp_sleep", sched_waking, handle_sched_waking);
	rv_attach_trace_probe("rtapp_sleep", sched_wakeup, handle_sched_wakeup);
	rv_attach_trace_probe("rtapp_sleep", sched_set_state_tp, handle_sched_set_state);
	rv_attach_trace_probe("rtapp_sleep", contention_begin, handle_contention_begin);
	rv_attach_trace_probe("rtapp_sleep", contention_end, handle_contention_end);
	rv_attach_trace_probe("rtapp_sleep", sched_kthread_stop, handle_kthread_stop);
	rv_attach_trace_probe("rtapp_sleep", sys_enter, handle_sys_enter);
	rv_attach_trace_probe("rtapp_sleep", sys_exit, handle_sys_exit);
	return 0;
}

static void disable_sleep(void)
{
	rv_detach_trace_probe("rtapp_sleep", sched_waking, handle_sched_waking);
	rv_detach_trace_probe("rtapp_sleep", sched_wakeup, handle_sched_wakeup);
	rv_detach_trace_probe("rtapp_sleep", sched_set_state_tp, handle_sched_set_state);
	rv_detach_trace_probe("rtapp_sleep", contention_begin, handle_contention_begin);
	rv_detach_trace_probe("rtapp_sleep", contention_end, handle_contention_end);
	rv_detach_trace_probe("rtapp_sleep", sched_kthread_stop, handle_kthread_stop);
	rv_detach_trace_probe("rtapp_sleep", sys_enter, handle_sys_enter);
	rv_detach_trace_probe("rtapp_sleep", sys_exit, handle_sys_exit);

	ltl_monitor_destroy();
}

static struct rv_monitor rv_sleep = {
	.name = "sleep",
	.description = "Monitor that RT tasks do not undesirably sleep",
	.enable = enable_sleep,
	.disable = disable_sleep,
};

static int __init register_sleep(void)
{
	return rv_register_monitor(&rv_sleep, &rv_rtapp);
}

static void __exit unregister_sleep(void)
{
	rv_unregister_monitor(&rv_sleep);
}

module_init(register_sleep);
module_exit(unregister_sleep);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nam Cao <namcao@linutronix.de>");
MODULE_DESCRIPTION("sleep: Monitor that RT tasks do not undesirably sleep");