/* SPDX-License-Identifier: GPL-2.0 */
/**
 * This file must be combined with the $(MODEL_NAME).h file generated by
 * tools/verification/rvgen.
 */

#include <linux/args.h>
#include <linux/rv.h>
#include <linux/stringify.h>
#include <linux/seq_buf.h>
#include <rv/instrumentation.h>
#include <trace/events/task.h>
#include <trace/events/sched.h>

#ifndef MONITOR_NAME
#error "Please include $(MODEL_NAME).h generated by rvgen"
#endif

#ifdef CONFIG_RV_REACTORS
#define RV_MONITOR_NAME CONCATENATE(rv_, MONITOR_NAME)
static struct rv_monitor RV_MONITOR_NAME;

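/*
 * Call the selected reactor, if reacting is enabled and a reactor was set for
 * this monitor, with a message identifying the monitor and the offending task.
 */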
static void rv_cond_react(struct task_struct *task)
{
	if (!rv_reacting_on() || !RV_MONITOR_NAME.react)
		return;
	RV_MONITOR_NAME.react("rv: "__stringify(MONITOR_NAME)": %s[%d]: violation detected\n",
			      task->comm, task->pid);
}
#else
static void rv_cond_react(struct task_struct *task)
{
}
#endif

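/*
 * Index of this monitor's per-task data in task_struct::rv[]; assigned in
 * ltl_monitor_init() and released again in ltl_monitor_destroy().
 */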
static int ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;

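/*
 * Not defined here: these two hooks must be provided by the monitor that
 * includes this file, alongside the rvgen-generated $(MODEL_NAME).h.
 */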
static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);

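/* Return this monitor's per-task state for @task. */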
static struct ltl_monitor *ltl_get_monitor(struct task_struct *task)
{
	return &task->rv[ltl_monitor_slot].ltl_mon;
}

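/*
 * (Re)initialise the monitor for @task: clear the automaton states, mark all
 * atomic propositions as unknown, then let the monitor initialise and fetch
 * the atoms it can already determine.
 */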
static void ltl_task_init(struct task_struct *task, bool task_creation)
{
	struct ltl_monitor *mon = ltl_get_monitor(task);

	memset(&mon->states, 0, sizeof(mon->states));

	for (int i = 0; i < LTL_NUM_ATOM; ++i)
		__set_bit(i, mon->unknown_atoms);

	ltl_atoms_init(task, mon, task_creation);
	ltl_atoms_fetch(task, mon);
}

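/* task_newtask tracepoint handler: start monitoring newly created tasks. */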
static void handle_task_newtask(void *data, struct task_struct *task, unsigned long flags)
{
	ltl_task_init(task, true);
}

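/*
 * Reserve a per-task monitor slot, attach to the task_newtask tracepoint and
 * initialise the monitor for all existing tasks, including the per-CPU idle
 * tasks.
 */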
static int ltl_monitor_init(void)
{
	struct task_struct *g, *p;
	int ret, cpu;

	ret = rv_get_task_monitor_slot();
	if (ret < 0)
		return ret;

	ltl_monitor_slot = ret;

	rv_attach_trace_probe(name, task_newtask, handle_task_newtask);

	read_lock(&tasklist_lock);

	for_each_process_thread(g, p)
		ltl_task_init(p, false);

	for_each_present_cpu(cpu)
		ltl_task_init(idle_task(cpu), false);

	read_unlock(&tasklist_lock);

	return 0;
}

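/* Undo ltl_monitor_init(): detach the tracepoint and release the slot. */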
static void ltl_monitor_destroy(void)
{
	rv_detach_trace_probe(name, task_newtask, handle_task_newtask);

	rv_put_task_monitor_slot(ltl_monitor_slot);
	ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
}

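/* A violation was detected: emit the error tracepoint and possibly react. */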
static void ltl_illegal_state(struct task_struct *task, struct ltl_monitor *mon)
{
	CONCATENATE(trace_error_, MONITOR_NAME)(task);
	rv_cond_react(task);
}

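/* The automaton can only start once every atomic proposition is known. */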
static void ltl_attempt_start(struct task_struct *task, struct ltl_monitor *mon)
{
	if (rv_ltl_all_atoms_known(mon))
		ltl_start(task, mon);
}

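/* Record a known value for @atom without advancing the automaton. */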
static inline void ltl_atom_set(struct ltl_monitor *mon, enum ltl_atom atom, bool value)
{
	__clear_bit(atom, mon->unknown_atoms);
	if (value)
		__set_bit(atom, mon->atoms);
	else
		__clear_bit(atom, mon->atoms);
}

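/*
 * Emit the monitor's event tracepoint, if enabled, with the current states,
 * the atoms that are true, and the possible next states.
 */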
static void
ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long *next_state)
{
	const char *format_str = "%s";
	DECLARE_SEQ_BUF(atoms, 64);
	char states[32], next[32];
	int i;

	if (!CONCATENATE(CONCATENATE(trace_event_, MONITOR_NAME), _enabled)())
		return;

	snprintf(states, sizeof(states), "%*pbl", RV_MAX_BA_STATES, mon->states);
	snprintf(next, sizeof(next), "%*pbl", RV_MAX_BA_STATES, next_state);

	for (i = 0; i < LTL_NUM_ATOM; ++i) {
		if (test_bit(i, mon->atoms)) {
			seq_buf_printf(&atoms, format_str, ltl_atom_str(i));
			format_str = ",%s";
		}
	}

	CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next);
}

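/*
 * Advance the automaton: compute the union of possible next states over the
 * current states, trace the transition, then flag a violation if no valid
 * next state remains.
 */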
static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
{
	DECLARE_BITMAP(next_states, RV_MAX_BA_STATES) = {0};

	if (!rv_ltl_valid_state(mon))
		return;

	for (unsigned int i = 0; i < RV_NUM_BA_STATES; ++i) {
		if (test_bit(i, mon->states))
			ltl_possible_next_states(mon, i, next_states);
	}

	ltl_trace_event(task, mon, next_states);

	memcpy(mon->states, next_states, sizeof(next_states));

	if (!rv_ltl_valid_state(mon))
		ltl_illegal_state(task, mon);
}

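/*
 * Set @atom to @value and refresh the remaining atoms, then either try to
 * (re)start the automaton if it is not in a valid state, or validate the
 * resulting transition.
 */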
static void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
{
	struct ltl_monitor *mon = ltl_get_monitor(task);

	ltl_atom_set(mon, atom, value);
	ltl_atoms_fetch(task, mon);

	if (!rv_ltl_valid_state(mon)) {
		ltl_attempt_start(task, mon);
		return;
	}

	ltl_validate(task, mon);
}

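/*
 * For atoms that are only momentarily true (or false): apply @value as a
 * normal update, then immediately revert the atom and validate again.
 */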
static void __maybe_unused ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
{
	struct ltl_monitor *mon = ltl_get_monitor(task);

	ltl_atom_update(task, atom, value);

	ltl_atom_set(mon, atom, !value);
	ltl_validate(task, mon);
}

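/*
 * Illustrative sketch only, not part of this header: a monitor built on this
 * file typically attaches tracepoint handlers that translate kernel events
 * into atomic propositions, for example (with a hypothetical handler and a
 * hypothetical LTL_SOME_ATOM defined by the generated $(MODEL_NAME).h):
 *
 *	static void handle_some_event(void *data, struct task_struct *task)
 *	{
 *		ltl_atom_update(task, LTL_SOME_ATOM, true);
 *	}
 *
 * Events that are true only for an instant would use ltl_atom_pulse() instead.
 */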