xref: /linux/include/rv/ltl_monitor.h (revision 0b1b4a3d8ebec3c42231c306d4b9a5153d047674)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file must be combined with the $(MODEL_NAME).h file generated by
 * tools/verification/rvgen.
 */
6 
7 #include <linux/args.h>
8 #include <linux/rv.h>
9 #include <linux/stringify.h>
10 #include <linux/seq_buf.h>
11 #include <rv/instrumentation.h>
12 #include <trace/events/task.h>
13 #include <trace/events/sched.h>
14 
15 #ifndef MONITOR_NAME
16 #error "Please include $(MODEL_NAME).h generated by rvgen"
17 #endif
18 
19 #define RV_MONITOR_NAME CONCATENATE(rv_, MONITOR_NAME)
20 static struct rv_monitor RV_MONITOR_NAME;
21 
22 static int ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
23 
24 static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
25 static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);
26 
ltl_get_monitor(struct task_struct * task)27 static struct ltl_monitor *ltl_get_monitor(struct task_struct *task)
28 {
29 	return &task->rv[ltl_monitor_slot].ltl_mon;
30 }
31 
/*
 * Reset @task's monitor state: no active Buchi-automaton state and every
 * atomic proposition unknown, then let the model set/fetch initial atom
 * values. @task_creation tells the model whether @task is newly forked or
 * an already-existing task picked up when the monitor was enabled.
 */
static void ltl_task_init(struct task_struct *task, bool task_creation)
{
	struct ltl_monitor *mon = ltl_get_monitor(task);
	int atom;

	memset(&mon->states, 0, sizeof(mon->states));

	/* Until the model says otherwise, every atom's value is unknown. */
	for (atom = 0; atom < LTL_NUM_ATOM; atom++)
		__set_bit(atom, mon->unknown_atoms);

	ltl_atoms_init(task, mon, task_creation);
	ltl_atoms_fetch(task, mon);
}
44 
/*
 * task_newtask tracepoint handler: initialize the monitor state of every
 * newly created task so it is tracked from birth.
 */
static void handle_task_newtask(void *data, struct task_struct *task, u64 flags)
{
	ltl_task_init(task, true);
}
49 
/*
 * ltl_monitor_init - enable the monitor
 *
 * Reserve a per-task monitor slot, hook task creation, and initialize the
 * monitor state of every task that already exists, including the per-CPU
 * idle tasks (which are not reachable via the task list).
 *
 * Returns 0 on success, or the negative error from
 * rv_get_task_monitor_slot() if no slot is available.
 */
static int ltl_monitor_init(void)
{
	struct task_struct *g, *p;
	int ret, cpu;

	ret = rv_get_task_monitor_slot();
	if (ret < 0)
		return ret;

	ltl_monitor_slot = ret;

	/* Attach the probe first so tasks forked from here on are not missed. */
	rv_attach_trace_probe(name, task_newtask, handle_task_newtask);

	read_lock(&tasklist_lock);

	for_each_process_thread(g, p)
		ltl_task_init(p, false);

	/* Idle tasks are not on the task list; walk them per present CPU. */
	for_each_present_cpu(cpu)
		ltl_task_init(idle_task(cpu), false);

	read_unlock(&tasklist_lock);

	return 0;
}
75 
/*
 * ltl_monitor_destroy - disable the monitor
 *
 * Detach the task-creation probe before releasing the per-task slot, so no
 * handler can touch the slot after it is returned to the pool.
 */
static void ltl_monitor_destroy(void)
{
	rv_detach_trace_probe(name, task_newtask, handle_task_newtask);

	rv_put_task_monitor_slot(ltl_monitor_slot);
	ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
}
83 
/*
 * Report a specification violation by @task: fire the monitor's generated
 * error tracepoint, then invoke the configured RV reactor.
 */
static void ltl_illegal_state(struct task_struct *task, struct ltl_monitor *mon)
{
	CONCATENATE(trace_error_, MONITOR_NAME)(task);
	rv_react(&RV_MONITOR_NAME, "rv: "__stringify(MONITOR_NAME)": %s[%d]: violation detected\n",
		 task->comm, task->pid);
}
90 
/*
 * Try to start monitoring @task: the automaton can only begin once every
 * atomic proposition has a known value.
 */
static void ltl_attempt_start(struct task_struct *task, struct ltl_monitor *mon)
{
	if (!rv_ltl_all_atoms_known(mon))
		return;

	ltl_start(task, mon);
}
96 
/*
 * Record a definite value for @atom in @mon: clear its "unknown" flag and
 * store @value in the atoms bitmap.
 */
static inline void ltl_atom_set(struct ltl_monitor *mon, enum ltl_atom atom, bool value)
{
	__clear_bit(atom, mon->unknown_atoms);

	if (!value) {
		__clear_bit(atom, mon->atoms);
		return;
	}

	__set_bit(atom, mon->atoms);
}
105 
/*
 * Emit the monitor's generated tracepoint describing one automaton step:
 * the current Buchi state set, the atoms that are currently true, and the
 * computed @next_state set.
 */
static void
ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long *next_state)
{
	const char *format_str = "%s";
	DECLARE_SEQ_BUF(atoms, 64);
	char states[32], next[32];
	int i;

	/* Skip the string formatting entirely when the event is disabled. */
	if (!CONCATENATE(CONCATENATE(trace_event_, MONITOR_NAME), _enabled)())
		return;

	/* Render both state bitmaps as bit-lists ("0-2,5" style). */
	snprintf(states, sizeof(states), "%*pbl", RV_MAX_BA_STATES, mon->states);
	snprintf(next, sizeof(next), "%*pbl", RV_MAX_BA_STATES, next_state);

	/* Build a comma-separated list of the atoms that are currently true. */
	for (i = 0; i < LTL_NUM_ATOM; ++i) {
		if (test_bit(i, mon->atoms)) {
			seq_buf_printf(&atoms, format_str, ltl_atom_str(i));
			format_str = ",%s";
		}
	}

	CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next);
}
129 
/*
 * Advance the Buchi automaton by one step for @task: compute the union of
 * successor states of all currently active states under the current atom
 * values, trace the transition, commit it, and report a violation if no
 * state remains active.
 */
static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
{
	DECLARE_BITMAP(next_states, RV_MAX_BA_STATES) = {0};

	/* Nothing to do until the monitor has been started for this task. */
	if (!rv_ltl_valid_state(mon))
		return;

	for (unsigned int i = 0; i < RV_NUM_BA_STATES; ++i) {
		if (test_bit(i, mon->states))
			ltl_possible_next_states(mon, i, next_states);
	}

	/* Trace before committing, so the event shows both old and new sets. */
	ltl_trace_event(task, mon, next_states);

	memcpy(mon->states, next_states, sizeof(next_states));

	/* An empty successor set means the specification was violated. */
	if (!rv_ltl_valid_state(mon))
		ltl_illegal_state(task, mon);
}
149 
/*
 * Update @atom to @value for @task and step the automaton. If the monitor
 * has not started yet (no valid state), try to start it instead — it may
 * just have learned the last unknown atom.
 */
static void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
{
	struct ltl_monitor *mon = ltl_get_monitor(task);

	ltl_atom_set(mon, atom, value);
	/* Give the model a chance to refresh any derived atoms. */
	ltl_atoms_fetch(task, mon);

	if (!rv_ltl_valid_state(mon)) {
		ltl_attempt_start(task, mon);
		return;
	}

	ltl_validate(task, mon);
}
164 
/*
 * Pulse @atom for @task: set it to @value and step the automaton, then
 * immediately flip it back to !@value and step again. Used for atoms that
 * model instantaneous events rather than persistent conditions.
 */
static void __maybe_unused ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
{
	struct ltl_monitor *mon = ltl_get_monitor(task);

	ltl_atom_update(task, atom, value);

	ltl_atom_set(mon, atom, !value);
	ltl_validate(task, mon);
}
174