/* SPDX-License-Identifier: GPL-2.0 */
/**
 * This file must be combined with the $(MODEL_NAME).h file generated by
 * tools/verification/rvgen.
 */

#include <linux/args.h>
#include <linux/rv.h>
#include <linux/stringify.h>
#include <linux/seq_buf.h>
#include <rv/instrumentation.h>
#include <trace/events/task.h>
#include <trace/events/sched.h>

#ifndef MONITOR_NAME
#error "Please include $(MODEL_NAME).h generated by rvgen"
#endif

#ifdef CONFIG_RV_REACTORS
#define RV_MONITOR_NAME CONCATENATE(rv_, MONITOR_NAME)
static struct rv_monitor RV_MONITOR_NAME;

static void rv_cond_react(struct task_struct *task)
{
	if (!rv_reacting_on() || !RV_MONITOR_NAME.react)
		return;
	RV_MONITOR_NAME.react("rv: "__stringify(MONITOR_NAME)": %s[%d]: violation detected\n",
			      task->comm, task->pid);
}
#else
static void rv_cond_react(struct task_struct *task)
{
}
#endif

static int ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;

/* Must be defined by the monitor source that includes this file. */
static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);

static struct ltl_monitor *ltl_get_monitor(struct task_struct *task)
{
	return &task->rv[ltl_monitor_slot].ltl_mon;
}

static void ltl_task_init(struct task_struct *task, bool task_creation)
{
	struct ltl_monitor *mon = ltl_get_monitor(task);

	memset(&mon->states, 0, sizeof(mon->states));

	for (int i = 0; i < LTL_NUM_ATOM; ++i)
		__set_bit(i, mon->unknown_atoms);

	ltl_atoms_init(task, mon, task_creation);
	ltl_atoms_fetch(task, mon);
}

static void handle_task_newtask(void *data, struct task_struct *task, unsigned long flags)
{
	ltl_task_init(task, true);
}

static int ltl_monitor_init(void)
{
	struct task_struct *g, *p;
	int ret, cpu;

	ret = rv_get_task_monitor_slot();
	if (ret < 0)
		return ret;

	ltl_monitor_slot = ret;

	rv_attach_trace_probe(name, task_newtask, handle_task_newtask);

	read_lock(&tasklist_lock);

	for_each_process_thread(g, p)
		ltl_task_init(p, false);

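	/*
	 * Idle tasks are not covered by for_each_process_thread() above, so
	 * initialize their monitors separately, one for each present CPU.
	 */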
	for_each_present_cpu(cpu)
		ltl_task_init(idle_task(cpu), false);

	read_unlock(&tasklist_lock);

	return 0;
}

static void ltl_monitor_destroy(void)
{
	rv_detach_trace_probe(name, task_newtask, handle_task_newtask);

	rv_put_task_monitor_slot(ltl_monitor_slot);
	ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
}

static void ltl_illegal_state(struct task_struct *task, struct ltl_monitor *mon)
{
	CONCATENATE(trace_error_, MONITOR_NAME)(task);
	rv_cond_react(task);
}

static void ltl_attempt_start(struct task_struct *task, struct ltl_monitor *mon)
{
	if (rv_ltl_all_atoms_known(mon))
		ltl_start(task, mon);
}

static inline void ltl_atom_set(struct ltl_monitor *mon, enum ltl_atom atom, bool value)
{
	__clear_bit(atom, mon->unknown_atoms);
	if (value)
		__set_bit(atom, mon->atoms);
	else
		__clear_bit(atom, mon->atoms);
}

static void
ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long *next_state)
{
	const char *format_str = "%s";
	DECLARE_SEQ_BUF(atoms, 64);
	char states[32], next[32];
	int i;

	if (!CONCATENATE(CONCATENATE(trace_event_, MONITOR_NAME), _enabled)())
		return;

	snprintf(states, sizeof(states), "%*pbl", RV_MAX_BA_STATES, mon->states);
	snprintf(next, sizeof(next), "%*pbl", RV_MAX_BA_STATES, next_state);

	for (i = 0; i < LTL_NUM_ATOM; ++i) {
		if (test_bit(i, mon->atoms)) {
			seq_buf_printf(&atoms, format_str, ltl_atom_str(i));
			format_str = ",%s";
		}
	}

	CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next);
}

static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
{
	DECLARE_BITMAP(next_states, RV_MAX_BA_STATES) = {0};

	if (!rv_ltl_valid_state(mon))
		return;

	for (unsigned int i = 0; i < RV_NUM_BA_STATES; ++i) {
		if (test_bit(i, mon->states))
			ltl_possible_next_states(mon, i, next_states);
	}

	ltl_trace_event(task, mon, next_states);

	memcpy(mon->states, next_states, sizeof(next_states));

	if (!rv_ltl_valid_state(mon))
		ltl_illegal_state(task, mon);
}

/*
 * ltl_atom_update - set the new value of an atomic proposition for @task and
 * advance the monitor's Buchi automaton by one step.
 */
static void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
{
	struct ltl_monitor *mon = ltl_get_monitor(task);

	ltl_atom_set(mon, atom, value);
	ltl_atoms_fetch(task, mon);

	if (!rv_ltl_valid_state(mon))
		ltl_attempt_start(task, mon);

	ltl_validate(task, mon);
}

/*
 * ltl_atom_pulse - like ltl_atom_update(), but @value only holds for a single
 * step: the atom is immediately flipped back to !@value and validated again,
 * which suits atoms describing instantaneous events.
 */
static void __maybe_unused ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
{
	struct ltl_monitor *mon = ltl_get_monitor(task);

	ltl_atom_update(task, atom, value);

	ltl_atom_set(mon, atom, !value);
	ltl_validate(task, mon);
}
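
/*
 * Example of how a monitor typically feeds this machinery (illustrative
 * sketch only: the handler and the LTL_RUNNING atom below are hypothetical;
 * a real monitor uses the atoms generated from its own .ltl model and its
 * own instrumentation points):
 *
 *	static void handle_sched_switch(void *data, bool preempt,
 *					struct task_struct *prev,
 *					struct task_struct *next,
 *					unsigned int prev_state)
 *	{
 *		ltl_atom_update(prev, LTL_RUNNING, false);
 *		ltl_atom_update(next, LTL_RUNNING, true);
 *	}
 *
 * For events that are only momentarily true, ltl_atom_pulse() can be used
 * instead of a pair of ltl_atom_update() calls.
 */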