// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
 *
 * This is the online Runtime Verification (RV) interface.
 *
 * RV is a lightweight (yet rigorous) method that complements classical
 * exhaustive verification techniques (such as model checking and
 * theorem proving) with a more practical approach to complex systems.
 *
 * RV works by analyzing the trace of the system's actual execution,
 * comparing it against a formal specification of the system behavior.
 * RV can give precise information on the runtime behavior of the
 * monitored system while enabling reactions to unexpected
 * events, avoiding, for example, the propagation of a failure in
 * safety-critical systems.
 *
 * The development of this interface is rooted in the work presented
 * in the paper:
 *
 * De Oliveira, Daniel Bristot; Cucinotta, Tommaso; De Oliveira, Romulo
 * Silva. Efficient formal verification for the Linux kernel. In:
 * International Conference on Software Engineering and Formal Methods.
 * Springer, Cham, 2019. p. 315-332.
 *
 * And:
 *
 * De Oliveira, Daniel Bristot, et al. Automata-based formal analysis
 * and verification of the real-time Linux kernel. PhD Thesis, 2020.
 *
 * == Runtime monitor interface ==
 *
 * A monitor is the central part of the runtime verification of a system.
 *
 * The monitor stands in between the formal specification of the desired
 * (or undesired) behavior, and the trace of the actual system.
 *
 * In Linux terms, the runtime verification monitors are encapsulated
 * inside the "RV monitor" abstraction. An RV monitor includes a reference
 * model of the system, a set of instances of the monitor (per-cpu monitor,
 * per-task monitor, and so on), and the helper functions that glue the
 * monitor to the system via trace. Generally, a monitor includes some form
 * of trace output as a reaction to event parsing and exceptions,
 * as depicted below:
 *
 * Linux  +----- RV Monitor -----------------------------------+ Formal
 *  Realm |                                                     |  Realm
 *  +-------------------+     +----------------+     +-----------------+
 *  |   Linux kernel    |     |     Monitor    |     |     Reference   |
 *  |     Tracing       |  -> |   Instance(s)  | <-  |       Model     |
 *  | (instrumentation) |     | (verification) |     | (specification) |
 *  +-------------------+     +----------------+     +-----------------+
 *        |                            |                        |
 *        |                            V                        |
 *        |                      +----------+                   |
 *        |                      | Reaction |                   |
 *        |                      +--+--+--+-+                   |
 *        |                         |  |  |                     |
 *        |                         |  |  +-> trace output ?    |
 *        +-------------------------|--|-----------------------+
 *                                  |  +----> panic ?
 *                                  +-------> <user-specified>
 *
 * This file implements the interface for loading RV monitors and
 * controlling the verification session.
 *
 * == Registering monitors ==
 *
 * The struct rv_monitor defines a set of callback functions to control
 * a verification session. For instance, when a given monitor is enabled,
 * the "enable" callback function is called to hook the instrumentation
 * functions to the kernel trace events. The "disable" function is called
 * when disabling the verification session.
 *
 * An RV monitor is registered via:
 *   int rv_register_monitor(struct rv_monitor *monitor, struct rv_monitor *parent);
 * (parent is NULL unless the monitor is nested inside another monitor.)
 * And unregistered via:
 *   int rv_unregister_monitor(struct rv_monitor *monitor);
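 *
 * For illustration only (hypothetical monitor name and callbacks; the
 * complete struct rv_monitor layout is defined in the RV headers), a
 * standalone monitor could be registered roughly like this, with
 * enable()/disable() hooking and unhooking the instrumentation and
 * reset() clearing the monitor's internal state:
 *
 *	static struct rv_monitor rv_example = {
 *		.name		= "example",
 *		.description	= "illustrative monitor, not a real one",
 *		.enable		= enable_example,
 *		.disable	= disable_example,
 *		.reset		= reset_example,
 *	};
 *
 *	rv_register_monitor(&rv_example, NULL);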
 *
 * == User interface ==
 *
 * The user interface resembles the kernel tracing interface. It
 * presents these files:
 *
85 * "available_monitors"
86 * - List the available monitors, one per line.
87 *
88 * For example:
89 * # cat available_monitors
90 * wip
91 * wwnr
92 *
93 * "enabled_monitors"
94 * - Lists the enabled monitors, one per line;
95 * - Writing to it enables a given monitor;
96 * - Writing a monitor name with a '!' prefix disables it;
97 * - Truncating the file disables all enabled monitors.
 *
 *   For example:
 *     # cat enabled_monitors
 *     # echo wip > enabled_monitors
 *     # echo wwnr >> enabled_monitors
 *     # cat enabled_monitors
 *     wip
 *     wwnr
 *     # echo '!wip' >> enabled_monitors
 *     # cat enabled_monitors
 *     wwnr
 *     # echo > enabled_monitors
 *     # cat enabled_monitors
 *     #
 *
 * Note that more than one monitor can be enabled concurrently.
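 *
 * Nested monitors are listed as "parent:child". When writing to
 * "enabled_monitors", the "parent:" prefix is optional; the name is
 * matched after trimming the parent prefix.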
 *
 * "monitoring_on"
 *   - A general on/off switch for monitoring. Note that it does not
 *     disable enabled monitors or detach events, but stops the
 *     per-entity monitors from processing the events received from
 *     the instrumentation. It resembles the "tracing_on" switch.
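 *
 *   For example (monitoring is on by default):
 *     # cat monitoring_on
 *     1
 *     # echo 0 > monitoring_on
 *     # cat monitoring_on
 *     0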
 *
 * "monitors/"
 *   Each monitor has its own directory inside "monitors/", where the
 *   monitor-specific files are presented.
 *   The "monitors/" directory resembles the "events" directory on
 *   tracefs.
 *
 *   For example:
 *     # cd monitors/wip/
 *     # ls
 *     desc  enable
 *     # cat desc
 *     auto-generated wakeup in preemptive monitor.
 *     # cat enable
 *     0
 *
 * For further information, see:
 *   Documentation/trace/rv/runtime-verification.rst
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>

#ifdef CONFIG_DA_MON_EVENTS
#define CREATE_TRACE_POINTS
#include <rv_trace.h>
#endif

#include "rv.h"

DEFINE_MUTEX(rv_interface_lock);

static struct rv_interface rv_root;

struct dentry *get_monitors_root(void)
{
	return rv_root.monitors_dir;
}

/*
 * Interface for monitor registration.
 */
LIST_HEAD(rv_monitors_list);

static int task_monitor_count;
static bool task_monitor_slots[RV_PER_TASK_MONITORS];

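/*
 * rv_get_task_monitor_slot - find a free per-task monitor slot
 *
 * Picks the first free entry in task_monitor_slots, marks it as used and
 * returns its index. Returns -EBUSY if all RV_PER_TASK_MONITORS slots are
 * already taken. Must be called with rv_interface_lock held.
 */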
int rv_get_task_monitor_slot(void)
{
	int i;

	lockdep_assert_held(&rv_interface_lock);

	if (task_monitor_count == RV_PER_TASK_MONITORS)
		return -EBUSY;

	task_monitor_count++;

	for (i = 0; i < RV_PER_TASK_MONITORS; i++) {
		if (task_monitor_slots[i] == false) {
			task_monitor_slots[i] = true;
			return i;
		}
	}

	WARN_ONCE(1, "RV task_monitor_count and slots are out of sync\n");

	return -EINVAL;
}

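/*
 * rv_put_task_monitor_slot - release a per-task monitor slot
 *
 * Marks the given slot as free again, warning if the slot is out of range
 * or was not in use. Must be called with rv_interface_lock held.
 */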
void rv_put_task_monitor_slot(int slot)
{
	lockdep_assert_held(&rv_interface_lock);

	if (slot < 0 || slot >= RV_PER_TASK_MONITORS) {
		WARN_ONCE(1, "RV releasing an invalid slot!: %d\n", slot);
		return;
	}

	WARN_ONCE(!task_monitor_slots[slot], "RV releasing unused task_monitor_slots: %d\n",
		  slot);

	task_monitor_count--;
	task_monitor_slots[slot] = false;
}

/*
 * Monitors with a parent are nested; monitors without a parent can be
 * standalone or containers.
 */
bool rv_is_nested_monitor(struct rv_monitor_def *mdef)
{
	return mdef->parent != NULL;
}

/*
 * The list is kept ordered so that nested monitors come right after
 * their parent; thus, if a monitor has a child element, it is a container.
 * Containers can also be identified based on their function pointers:
 * as they are not real monitors, they do not need function definitions
 * for enable()/disable(). Use this condition to find empty containers.
 * Keep both conditions in case we have some non-compliant containers.
 */
bool rv_is_container_monitor(struct rv_monitor_def *mdef)
{
	struct rv_monitor_def *next;

	if (list_is_last(&mdef->list, &rv_monitors_list))
		return false;

	next = list_next_entry(mdef, list);

	return next->parent == mdef->monitor || !mdef->monitor->enable;
}

/*
 * This section collects the monitor/ files and folders.
 */
static ssize_t monitor_enable_read_data(struct file *filp, char __user *user_buf, size_t count,
					loff_t *ppos)
{
	struct rv_monitor_def *mdef = filp->private_data;
	const char *buff;

	buff = mdef->monitor->enabled ? "1\n" : "0\n";

	return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1);
}

/*
 * __rv_disable_monitor - disable an enabled monitor
 */
static int __rv_disable_monitor(struct rv_monitor_def *mdef, bool sync)
{
	lockdep_assert_held(&rv_interface_lock);

	if (mdef->monitor->enabled) {
		mdef->monitor->enabled = 0;
		if (mdef->monitor->disable)
			mdef->monitor->disable();

		/*
		 * Wait for the execution of all events to finish.
		 * Otherwise, the data used by the monitor could
		 * be inconsistent, i.e., if the monitor is re-enabled.
		 */
		if (sync)
			tracepoint_synchronize_unregister();
		return 1;
	}
	return 0;
}

static void rv_disable_single(struct rv_monitor_def *mdef)
{
	__rv_disable_monitor(mdef, true);
}

static int rv_enable_single(struct rv_monitor_def *mdef)
{
	int retval;

	lockdep_assert_held(&rv_interface_lock);

	if (mdef->monitor->enabled)
		return 0;

	retval = mdef->monitor->enable();

	if (!retval)
		mdef->monitor->enabled = 1;

	return retval;
}

static void rv_disable_container(struct rv_monitor_def *mdef)
{
	struct rv_monitor_def *p = mdef;
	int enabled = 0;

	list_for_each_entry_continue(p, &rv_monitors_list, list) {
		if (p->parent != mdef->monitor)
			break;
		enabled += __rv_disable_monitor(p, false);
	}
	if (enabled)
		tracepoint_synchronize_unregister();
	mdef->monitor->enabled = 0;
}

static int rv_enable_container(struct rv_monitor_def *mdef)
{
	struct rv_monitor_def *p = mdef;
	int retval = 0;

	list_for_each_entry_continue(p, &rv_monitors_list, list) {
		if (retval || p->parent != mdef->monitor)
			break;
		retval = rv_enable_single(p);
	}
	if (retval)
		rv_disable_container(mdef);
	else
		mdef->monitor->enabled = 1;
	return retval;
}

/**
 * rv_disable_monitor - disable a given runtime monitor
 * @mdef: Pointer to the monitor definition structure.
 *
 * Returns 0 on success.
 */
int rv_disable_monitor(struct rv_monitor_def *mdef)
{
	if (rv_is_container_monitor(mdef))
		rv_disable_container(mdef);
	else
		rv_disable_single(mdef);

	return 0;
}

/**
 * rv_enable_monitor - enable a given runtime monitor
 * @mdef: Pointer to the monitor definition structure.
 *
 * Returns 0 on success, error otherwise.
 */
int rv_enable_monitor(struct rv_monitor_def *mdef)
{
	int retval;

	if (rv_is_container_monitor(mdef))
		retval = rv_enable_container(mdef);
	else
		retval = rv_enable_single(mdef);

	return retval;
}

/*
 * Interface for enabling/disabling a monitor.
 */
static ssize_t monitor_enable_write_data(struct file *filp, const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct rv_monitor_def *mdef = filp->private_data;
	int retval;
	bool val;

	retval = kstrtobool_from_user(user_buf, count, &val);
	if (retval)
		return retval;

	mutex_lock(&rv_interface_lock);

	if (val)
		retval = rv_enable_monitor(mdef);
	else
		retval = rv_disable_monitor(mdef);

	mutex_unlock(&rv_interface_lock);

	return retval ? : count;
}

static const struct file_operations interface_enable_fops = {
	.open = simple_open,
	.write = monitor_enable_write_data,
	.read = monitor_enable_read_data,
};

/*
 * Interface to read the monitor's description.
 */
static ssize_t monitor_desc_read_data(struct file *filp, char __user *user_buf, size_t count,
				      loff_t *ppos)
{
	struct rv_monitor_def *mdef = filp->private_data;
	char buff[256];

	memset(buff, 0, sizeof(buff));

	snprintf(buff, sizeof(buff), "%s\n", mdef->monitor->description);

	return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1);
}

static const struct file_operations interface_desc_fops = {
	.open = simple_open,
	.read = monitor_desc_read_data,
};

/*
 * During the registration of a monitor, this function creates
 * the monitor dir, where the specific options of the monitor
 * are exposed.
 */
static int create_monitor_dir(struct rv_monitor_def *mdef, struct rv_monitor_def *parent)
{
	struct dentry *root = parent ? parent->root_d : get_monitors_root();
	const char *name = mdef->monitor->name;
	struct dentry *tmp;
	int retval;

	mdef->root_d = rv_create_dir(name, root);
	if (!mdef->root_d)
		return -ENOMEM;

	tmp = rv_create_file("enable", RV_MODE_WRITE, mdef->root_d, mdef, &interface_enable_fops);
	if (!tmp) {
		retval = -ENOMEM;
		goto out_remove_root;
	}

	tmp = rv_create_file("desc", RV_MODE_READ, mdef->root_d, mdef, &interface_desc_fops);
	if (!tmp) {
		retval = -ENOMEM;
		goto out_remove_root;
	}

	retval = reactor_populate_monitor(mdef);
	if (retval)
		goto out_remove_root;

	return 0;

out_remove_root:
	rv_remove(mdef->root_d);
	return retval;
}

/*
 * Available/Enabled monitors shared seq functions.
 */
static int monitors_show(struct seq_file *m, void *p)
{
	struct rv_monitor_def *mon_def = p;

	if (mon_def->parent)
		seq_printf(m, "%s:%s\n", mon_def->parent->name,
			   mon_def->monitor->name);
	else
		seq_printf(m, "%s\n", mon_def->monitor->name);
	return 0;
}

/*
 * Used by the seq file operations at the end of a read
 * operation.
 */
static void monitors_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&rv_interface_lock);
}

/*
 * Available monitor seq functions.
 */
static void *available_monitors_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&rv_interface_lock);
	return seq_list_start(&rv_monitors_list, *pos);
}

static void *available_monitors_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &rv_monitors_list, pos);
}

/*
 * Enable monitor seq functions.
 */
static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct rv_monitor_def *m_def = p;

	(*pos)++;

	list_for_each_entry_continue(m_def, &rv_monitors_list, list) {
		if (m_def->monitor->enabled)
			return m_def;
	}

	return NULL;
}

static void *enabled_monitors_start(struct seq_file *m, loff_t *pos)
{
	struct rv_monitor_def *m_def;
	loff_t l;

	mutex_lock(&rv_interface_lock);

	if (list_empty(&rv_monitors_list))
		return NULL;

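	/*
	 * Point m_def at the list head itself: list_for_each_entry_continue()
	 * in enabled_monitors_next() will then advance to the first real
	 * entry before checking it.
	 */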
	m_def = list_entry(&rv_monitors_list, struct rv_monitor_def, list);

	for (l = 0; l <= *pos; ) {
		m_def = enabled_monitors_next(m, m_def, &l);
		if (!m_def)
			break;
	}

	return m_def;
}

/*
 * available/enabled monitors seq definition.
 */
static const struct seq_operations available_monitors_seq_ops = {
	.start = available_monitors_start,
	.next = available_monitors_next,
	.stop = monitors_stop,
	.show = monitors_show
};

static const struct seq_operations enabled_monitors_seq_ops = {
	.start = enabled_monitors_start,
	.next = enabled_monitors_next,
	.stop = monitors_stop,
	.show = monitors_show
};

/*
 * available_monitors interface.
 */
static int available_monitors_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &available_monitors_seq_ops);
};

static const struct file_operations available_monitors_ops = {
	.open = available_monitors_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release
};

/*
 * enabled_monitors interface.
 */
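/*
 * disable_all_monitors - disable every enabled monitor and wait for the
 * in-flight tracepoint handlers to finish before returning.
 */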
static void disable_all_monitors(void)
{
	struct rv_monitor_def *mdef;
	int enabled = 0;

	mutex_lock(&rv_interface_lock);

	list_for_each_entry(mdef, &rv_monitors_list, list)
		enabled += __rv_disable_monitor(mdef, false);

	if (enabled) {
		/*
		 * Wait for the execution of all events to finish.
		 * Otherwise, the data used by the monitor could
		 * be inconsistent, i.e., if the monitor is re-enabled.
		 */
		tracepoint_synchronize_unregister();
	}

	mutex_unlock(&rv_interface_lock);
}

static int enabled_monitors_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
		disable_all_monitors();

	return seq_open(file, &enabled_monitors_seq_ops);
};

static ssize_t enabled_monitors_write(struct file *filp, const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	char buff[MAX_RV_MONITOR_NAME_SIZE + 2];
	struct rv_monitor_def *mdef;
	int retval = -EINVAL;
	bool enable = true;
	char *ptr, *tmp;
	int len;

	if (count < 1 || count > MAX_RV_MONITOR_NAME_SIZE + 1)
		return -EINVAL;

	memset(buff, 0, sizeof(buff));

	retval = simple_write_to_buffer(buff, sizeof(buff) - 1, ppos, user_buf, count);
	if (retval < 0)
		return -EFAULT;

	ptr = strim(buff);

	if (ptr[0] == '!') {
		enable = false;
		ptr++;
	}

	len = strlen(ptr);
	if (!len)
		return count;

	mutex_lock(&rv_interface_lock);

	retval = -EINVAL;

	/* we support 1 nesting level, trim the parent */
	tmp = strstr(ptr, ":");
	if (tmp)
		ptr = tmp + 1;

	list_for_each_entry(mdef, &rv_monitors_list, list) {
		if (strcmp(ptr, mdef->monitor->name) != 0)
			continue;

		/*
		 * Monitor found!
		 */
		if (enable)
			retval = rv_enable_monitor(mdef);
		else
			retval = rv_disable_monitor(mdef);

		if (!retval)
			retval = count;

		break;
	}

	mutex_unlock(&rv_interface_lock);
	return retval;
}

static const struct file_operations enabled_monitors_ops = {
	.open = enabled_monitors_open,
	.read = seq_read,
	.write = enabled_monitors_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

/*
 * Monitoring on global switcher!
 */
static bool __read_mostly monitoring_on;

/**
 * rv_monitoring_on - checks if monitoring is on
 *
 * Returns true if on, false otherwise.
 */
bool rv_monitoring_on(void)
{
	/* Ensures that concurrent monitors read consistent monitoring_on */
	smp_rmb();
	return READ_ONCE(monitoring_on);
}

/*
 * monitoring_on general switcher.
 */
static ssize_t monitoring_on_read_data(struct file *filp, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	const char *buff;

	buff = rv_monitoring_on() ? "1\n" : "0\n";

	return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1);
}

static void turn_monitoring_off(void)
{
	WRITE_ONCE(monitoring_on, false);
	/* Ensures that concurrent monitors read consistent monitoring_on */
	smp_wmb();
}

static void reset_all_monitors(void)
{
	struct rv_monitor_def *mdef;

	list_for_each_entry(mdef, &rv_monitors_list, list) {
		if (mdef->monitor->enabled && mdef->monitor->reset)
			mdef->monitor->reset();
	}
}

static void turn_monitoring_on(void)
{
	WRITE_ONCE(monitoring_on, true);
	/* Ensures that concurrent monitors read consistent monitoring_on */
	smp_wmb();
}

static void turn_monitoring_on_with_reset(void)
{
	lockdep_assert_held(&rv_interface_lock);

	if (rv_monitoring_on())
		return;

	/*
	 * Monitors might be out of sync with the system if events were not
	 * processed because of !rv_monitoring_on().
	 *
	 * Reset all monitors, forcing a re-sync.
	 */
	reset_all_monitors();
	turn_monitoring_on();
}

static ssize_t monitoring_on_write_data(struct file *filp, const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	int retval;
	bool val;

	retval = kstrtobool_from_user(user_buf, count, &val);
	if (retval)
		return retval;

	mutex_lock(&rv_interface_lock);

	if (val)
		turn_monitoring_on_with_reset();
	else
		turn_monitoring_off();

	/*
	 * Wait for the execution of all events to finish
	 * before returning to user-space.
	 */
	tracepoint_synchronize_unregister();

	mutex_unlock(&rv_interface_lock);

	return count;
}

static const struct file_operations monitoring_on_fops = {
	.open = simple_open,
	.write = monitoring_on_write_data,
	.read = monitoring_on_read_data,
};

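/*
 * destroy_monitor_dir - clean up the monitor's reactor link and remove
 * its directory.
 */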
static void destroy_monitor_dir(struct rv_monitor_def *mdef)
{
	reactor_cleanup_monitor(mdef);
	rv_remove(mdef->root_d);
}

/**
 * rv_register_monitor - register an RV monitor.
 * @monitor: The rv_monitor to be registered.
 * @parent: The parent of the monitor to be registered, NULL if not nested.
 *
 * Returns 0 if successful, error otherwise.
 */
int rv_register_monitor(struct rv_monitor *monitor, struct rv_monitor *parent)
{
	struct rv_monitor_def *r, *p = NULL;
	int retval = 0;

	if (strlen(monitor->name) >= MAX_RV_MONITOR_NAME_SIZE) {
		pr_info("Monitor %s has a name longer than %d\n", monitor->name,
			MAX_RV_MONITOR_NAME_SIZE);
		return -EINVAL;
	}

	mutex_lock(&rv_interface_lock);

	list_for_each_entry(r, &rv_monitors_list, list) {
		if (strcmp(monitor->name, r->monitor->name) == 0) {
			pr_info("Monitor %s is already registered\n", monitor->name);
			retval = -EEXIST;
			goto out_unlock;
		}
	}

	if (parent) {
		list_for_each_entry(r, &rv_monitors_list, list) {
			if (strcmp(parent->name, r->monitor->name) == 0) {
				p = r;
				break;
			}
		}
	}

	if (p && rv_is_nested_monitor(p)) {
		pr_info("Parent monitor %s is already nested, cannot nest further\n",
			parent->name);
		retval = -EINVAL;
		goto out_unlock;
	}

	r = kzalloc(sizeof(struct rv_monitor_def), GFP_KERNEL);
	if (!r) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	r->monitor = monitor;
	r->parent = parent;

	retval = create_monitor_dir(r, p);
	if (retval) {
		kfree(r);
		goto out_unlock;
	}

	/* keep children close to the parent for easier visualisation */
	if (p)
		list_add(&r->list, &p->list);
	else
		list_add_tail(&r->list, &rv_monitors_list);

out_unlock:
	mutex_unlock(&rv_interface_lock);
	return retval;
}

/**
 * rv_unregister_monitor - unregister an RV monitor.
 * @monitor: The rv_monitor to be unregistered.
 *
 * Returns 0 if successful, error otherwise.
 */
int rv_unregister_monitor(struct rv_monitor *monitor)
{
	struct rv_monitor_def *ptr, *next;

	mutex_lock(&rv_interface_lock);

	list_for_each_entry_safe(ptr, next, &rv_monitors_list, list) {
		if (strcmp(monitor->name, ptr->monitor->name) == 0) {
			rv_disable_monitor(ptr);
			list_del(&ptr->list);
			destroy_monitor_dir(ptr);
		}
	}

	mutex_unlock(&rv_interface_lock);
	return 0;
}

int __init rv_init_interface(void)
{
	struct dentry *tmp;
	int retval;

	rv_root.root_dir = rv_create_dir("rv", NULL);
	if (!rv_root.root_dir)
		goto out_err;

	rv_root.monitors_dir = rv_create_dir("monitors", rv_root.root_dir);
	if (!rv_root.monitors_dir)
		goto out_err;

	tmp = rv_create_file("available_monitors", RV_MODE_READ, rv_root.root_dir, NULL,
			     &available_monitors_ops);
	if (!tmp)
		goto out_err;

	tmp = rv_create_file("enabled_monitors", RV_MODE_WRITE, rv_root.root_dir, NULL,
			     &enabled_monitors_ops);
	if (!tmp)
		goto out_err;

	tmp = rv_create_file("monitoring_on", RV_MODE_WRITE, rv_root.root_dir, NULL,
			     &monitoring_on_fops);
	if (!tmp)
		goto out_err;

	retval = init_rv_reactors(rv_root.root_dir);
	if (retval)
		goto out_err;

	turn_monitoring_on();

	return 0;

out_err:
	rv_remove(rv_root.root_dir);
	printk(KERN_ERR "RV: Error while creating the RV interface\n");
	return 1;
}