// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
 *
 * This is the online Runtime Verification (RV) interface.
 *
 * RV is a lightweight (yet rigorous) method that complements classical
 * exhaustive verification techniques (such as model checking and
 * theorem proving) with a more practical approach to complex systems.
 *
 * RV works by analyzing the trace of the system's actual execution,
 * comparing it against a formal specification of the system behavior.
 * RV can give precise information on the runtime behavior of the
 * monitored system while enabling reactions to unexpected
 * events, avoiding, for example, the propagation of a failure in
 * safety-critical systems.
 *
 * The development of this interface has its roots in the
 * following paper:
 *
 * De Oliveira, Daniel Bristot; Cucinotta, Tommaso; De Oliveira, Romulo
 * Silva. Efficient formal verification for the Linux kernel. In:
 * International Conference on Software Engineering and Formal Methods.
 * Springer, Cham, 2019. p. 315-332.
 *
 * And:
 *
 * De Oliveira, Daniel Bristot, et al. Automata-based formal analysis
 * and verification of the real-time Linux kernel. PhD Thesis, 2020.
 *
 * == Runtime monitor interface ==
 *
 * A monitor is the central part of the runtime verification of a system.
 *
 * The monitor stands in between the formal specification of the desired
 * (or undesired) behavior, and the trace of the actual system.
 *
 * In Linux terms, the runtime verification monitors are encapsulated
 * inside the "RV monitor" abstraction. An RV monitor includes a reference
 * model of the system, a set of instances of the monitor (per-cpu monitor,
 * per-task monitor, and so on), and the helper functions that glue the
 * monitor to the system via trace. Generally, a monitor includes some form
 * of trace output as a reaction to event parsing and exceptions,
 * as depicted below:
 *
 * Linux  +----- RV Monitor ----------------------------------+ Formal
 *  Realm |                                                    |  Realm
 *  +-------------------+     +----------------+     +-----------------+
 *  |   Linux kernel    |     |     Monitor    |     |     Reference   |
 *  |      Tracing      | ->  |   Instance(s)  | <-  |       Model     |
 *  | (instrumentation) |     | (verification) |     | (specification) |
 *  +-------------------+     +----------------+     +-----------------+
 *         |                          |                       |
 *         |                          V                       |
 *         |                     +----------+                 |
 *         |                     | Reaction |                 |
 *         |                     +--+--+--+-+                 |
 *         |                        |  |  |                   |
 *         |                        |  |  +-> trace output ?  |
 *         +------------------------|--|----------------------+
 *                                  |  +----> panic ?
 *                                  +-------> <user-specified>
 *
 * This file implements the interface for loading RV monitors and for
 * controlling the verification session.
 *
 * == Registering monitors ==
 *
 * The struct rv_monitor defines a set of callback functions to control
 * a verification session. For instance, when a given monitor is enabled,
 * the "enable" callback function is called to hook the instrumentation
 * functions to the kernel trace events. The "disable" function is called
 * when disabling the verification session.
 *
 * An RV monitor is registered via:
 *   int rv_register_monitor(struct rv_monitor *monitor, struct rv_monitor *parent);
 * And unregistered via:
 *   int rv_unregister_monitor(struct rv_monitor *monitor);
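 *
 * A minimal sketch: a hypothetical monitor "foo" (the enable_foo(),
 * disable_foo() and reset_foo() helpers are assumed to hook, unhook
 * and reset its instrumentation) could be defined and registered as:
 *
 *   static struct rv_monitor rv_foo = {
 *           .name = "foo",
 *           .description = "example monitor",
 *           .enable = enable_foo,
 *           .disable = disable_foo,
 *           .reset = reset_foo,
 *   };
 *
 *   rv_register_monitor(&rv_foo, NULL);  (a NULL parent means not nested)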
 *
 * == User interface ==
 *
 * The user interface resembles the kernel tracing interface. It
 * presents these files:
 *
 * "available_monitors"
 *  - Lists the available monitors, one per line.
 *
 *   For example:
 *   # cat available_monitors
 *   wip
 *   wwnr
 *
 * "enabled_monitors"
 *  - Lists the enabled monitors, one per line;
 *  - Writing to it enables a given monitor;
 *  - Writing a monitor name with a '!' prefix disables it;
 *  - Truncating the file disables all enabled monitors.
 *
 *   For example:
 *   # cat enabled_monitors
 *   # echo wip > enabled_monitors
 *   # echo wwnr >> enabled_monitors
 *   # cat enabled_monitors
 *   wip
 *   wwnr
 *   # echo '!wip' >> enabled_monitors
 *   # cat enabled_monitors
 *   wwnr
 *   # echo > enabled_monitors
 *   # cat enabled_monitors
 *   #
 *
 * Note that more than one monitor can be enabled concurrently.
 *
115 * "monitoring_on"
116 * - It is an on/off general switcher for monitoring. Note
117 * that it does not disable enabled monitors or detach events,
118 * but stops the per-entity monitors from monitoring the events
119 * received from the instrumentation. It resembles the "tracing_on"
120 * switcher.
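 *
 *   For example, to pause and later resume event processing without
 *   detaching the enabled monitors:
 *   # echo 0 > monitoring_on
 *   # cat monitoring_on
 *   0
 *   # echo 1 > monitoring_on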
 *
 * "monitors/"
 *   Each monitor has its own directory inside "monitors/", where the
 *   monitor-specific files are presented.
 *   The "monitors/" directory resembles the "events" directory on
 *   tracefs.
 *
 *   For example:
 *   # cd monitors/wip/
 *   # ls
 *   desc  enable
 *   # cat desc
 *   auto-generated wakeup in preemptive monitor.
 *   # cat enable
 *   0
 *
 * For further information, see:
 *   Documentation/trace/rv/runtime-verification.rst
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>

#ifdef CONFIG_DA_MON_EVENTS
#define CREATE_TRACE_POINTS
#include <rv_trace.h>
#endif

#include "rv.h"

DEFINE_MUTEX(rv_interface_lock);

static struct rv_interface rv_root;

struct dentry *get_monitors_root(void)
{
	return rv_root.monitors_dir;
}

/*
 * Interface for monitor registration.
 */
LIST_HEAD(rv_monitors_list);

static int task_monitor_count;
static bool task_monitor_slots[RV_PER_TASK_MONITORS];

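/*
 * Reserve a free per-task monitor slot. Returns the slot index on success
 * or -EBUSY if all RV_PER_TASK_MONITORS slots are already in use.
 */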
int rv_get_task_monitor_slot(void)
{
	int i;

	lockdep_assert_held(&rv_interface_lock);

	if (task_monitor_count == RV_PER_TASK_MONITORS)
		return -EBUSY;

	task_monitor_count++;

	for (i = 0; i < RV_PER_TASK_MONITORS; i++) {
		if (!task_monitor_slots[i]) {
			task_monitor_slots[i] = true;
			return i;
		}
	}

	WARN_ONCE(1, "RV task_monitor_count and slots are out of sync\n");

	return -EINVAL;
}

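/*
 * Release a per-task monitor slot previously reserved with
 * rv_get_task_monitor_slot().
 */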
void rv_put_task_monitor_slot(int slot)
{
	lockdep_assert_held(&rv_interface_lock);

	if (slot < 0 || slot >= RV_PER_TASK_MONITORS) {
		WARN_ONCE(1, "RV releasing an invalid slot!: %d\n", slot);
		return;
	}

	WARN_ONCE(!task_monitor_slots[slot], "RV releasing unused task_monitor_slots: %d\n",
		  slot);

	task_monitor_count--;
	task_monitor_slots[slot] = false;
}

/*
 * Monitors with a parent are nested; monitors without a parent are either
 * standalone or containers.
 */
bool rv_is_nested_monitor(struct rv_monitor_def *mdef)
{
	return mdef->parent != NULL;
}

/*
 * The list is kept ordered so that nested monitors follow their parent;
 * if a monitor is followed by one of its children, it is a container.
 * Containers can also be identified based on their function pointers:
 * as they are not real monitors, they do not need enable()/disable()
 * definitions. Use this condition to find empty containers.
 * Keep both conditions in case we have some non-compliant containers.
 */
bool rv_is_container_monitor(struct rv_monitor_def *mdef)
{
	struct rv_monitor_def *next = list_next_entry(mdef, list);

	return next->parent == mdef->monitor || !mdef->monitor->enable;
}

232
233 /*
234 * This section collects the monitor/ files and folders.
235 */
monitor_enable_read_data(struct file * filp,char __user * user_buf,size_t count,loff_t * ppos)236 static ssize_t monitor_enable_read_data(struct file *filp, char __user *user_buf, size_t count,
237 loff_t *ppos)
238 {
239 struct rv_monitor_def *mdef = filp->private_data;
240 const char *buff;
241
242 buff = mdef->monitor->enabled ? "1\n" : "0\n";
243
244 return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff)+1);
245 }
246
/*
 * __rv_disable_monitor - disable an enabled monitor
 */
static int __rv_disable_monitor(struct rv_monitor_def *mdef, bool sync)
{
	lockdep_assert_held(&rv_interface_lock);

	if (mdef->monitor->enabled) {
		mdef->monitor->enabled = 0;
		if (mdef->monitor->disable)
			mdef->monitor->disable();

		/*
		 * Wait for the execution of all events to finish.
		 * Otherwise, the data used by the monitor could
		 * be inconsistent, e.g., if the monitor is re-enabled.
		 */
		if (sync)
			tracepoint_synchronize_unregister();
		return 1;
	}
	return 0;
}

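/* Disable a single (non-container) monitor and wait for in-flight events. */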
static void rv_disable_single(struct rv_monitor_def *mdef)
{
	__rv_disable_monitor(mdef, true);
}

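/* Enable a single (non-container) monitor. Returns 0 on success. */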
static int rv_enable_single(struct rv_monitor_def *mdef)
{
	int retval;

	lockdep_assert_held(&rv_interface_lock);

	if (mdef->monitor->enabled)
		return 0;

	retval = mdef->monitor->enable();

	if (!retval)
		mdef->monitor->enabled = 1;

	return retval;
}

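/*
 * Disable all nested monitors of a container, then mark the container
 * itself as disabled; tracepoints are synchronized only once, at the end.
 */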
static void rv_disable_container(struct rv_monitor_def *mdef)
{
	struct rv_monitor_def *p = mdef;
	int enabled = 0;

	list_for_each_entry_continue(p, &rv_monitors_list, list) {
		if (p->parent != mdef->monitor)
			break;
		enabled += __rv_disable_monitor(p, false);
	}
	if (enabled)
		tracepoint_synchronize_unregister();
	mdef->monitor->enabled = 0;
}

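/*
 * Enable all nested monitors of a container; on failure, roll back by
 * disabling the whole container again.
 */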
static int rv_enable_container(struct rv_monitor_def *mdef)
{
	struct rv_monitor_def *p = mdef;
	int retval = 0;

	list_for_each_entry_continue(p, &rv_monitors_list, list) {
		if (retval || p->parent != mdef->monitor)
			break;
		retval = rv_enable_single(p);
	}
	if (retval)
		rv_disable_container(mdef);
	else
		mdef->monitor->enabled = 1;
	return retval;
}

/**
 * rv_disable_monitor - disable a given runtime monitor
 * @mdef: Pointer to the monitor definition structure.
 *
 * Returns 0 on success.
 */
int rv_disable_monitor(struct rv_monitor_def *mdef)
{
	if (rv_is_container_monitor(mdef))
		rv_disable_container(mdef);
	else
		rv_disable_single(mdef);

	return 0;
}

/**
 * rv_enable_monitor - enable a given runtime monitor
 * @mdef: Pointer to the monitor definition structure.
 *
 * Returns 0 on success, error otherwise.
 */
int rv_enable_monitor(struct rv_monitor_def *mdef)
{
	int retval;

	if (rv_is_container_monitor(mdef))
		retval = rv_enable_container(mdef);
	else
		retval = rv_enable_single(mdef);

	return retval;
}

/*
 * Interface for enabling/disabling a monitor.
 */
static ssize_t monitor_enable_write_data(struct file *filp, const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct rv_monitor_def *mdef = filp->private_data;
	int retval;
	bool val;

	retval = kstrtobool_from_user(user_buf, count, &val);
	if (retval)
		return retval;

	mutex_lock(&rv_interface_lock);

	if (val)
		retval = rv_enable_monitor(mdef);
	else
		retval = rv_disable_monitor(mdef);

	mutex_unlock(&rv_interface_lock);

	return retval ? : count;
}

static const struct file_operations interface_enable_fops = {
	.open = simple_open,
	.write = monitor_enable_write_data,
	.read = monitor_enable_read_data,
};

/*
 * Interface to read the monitor's description.
 */
static ssize_t monitor_desc_read_data(struct file *filp, char __user *user_buf, size_t count,
				      loff_t *ppos)
{
	struct rv_monitor_def *mdef = filp->private_data;
	char buff[256];

	memset(buff, 0, sizeof(buff));

	snprintf(buff, sizeof(buff), "%s\n", mdef->monitor->description);

	return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1);
}

static const struct file_operations interface_desc_fops = {
	.open = simple_open,
	.read = monitor_desc_read_data,
};

/*
 * During the registration of a monitor, this function creates
 * the monitor dir, where the specific options of the monitor
 * are exposed.
 */
static int create_monitor_dir(struct rv_monitor_def *mdef, struct rv_monitor_def *parent)
{
	struct dentry *root = parent ? parent->root_d : get_monitors_root();
	const char *name = mdef->monitor->name;
	struct dentry *tmp;
	int retval;

	mdef->root_d = rv_create_dir(name, root);
	if (!mdef->root_d)
		return -ENOMEM;

	tmp = rv_create_file("enable", RV_MODE_WRITE, mdef->root_d, mdef, &interface_enable_fops);
	if (!tmp) {
		retval = -ENOMEM;
		goto out_remove_root;
	}

	tmp = rv_create_file("desc", RV_MODE_READ, mdef->root_d, mdef, &interface_desc_fops);
	if (!tmp) {
		retval = -ENOMEM;
		goto out_remove_root;
	}

	retval = reactor_populate_monitor(mdef);
	if (retval)
		goto out_remove_root;

	return 0;

out_remove_root:
	rv_remove(mdef->root_d);
	return retval;
}

/*
 * Available/enabled monitors shared seq functions.
 */
static int monitors_show(struct seq_file *m, void *p)
{
	struct rv_monitor_def *mon_def = p;

	if (mon_def->parent)
		seq_printf(m, "%s:%s\n", mon_def->parent->name,
			   mon_def->monitor->name);
	else
		seq_printf(m, "%s\n", mon_def->monitor->name);
	return 0;
}

/*
 * Used by the seq file operations at the end of a read
 * operation.
 */
static void monitors_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&rv_interface_lock);
}

/*
 * Available monitors seq functions.
 */
static void *available_monitors_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&rv_interface_lock);
	return seq_list_start(&rv_monitors_list, *pos);
}

static void *available_monitors_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &rv_monitors_list, pos);
}

/*
 * Enabled monitors seq functions.
 */
static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct rv_monitor_def *m_def = p;

	(*pos)++;

	list_for_each_entry_continue(m_def, &rv_monitors_list, list) {
		if (m_def->monitor->enabled)
			return m_def;
	}

	return NULL;
}

static void *enabled_monitors_start(struct seq_file *m, loff_t *pos)
{
	struct rv_monitor_def *m_def;
	loff_t l;

	mutex_lock(&rv_interface_lock);

	if (list_empty(&rv_monitors_list))
		return NULL;

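	/*
	 * Start from the list head itself so that the first call to
	 * enabled_monitors_next() below lands on the first real entry.
	 */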
	m_def = list_entry(&rv_monitors_list, struct rv_monitor_def, list);

	for (l = 0; l <= *pos; ) {
		m_def = enabled_monitors_next(m, m_def, &l);
		if (!m_def)
			break;
	}

	return m_def;
}

/*
 * available/enabled monitors seq definition.
 */
static const struct seq_operations available_monitors_seq_ops = {
	.start = available_monitors_start,
	.next = available_monitors_next,
	.stop = monitors_stop,
	.show = monitors_show
};

static const struct seq_operations enabled_monitors_seq_ops = {
	.start = enabled_monitors_start,
	.next = enabled_monitors_next,
	.stop = monitors_stop,
	.show = monitors_show
};

/*
 * available_monitors interface.
 */
static int available_monitors_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &available_monitors_seq_ops);
}

static const struct file_operations available_monitors_ops = {
	.open = available_monitors_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release
};

/*
 * enabled_monitors interface.
 */
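/*
 * Disable every registered monitor: used when the enabled_monitors file
 * is opened for writing with O_TRUNC (e.g. "echo > enabled_monitors").
 */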
static void disable_all_monitors(void)
{
	struct rv_monitor_def *mdef;
	int enabled = 0;

	mutex_lock(&rv_interface_lock);

	list_for_each_entry(mdef, &rv_monitors_list, list)
		enabled += __rv_disable_monitor(mdef, false);

	if (enabled) {
		/*
		 * Wait for the execution of all events to finish.
		 * Otherwise, the data used by the monitor could
		 * be inconsistent, e.g., if the monitor is re-enabled.
		 */
		tracepoint_synchronize_unregister();
	}

	mutex_unlock(&rv_interface_lock);
}

static int enabled_monitors_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
		disable_all_monitors();

	return seq_open(file, &enabled_monitors_seq_ops);
}

static ssize_t enabled_monitors_write(struct file *filp, const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	char buff[MAX_RV_MONITOR_NAME_SIZE + 2];
	struct rv_monitor_def *mdef;
	int retval = -EINVAL;
	bool enable = true;
	char *ptr, *tmp;
	int len;

	if (count < 1 || count > MAX_RV_MONITOR_NAME_SIZE + 1)
		return -EINVAL;

	memset(buff, 0, sizeof(buff));

	retval = simple_write_to_buffer(buff, sizeof(buff) - 1, ppos, user_buf, count);
	if (retval < 0)
		return -EFAULT;

	ptr = strim(buff);

	if (ptr[0] == '!') {
		enable = false;
		ptr++;
	}

	len = strlen(ptr);
	if (!len)
		return count;

	mutex_lock(&rv_interface_lock);

	retval = -EINVAL;

	/* we support 1 nesting level, trim the parent */
	tmp = strstr(ptr, ":");
	if (tmp)
		ptr = tmp + 1;

	list_for_each_entry(mdef, &rv_monitors_list, list) {
		if (strcmp(ptr, mdef->monitor->name) != 0)
			continue;

		/*
		 * Monitor found!
		 */
		if (enable)
			retval = rv_enable_monitor(mdef);
		else
			retval = rv_disable_monitor(mdef);

		if (!retval)
			retval = count;

		break;
	}

	mutex_unlock(&rv_interface_lock);
	return retval;
}

static const struct file_operations enabled_monitors_ops = {
	.open = enabled_monitors_open,
	.read = seq_read,
	.write = enabled_monitors_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

/*
 * Monitoring on global switcher!
 */
static bool __read_mostly monitoring_on;

/**
 * rv_monitoring_on - checks if monitoring is on
 *
 * Returns true if on, false otherwise.
 */
bool rv_monitoring_on(void)
{
	/* Ensures that concurrent monitors read consistent monitoring_on */
	smp_rmb();
	return READ_ONCE(monitoring_on);
}

/*
 * monitoring_on general switcher.
 */
static ssize_t monitoring_on_read_data(struct file *filp, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	const char *buff;

	buff = rv_monitoring_on() ? "1\n" : "0\n";

	return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1);
}

static void turn_monitoring_off(void)
{
	WRITE_ONCE(monitoring_on, false);
	/* Ensures that concurrent monitors read consistent monitoring_on */
	smp_wmb();
}

static void reset_all_monitors(void)
{
	struct rv_monitor_def *mdef;

	list_for_each_entry(mdef, &rv_monitors_list, list) {
		if (mdef->monitor->enabled && mdef->monitor->reset)
			mdef->monitor->reset();
	}
}

static void turn_monitoring_on(void)
{
	WRITE_ONCE(monitoring_on, true);
	/* Ensures that concurrent monitors read consistent monitoring_on */
	smp_wmb();
}

static void turn_monitoring_on_with_reset(void)
{
	lockdep_assert_held(&rv_interface_lock);

	if (rv_monitoring_on())
		return;

	/*
	 * Monitors might be out of sync with the system if events were not
	 * processed because of !rv_monitoring_on().
	 *
	 * Reset all monitors, forcing a re-sync.
	 */
	reset_all_monitors();
	turn_monitoring_on();
}

static ssize_t monitoring_on_write_data(struct file *filp, const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	int retval;
	bool val;

	retval = kstrtobool_from_user(user_buf, count, &val);
	if (retval)
		return retval;

	mutex_lock(&rv_interface_lock);

	if (val)
		turn_monitoring_on_with_reset();
	else
		turn_monitoring_off();

	/*
	 * Wait for the execution of all events to finish
	 * before returning to user-space.
	 */
	tracepoint_synchronize_unregister();

	mutex_unlock(&rv_interface_lock);

	return count;
}

static const struct file_operations monitoring_on_fops = {
	.open = simple_open,
	.write = monitoring_on_write_data,
	.read = monitoring_on_read_data,
};

static void destroy_monitor_dir(struct rv_monitor_def *mdef)
{
	reactor_cleanup_monitor(mdef);
	rv_remove(mdef->root_d);
}

/**
 * rv_register_monitor - register a rv monitor.
 * @monitor: The rv_monitor to be registered.
 * @parent: The parent of the monitor to be registered, NULL if not nested.
 *
 * Returns 0 if successful, error otherwise.
 */
int rv_register_monitor(struct rv_monitor *monitor, struct rv_monitor *parent)
{
	struct rv_monitor_def *r, *p = NULL;
	int retval = 0;

	if (strlen(monitor->name) >= MAX_RV_MONITOR_NAME_SIZE) {
		pr_info("Monitor %s has a name longer than %d\n", monitor->name,
			MAX_RV_MONITOR_NAME_SIZE);
		return -EINVAL;
	}

	mutex_lock(&rv_interface_lock);

	list_for_each_entry(r, &rv_monitors_list, list) {
		if (strcmp(monitor->name, r->monitor->name) == 0) {
			pr_info("Monitor %s is already registered\n", monitor->name);
			retval = -EEXIST;
			goto out_unlock;
		}
	}

	if (parent) {
		list_for_each_entry(r, &rv_monitors_list, list) {
			if (strcmp(parent->name, r->monitor->name) == 0) {
				p = r;
				break;
			}
		}
	}

	if (p && rv_is_nested_monitor(p)) {
		pr_info("Parent monitor %s is already nested, cannot nest further\n",
			parent->name);
		retval = -EINVAL;
		goto out_unlock;
	}

	r = kzalloc(sizeof(struct rv_monitor_def), GFP_KERNEL);
	if (!r) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	r->monitor = monitor;
	r->parent = parent;

	retval = create_monitor_dir(r, p);
	if (retval) {
		kfree(r);
		goto out_unlock;
	}

	/* keep children close to the parent for easier visualisation */
	if (p)
		list_add(&r->list, &p->list);
	else
		list_add_tail(&r->list, &rv_monitors_list);

out_unlock:
	mutex_unlock(&rv_interface_lock);
	return retval;
}

/**
 * rv_unregister_monitor - unregister a rv monitor.
 * @monitor: The rv_monitor to be unregistered.
 *
 * Returns 0 if successful, error otherwise.
 */
int rv_unregister_monitor(struct rv_monitor *monitor)
{
	struct rv_monitor_def *ptr, *next;

	mutex_lock(&rv_interface_lock);

	list_for_each_entry_safe(ptr, next, &rv_monitors_list, list) {
		if (strcmp(monitor->name, ptr->monitor->name) == 0) {
			rv_disable_monitor(ptr);
			list_del(&ptr->list);
			destroy_monitor_dir(ptr);
		}
	}

	mutex_unlock(&rv_interface_lock);
	return 0;
}

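/*
 * rv_init_interface - create the RV interface at boot: the "rv" root
 * directory, the "monitors" directory, the control files and the reactors
 * interface; monitoring is turned on once everything is in place.
 */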
int __init rv_init_interface(void)
{
	struct dentry *tmp;
	int retval;

	rv_root.root_dir = rv_create_dir("rv", NULL);
	if (!rv_root.root_dir)
		goto out_err;

	rv_root.monitors_dir = rv_create_dir("monitors", rv_root.root_dir);
	if (!rv_root.monitors_dir)
		goto out_err;

	tmp = rv_create_file("available_monitors", RV_MODE_READ, rv_root.root_dir, NULL,
			     &available_monitors_ops);
	if (!tmp)
		goto out_err;

	tmp = rv_create_file("enabled_monitors", RV_MODE_WRITE, rv_root.root_dir, NULL,
			     &enabled_monitors_ops);
	if (!tmp)
		goto out_err;

	tmp = rv_create_file("monitoring_on", RV_MODE_WRITE, rv_root.root_dir, NULL,
			     &monitoring_on_fops);
	if (!tmp)
		goto out_err;

	retval = init_rv_reactors(rv_root.root_dir);
	if (retval)
		goto out_err;

	turn_monitoring_on();

	return 0;

out_err:
	rv_remove(rv_root.root_dir);
	printk(KERN_ERR "RV: Error while creating the RV interface\n");
	return 1;
}