// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark *mark;
	struct list_head trees;		/* with root here */
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct audit_node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[] __counted_by(count);
};

struct audit_tree_mark {
	struct fsnotify_mark mark;
	struct audit_chunk *chunk;
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;
/*
 * One struct chunk is attached to each inode of interest through
 * audit_tree_mark (fsnotify mark). We replace the struct chunk on tagging /
 * untagging; the mark is stable as long as a chunk is attached to it. The
 * association between mark and chunk is protected by hash_lock and
 * audit_tree_group->mark_mutex. Thus as long as we hold
 * audit_tree_group->mark_mutex and check that the mark is alive by the
 * FSNOTIFY_MARK_FLAG_ATTACHED flag check, we are sure the mark points to
 * the current chunk.
 *
 * Rules have a pointer to struct audit_tree.
 * Rules have a struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded .refs. The mark associated with the chunk
 * holds one chunk reference. This reference is dropped either when the mark
 * is going to be freed (the corresponding inode goes away) or when the chunk
 * attached to the mark gets replaced. This reference must be dropped using
 * audit_mark_put_chunk() to make sure it is dropped only after an RCU grace
 * period, as it protects RCU readers of the hash table.
 *
 * node.index allows getting from node.list to the containing chunk.
 * The MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */
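/*
 * For example, in a chunk with count == 3 where only slot 1 is marked
 * "will prune":
 *
 *	owners[0].index == 0
 *	owners[1].index == 1 | (1U << 31)
 *	owners[2].index == 2
 *
 * Masking off the MSB recovers the array position; find_chunk() below
 * relies on that to walk from an embedded audit_node back to the
 * enclosing audit_chunk.
 */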

static struct fsnotify_group *audit_tree_group __ro_after_init;
static struct kmem_cache *audit_tree_mark_cachep __ro_after_init;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(struct_size(tree, pathname, strlen(s) + 1), GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}
/*
 * Drop the reference to the chunk that was held by the mark. This is the
 * reference that gets dropped after we've removed the chunk from the hash
 * table, and we use it to make sure the chunk cannot be freed before the
 * RCU grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __put_chunk);
}
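/*
 * A sketch of the reader side this grace period protects (illustrative,
 * not a caller in this file): audit_tree_lookup() walks the hash under
 * rcu_read_lock() and takes its own chunk reference, so the chunk must
 * not be freed while such a walk may still be in flight:
 *
 *	rcu_read_lock();
 *	chunk = audit_tree_lookup(inode);	// grabs a reference if found
 *	rcu_read_unlock();
 *	if (chunk)
 *		audit_put_chunk(chunk);		// may now free the chunk
 */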

static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
	return container_of(mark, struct audit_tree_mark, mark);
}

static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
	return audit_mark(mark)->chunk;
}

static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}

static struct fsnotify_mark *alloc_mark(void)
{
	struct audit_tree_mark *amark;

	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
	if (!amark)
		return NULL;
	fsnotify_init_mark(&amark->mark, audit_tree_group);
	amark->mark.mask = FS_IN_IGNORED;
	return &amark->mark;
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	int i;

	chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
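/*
 * A worked example of the hashing scheme (address values illustrative): the
 * key is the address of the inode's i_fsnotify_marks field, so it is stable
 * for the inode's lifetime and shared by every chunk ever attached to that
 * inode. Dividing by L1_CACHE_BYTES discards low-order bits before folding
 * into one of the HASH_SIZE buckets, i.e. the "middle bits" mentioned above:
 *
 *	key    = inode_to_key(inode);	// e.g. 0xffff888123456740
 *	bucket = (key / 64) % 128;	// assuming 64-byte cachelines
 */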

/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * We use a data dependency barrier in READ_ONCE() to make sure
		 * the chunk we see is fully initialized.
		 */
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct audit_node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
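/*
 * E.g. for p == &chunk->owners[2], p->index carries 2 in its low bits
 * (with or without the "will prune" MSB), so p - 2 == &chunk->owners[0]
 * and container_of() subtracts the offset of owners[] to reach the chunk.
 * This only works because every node's index was set to its array slot in
 * alloc_chunk() and kept consistent by replace_chunk().
 */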

static void replace_mark_chunk(struct fsnotify_mark *mark,
			       struct audit_chunk *chunk)
{
	struct audit_chunk *old;

	assert_spin_locked(&hash_lock);
	old = mark_chunk(mark);
	audit_mark(mark)->chunk = chunk;
	if (chunk)
		chunk->mark = mark;
	if (old)
		old->mark = NULL;
}

static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
	struct audit_tree *owner;
	int i, j;

	new->key = old->key;
	list_splice_init(&old->trees, &new->trees);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	for (i = j = 0; j < old->count; i++, j++) {
		if (!old->owners[j].owner) {
			i--;
			continue;
		}
		owner = old->owners[j].owner;
		new->owners[i].owner = owner;
		new->owners[i].index = old->owners[j].index - j + i;
		if (!owner) /* result of earlier fallback */
			continue;
		get_tree(owner);
		list_replace_init(&old->owners[j].list, &new->owners[i].list);
	}
	replace_mark_chunk(old->mark, new);
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&old->hash, &new->hash);
}
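/*
 * The index arithmetic above compacts out empty slots while preserving the
 * "will prune" bit. For instance, if old->owners[] is {A, <empty>, B} with
 * B's index == 2 | (1U << 31), then B lands in new->owners[1] and its new
 * index is (2 | (1U << 31)) - 2 + 1 == 1 | (1U << 31): the slot is
 * renumbered, the prune flag stays intact.
 */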

static void remove_chunk_node(struct audit_chunk *chunk, struct audit_node *p)
{
	struct audit_tree *owner = p->owner;

	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
}

static int chunk_count_trees(struct audit_chunk *chunk)
{
	int i;
	int ret = 0;

	for (i = 0; i < chunk->count; i++)
		if (chunk->owners[i].owner)
			ret++;
	return ret;
}

static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{
	struct audit_chunk *new;
	int size;

	fsnotify_group_lock(audit_tree_group);
	/*
	 * mark_mutex stabilizes the chunk attached to the mark so we can check
	 * whether it changed while we had dropped hash_lock.
	 */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
	    mark_chunk(mark) != chunk)
		goto out_mutex;

	size = chunk_count_trees(chunk);
	if (!size) {
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		list_del_rcu(&chunk->hash);
		replace_mark_chunk(mark, NULL);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		fsnotify_group_unlock(audit_tree_group);
		audit_mark_put_chunk(chunk);
		fsnotify_free_mark(mark);
		return;
	}

	new = alloc_chunk(size);
	if (!new)
		goto out_mutex;

	spin_lock(&hash_lock);
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(new, chunk);
	spin_unlock(&hash_lock);
	fsnotify_group_unlock(audit_tree_group);
	audit_mark_put_chunk(chunk);
	return;

out_mutex:
	fsnotify_group_unlock(audit_tree_group);
}

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		fsnotify_group_unlock(audit_tree_group);
		return -ENOMEM;
	}

	mark = alloc_mark();
	if (!mark) {
		fsnotify_group_unlock(audit_tree_group);
		kfree(chunk);
		return -ENOMEM;
	}

	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
		fsnotify_group_unlock(audit_tree_group);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		fsnotify_group_unlock(audit_tree_group);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	replace_mark_chunk(mark, chunk);
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Inserting into the hash table has to go last as once we do that RCU
	 * readers can see the chunk.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	fsnotify_group_unlock(audit_tree_group);
	/*
	 * Drop our initial reference. When the mark we point to is getting
	 * freed, we get notified through the ->freeing_mark callback and
	 * clean up the chunk pointing to this mark.
	 */
	fsnotify_put_mark(mark);
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk, *old;
	struct audit_node *p;
	int n;

	fsnotify_group_lock(audit_tree_group);
	mark = fsnotify_find_inode_mark(inode, audit_tree_group);
	if (!mark)
		return create_chunk(inode, tree);

	/*
	 * The found mark is guaranteed to be attached, and mark_mutex protects
	 * it from getting detached; that in turn guarantees there is a chunk
	 * attached to the mark.
	 */
	/* are we already there? */
	spin_lock(&hash_lock);
	old = mark_chunk(mark);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_group_unlock(audit_tree_group);
			fsnotify_put_mark(mark);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_group_unlock(audit_tree_group);
		fsnotify_put_mark(mark);
		return -ENOMEM;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		fsnotify_group_unlock(audit_tree_group);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	p = &chunk->owners[chunk->count - 1];
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(chunk, old);
	spin_unlock(&hash_lock);
	fsnotify_group_unlock(audit_tree_group);
	fsnotify_put_mark(mark); /* pair to fsnotify_find_inode_mark() */
	audit_mark_put_chunk(old);

	return 0;
}

static void audit_tree_log_remove_rule(struct audit_context *context,
				       struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(context, rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * Remove the tree from chunks. If 'tagged' is set, remove the tree only from
 * tagged chunks. The function expects that tagged chunks are all at the
 * beginning of the chunks list.
 */
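/*
 * That ordering is not accidental: trim_marked() below moves every node with
 * the "will prune" bit set to the head of tree->chunks before calling
 * prune_tree_chunks(tree, true), so the tagged walk can stop at the first
 * unmarked node instead of scanning the whole list.
 */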
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct audit_node *p;
		struct audit_chunk *chunk;
		struct fsnotify_mark *mark;

		p = list_first_entry(&victim->chunks, struct audit_node, list);
		/* have we run out of marked? */
		if (tagged && !(p->index & (1U<<31)))
			break;
		chunk = find_chunk(p);
		mark = chunk->mark;
		remove_chunk_node(chunk, p);
		/* Racing with audit_tree_freeing_mark()? */
		if (!mark)
			continue;
		fsnotify_get_mark(mark);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, mark);
		fsnotify_put_mark(mark);

		spin_lock(&hash_lock);
	}
	spin_unlock(&hash_lock);
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	prune_tree_chunks(victim, false);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct audit_node *node = list_entry(p, struct audit_node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}
	spin_unlock(&hash_lock);

	prune_tree_chunks(tree, true);

	spin_lock(&hash_lock);
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(audit_context(), tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

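/*
 * Both audit_trim_trees() and audit_tag_tree() walk tree_list with a local
 * "cursor" list_head spliced into it: the cursor marks the walk position so
 * that audit_filter_mutex can be dropped while a tree is being processed,
 * and moving the cursor past a tree (list_move()) keeps the iteration
 * stable even if other entries are added or removed in the meantime.
 */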
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct audit_node *node;
		struct path *paths;
		struct path array[16];
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_move(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		paths = collect_paths(&path, array, 16);
		path_put(&path);
		if (IS_ERR(paths))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			for (struct path *p = paths; p->dentry; p++) {
				struct inode *inode = p->dentry->d_inode;
				if (inode_to_key(inode) == chunk->key) {
					node->index &= ~(1U<<31);
					break;
				}
			}
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_paths(paths, array);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    (rule->listnr != AUDIT_FILTER_EXIT &&
	     rule->listnr != AUDIT_FILTER_URING_EXIT) ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mounts(struct path *paths, struct audit_tree *tree)
{
	for (struct path *p = paths; p->dentry; p++) {
		int err = tag_chunk(p->dentry->d_inode, tree);
		if (err)
			return err;
	}
	return 0;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree\n");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
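/*
 * Note that audit_filter_mutex is dropped while the mounts are tagged, so
 * the rule may be detached by audit_remove_tree_rule() in the meantime;
 * rule->tree is therefore only set after retaking the mutex and checking
 * that the rule is still on the tree's rlist.
 */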
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct path array[16];
	struct path *paths;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	paths = collect_paths(&path, array, 16);
	path_put(&path);
	if (IS_ERR(paths)) {
		err = PTR_ERR(paths);
		goto Err;
	}

	get_tree(tree);
	err = tag_mounts(paths, tree);
	drop_collected_paths(paths, array);

	if (!err) {
		struct audit_node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

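/*
 * audit_tag_tree() additionally splices a second list_head, "barrier", into
 * tree_list: trees that survive retagging are moved in front of the barrier,
 * and the second loop below walks everything between the list head and the
 * barrier to either commit (clear the prune bits) or roll back (trim_marked)
 * each tree, depending on whether any tag_mounts() call failed.
 */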
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct path array[16];
	struct path *paths;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	paths = collect_paths(&path2, array, 16);
	path_put(&path2);
	if (IS_ERR(paths))
		return PTR_ERR(paths);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_paths(paths, array);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_move(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = tag_mounts(paths, tree);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_move(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_move(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct audit_node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_paths(paths, array);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct audit_context *context)
{
	struct list_head *list = &context->killed_trees;

	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(context, victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(audit_context(), owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
				   struct inode *inode, struct inode *dir,
				   const struct qstr *file_name, u32 cookie)
{
	return 0;
}

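/*
 * ->freeing_mark is called when the inode the mark is attached to is being
 * evicted. Clearing mark->chunk under hash_lock here is what makes the
 * "Racing with audit_tree_freeing_mark()?" check in prune_tree_chunks()
 * see a NULL mark and skip the chunk we are already tearing down.
 */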
static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
				    struct fsnotify_group *group)
{
	struct audit_chunk *chunk;

	fsnotify_group_lock(mark->group);
	spin_lock(&hash_lock);
	chunk = mark_chunk(mark);
	replace_mark_chunk(mark, NULL);
	spin_unlock(&hash_lock);
	fsnotify_group_unlock(mark->group);
	if (chunk) {
		evict_chunk(chunk);
		audit_mark_put_chunk(chunk);
	}

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&mark->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_inode_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops, 0);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);