/* kernel/audit_tree.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732) */
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

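/*
 * One audit_tree per watched subtree; rules naming the same pathname
 * share it.  Lifetime and list discipline are described in the big
 * comment below.
 */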
struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

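/*
 * One audit_chunk per tagged inode, hung off the inode via the embedded
 * fsnotify mark; owners[] has one slot per tree that tagged this inode.
 */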
struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic, and
 * knowing which taggings are provisional makes a difference.
 */

static struct fsnotify_group *audit_tree_group;

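/* allocate a tree with refcount 1 and a copy of the pathname appended */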
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

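/*
 * Free a chunk outright, dropping the tree references still held by its
 * owners[] slots.
 */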
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

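/*
 * Allocate a chunk with room for @count owners: one reference in .refs,
 * mark initialized but not yet attached to any inode.
 */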
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

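/*
 * Bucket by the middle bits of the inode address; dividing by
 * L1_CACHE_BYTES discards the low, poorly-distributed bits.
 */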
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->inode)
		return;
	list = chunk_hash(entry->inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

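/*
 * Recover the containing chunk from a node pointer: node.index (minus
 * the 'will prune' bit) is the slot number, so stepping back that many
 * slots lands on owners[0].
 */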
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

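/*
 * Remove one owner from a chunk.  Since chunks are replaced wholesale on
 * tagging/untagging, this builds a copy one slot smaller, swaps it into
 * the hash and destroys the old mark.  Called and returns with hash_lock
 * held, but drops it in the middle.
 */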
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

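/*
 * Attach a fresh single-owner chunk to an inode that has no audit mark
 * yet; bails out harmlessly if the tree died in the meantime.
 */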
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

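/*
 * Add @tree as an owner of @inode: build a copy of the existing chunk
 * with one more owners[] slot and swap it in, or fall back to
 * create_chunk() if the inode carries no audit mark yet.
 */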
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could never have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find_inode_mark */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=");
	audit_log_string(ab, "remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

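/*
 * Detach every rule on the tree from its lists, log the removal and free
 * the rule after an RCU grace period.  Caller holds audit_filter_mutex.
 */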
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return d_backing_inode(mnt->mnt_root) == arg;
}

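/*
 * Walk all trees and drop the chunks no longer reachable under their
 * tree's current mounts; a cursor keeps our place on tree_list across
 * the spots where audit_filter_mutex is dropped.
 */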
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&prune_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_create(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree\n");
		prune_thread = NULL;
		return -ENOMEM;
	} else {
		wake_up_process(prune_thread);
		return 0;
	}
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

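/*
 * Tag the mounts collected at @new into every tree whose pathname lies
 * under @old.  The cursor/barrier pair marks how far the first pass got
 * on tree_list, so the second pass can commit (clear the 'will prune'
 * bits) or roll back via trim_marked() on failure.
 */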
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

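/*
 * The inode backing this chunk is going away: sever every tree rooted
 * here and queue the orphaned trees for pruning (or hand them to the
 * syscall-exit list if one was set up).
 */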
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, void *data, int data_type,
				   const unsigned char *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);