xref: /linux/security/integrity/ima/ima_iint.c (revision 0c93ea4064a209cdc36de8a9a3003d43d08f46f7)
/*
 * Copyright (C) 2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * File: ima_iint.c
 *	- implements the IMA hooks: ima_inode_alloc, ima_inode_free
 *	- cache integrity information associated with an inode
 *	  using a radix tree.
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/radix-tree.h>
#include "ima.h"

/* ima_iint_delete(), defined below, is the ima_inode_free hook */
#define ima_iint_delete ima_inode_free

RADIX_TREE(ima_iint_store, GFP_ATOMIC);
DEFINE_SPINLOCK(ima_iint_lock);

static struct kmem_cache *iint_cache __read_mostly;

/* ima_iint_find_get - return the iint associated with an inode
 *
 * ima_iint_find_get gets a reference to the iint. Caller must
 * remember to put the iint reference.
 */
struct ima_iint_cache *ima_iint_find_get(struct inode *inode)
{
	struct ima_iint_cache *iint;

	rcu_read_lock();
	iint = radix_tree_lookup(&ima_iint_store, (unsigned long)inode);
	if (!iint)
		goto out;
	kref_get(&iint->refcount);
out:
	rcu_read_unlock();
	return iint;
}
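
/*
 * Illustrative caller sketch, not part of the original file: as the comment
 * above says, ima_iint_find_get() takes a reference on the iint it returns,
 * so the caller must drop it with kref_put() when done.  The function name
 * ima_example_use_iint() is hypothetical, and the sketch assumes iint_free()
 * is visible through ima.h, as IMA's other callers require.
 */
static inline void ima_example_use_iint(struct inode *inode)
{
	struct ima_iint_cache *iint;

	iint = ima_iint_find_get(inode);
	if (!iint)
		return;		/* no iint cached for this inode */
	/* ... read or update the cached integrity data, typically
	 * under iint->mutex ...
	 */
	kref_put(&iint->refcount, iint_free);
}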

/* Allocate memory for the iint associated with the inode
 * from the iint_cache slab, initialize the iint, and
 * insert it into the radix tree.
 *
 * On success return a pointer to the iint; on failure return NULL.
 */
struct ima_iint_cache *ima_iint_insert(struct inode *inode)
{
	struct ima_iint_cache *iint = NULL;
	int rc = 0;

	if (!ima_initialized)
		return iint;
	iint = kmem_cache_alloc(iint_cache, GFP_KERNEL);
	if (!iint)
		return iint;

	rc = radix_tree_preload(GFP_KERNEL);
	if (rc < 0)
		goto out;

	spin_lock(&ima_iint_lock);
	rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
	spin_unlock(&ima_iint_lock);
	radix_tree_preload_end();	/* only a successful preload is ended */
out:
	if (rc < 0) {
		/* Either the preload or the insert failed: free our
		 * allocation.  On -EEXIST another task inserted an iint
		 * for this inode first, so return that one instead.
		 */
		kmem_cache_free(iint_cache, iint);
		if (rc == -EEXIST) {
			spin_lock(&ima_iint_lock);
			iint = radix_tree_lookup(&ima_iint_store,
						 (unsigned long)inode);
			spin_unlock(&ima_iint_lock);
		} else
			iint = NULL;
	}
	return iint;
}

/**
 * ima_inode_alloc - allocate an iint associated with an inode
 * @inode: pointer to the inode
 *
 * Return 0 on success, 1 on failure.
 */
int ima_inode_alloc(struct inode *inode)
{
	struct ima_iint_cache *iint;

	if (!ima_initialized)
		return 0;

	iint = ima_iint_insert(inode);
	if (!iint)
		return 1;
	return 0;
}

/* ima_iint_find_insert_get - get the iint associated with an inode
 *
 * Most iints are inserted at inode_alloc time; the exception is inodes
 * allocated before IMA's late_initcall runs. If no iint exists for the
 * inode, allocate, initialize and insert one, then take a reference on it.
 * The radix tree holds the initial reference (set in init_once); the
 * reference taken here belongs to the caller, who must drop it with
 * kref_put(&iint->refcount, iint_free).
 *
 * (The iint cache can't be initialized at security_initcall, before any
 * inodes are allocated; it has to wait at least until proc_init.)
 *
 * Return the iint.
 */
struct ima_iint_cache *ima_iint_find_insert_get(struct inode *inode)
{
	struct ima_iint_cache *iint = NULL;

	iint = ima_iint_find_get(inode);
	if (iint)
		return iint;

	iint = ima_iint_insert(inode);
	if (iint)
		kref_get(&iint->refcount);

	return iint;
}
EXPORT_SYMBOL_GPL(ima_iint_find_insert_get);
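
/*
 * Illustrative caller sketch, not part of the original file: a caller that
 * may see inodes created before IMA initialized uses
 * ima_iint_find_insert_get(), which allocates the iint on demand, and still
 * drops its reference when finished.  The name ima_example_measure_inode()
 * is hypothetical.
 */
static inline void ima_example_measure_inode(struct inode *inode)
{
	struct ima_iint_cache *iint;

	iint = ima_iint_find_insert_get(inode);
	if (!iint)
		return;		/* allocation failed or IMA not initialized */
	/* ... measurement bookkeeping on the iint goes here ... */
	kref_put(&iint->refcount, iint_free);
}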

/* iint_free - called when the iint refcount goes to zero */
void iint_free(struct kref *kref)
{
	struct ima_iint_cache *iint = container_of(kref, struct ima_iint_cache,
						   refcount);
	iint->version = 0;
	iint->flags = 0UL;
	if (iint->readcount != 0) {
		printk(KERN_INFO "%s: readcount: %ld\n", __func__,
		       iint->readcount);
		iint->readcount = 0;
	}
	if (iint->writecount != 0) {
		printk(KERN_INFO "%s: writecount: %ld\n", __func__,
		       iint->writecount);
		iint->writecount = 0;
	}
	if (iint->opencount != 0) {
		printk(KERN_INFO "%s: opencount: %ld\n", __func__,
		       iint->opencount);
		iint->opencount = 0;
	}
	kref_set(&iint->refcount, 1);
	kmem_cache_free(iint_cache, iint);
}

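/*
 * iint_rcu_free - RCU callback scheduled by ima_iint_delete(); drops the
 * radix tree's reference once any concurrent RCU readers have finished.
 */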
void iint_rcu_free(struct rcu_head *rcu_head)
{
	struct ima_iint_cache *iint = container_of(rcu_head,
						   struct ima_iint_cache, rcu);
	kref_put(&iint->refcount, iint_free);
}

/**
 * ima_iint_delete - called on integrity_inode_free
 * @inode: pointer to the inode
 *
 * Free the integrity information (iint) associated with an inode.
 */
void ima_iint_delete(struct inode *inode)
{
	struct ima_iint_cache *iint;

	if (!ima_initialized)
		return;
	spin_lock(&ima_iint_lock);
	iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
	spin_unlock(&ima_iint_lock);
	if (iint)
		call_rcu(&iint->rcu, iint_rcu_free);
}

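/*
 * init_once - kmem_cache constructor: initialize a newly constructed iint
 * with cleared state, an unlocked mutex and a single reference held.
 */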
static void init_once(void *foo)
{
	struct ima_iint_cache *iint = foo;

	memset(iint, 0, sizeof(*iint));
	iint->version = 0;
	iint->flags = 0UL;
	mutex_init(&iint->mutex);
	iint->readcount = 0;
	iint->writecount = 0;
	iint->opencount = 0;
	kref_set(&iint->refcount, 1);
}

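/*
 * ima_iintcache_init - create the slab cache used for iint allocations.
 * With SLAB_PANIC, a failure to create the cache panics the kernel instead
 * of returning an error, which is why this function can return void.
 */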
void ima_iintcache_init(void)
{
	iint_cache = kmem_cache_create("iint_cache",
				       sizeof(struct ima_iint_cache), 0,
				       SLAB_PANIC, init_once);
}