/* kernel/kcmp.c (revision 7aacf86b75bc5523d20fd9127104384fce51ce9c) */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fdtable.h>
#include <linux/string.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kcmp.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/eventpoll.h>
#include <linux/file.h>

#include <asm/unistd.h>

/*
 * We don't expose the real in-memory order of objects for security reasons.
 * Still, the comparison results should be suitable for sorting, so we
 * obfuscate kernel pointer values and compare the obfuscated values instead.
 *
 * The obfuscation is done in two steps. First we xor the kernel pointer with
 * a random value, which moves the pointer to a new position in a reordered
 * space. Second, we multiply the xored value by a large odd random number to
 * permute its bits even more (the odd multiplier guarantees that the product
 * is unique even after the high bits are truncated, since any odd number is
 * relatively prime to 2^n).
 *
 * Note also that the obfuscation itself is invisible to userspace and, if
 * needed, it can be changed to an alternate scheme.
 */
static unsigned long cookies[KCMP_TYPES][2] __read_mostly;

static long kptr_obfuscate(long v, int type)
{
	return (v ^ cookies[type][0]) * cookies[type][1];
}
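
/*
 * A small worked example of the scheme above, using made-up 8-bit values for
 * readability (the real cookies are random unsigned longs): with
 * cookies[t] = { 0x5a, 0x0d }, the pointers 0x10 and 0x20 obfuscate to
 * (0x10 ^ 0x5a) * 0x0d = 0x4a * 0x0d = 0x3c2 and
 * (0x20 ^ 0x5a) * 0x0d = 0x7a * 0x0d = 0x632. Truncated to 8 bits these are
 * 0xc2 and 0x32, so the visible ordering differs from the real in-memory
 * ordering, yet equal pointers still obfuscate to equal values and distinct
 * pointers stay distinct.
 */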

/*
 * 0 - equal, i.e. v1 = v2
 * 1 - less than, i.e. v1 < v2
 * 2 - greater than, i.e. v1 > v2
 * 3 - not equal but ordering unavailable (reserved for future)
 */
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
{
	long t1, t2;

	t1 = kptr_obfuscate((long)v1, type);
	t2 = kptr_obfuscate((long)v2, type);

	return (t1 < t2) | ((t1 > t2) << 1);
}
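
/*
 * The branchless expression above maps directly onto the table in the
 * comment: t1 == t2 gives 0 | (0 << 1) = 0, t1 < t2 gives 1 | (0 << 1) = 1,
 * and t1 > t2 gives 0 | (1 << 1) = 2; the value 3 is never produced here.
 */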

/* The caller must have pinned the task */
static struct file *
get_file_raw_ptr(struct task_struct *task, unsigned int idx)
{
	struct file *file = NULL;

	task_lock(task);
	rcu_read_lock();

	if (task->files)
		file = fcheck_files(task->files, idx);

	rcu_read_unlock();
	task_unlock(task);

	return file;
}

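/*
 * kcmp_lock() always acquires the two cred_guard_mutexes in a fixed
 * (address-based) order, so concurrent kcmp() calls on the same pair of
 * tasks cannot ABBA-deadlock; when both tasks share the same signal struct
 * the single mutex is taken and released only once.
 */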
static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
{
	if (likely(m2 != m1))
		mutex_unlock(m2);
	mutex_unlock(m1);
}

static int kcmp_lock(struct mutex *m1, struct mutex *m2)
{
	int err;

	if (m2 > m1)
		swap(m1, m2);

	err = mutex_lock_killable(m1);
	if (!err && likely(m1 != m2)) {
		err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
		if (err)
			mutex_unlock(m1);
	}

	return err;
}

#ifdef CONFIG_EPOLL
static int kcmp_epoll_target(struct task_struct *task1,
			     struct task_struct *task2,
			     unsigned long idx1,
			     struct kcmp_epoll_slot __user *uslot)
{
	struct file *filp, *filp_epoll, *filp_tgt;
	struct kcmp_epoll_slot slot;
	struct files_struct *files;

	if (copy_from_user(&slot, uslot, sizeof(slot)))
		return -EFAULT;

	filp = get_file_raw_ptr(task1, idx1);
	if (!filp)
		return -EBADF;

	files = get_files_struct(task2);
	if (!files)
		return -EBADF;

	spin_lock(&files->file_lock);
	filp_epoll = fcheck_files(files, slot.efd);
	if (filp_epoll)
		get_file(filp_epoll);
	else
		filp_tgt = ERR_PTR(-EBADF);
	spin_unlock(&files->file_lock);
	put_files_struct(files);

	if (filp_epoll) {
		filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
		fput(filp_epoll);
	}

	/* Either branch above left filp_tgt set: a file pointer or an ERR_PTR */
	if (IS_ERR(filp_tgt))
		return PTR_ERR(filp_tgt);

	return kcmp_ptr(filp, filp_tgt, KCMP_FILE);
}
#else
static int kcmp_epoll_target(struct task_struct *task1,
			     struct task_struct *task2,
			     unsigned long idx1,
			     struct kcmp_epoll_slot __user *uslot)
{
	return -EOPNOTSUPP;
}
#endif
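
/*
 * A minimal userspace sketch (not part of this file) of how KCMP_EPOLL_TFD is
 * meant to be driven, assuming the raw syscall(2) wrapper and the UAPI
 * <linux/kcmp.h> header; the helper name is made up for illustration:
 *
 *	#include <linux/kcmp.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int kcmp_epoll_tfd(pid_t pid1, pid_t pid2, int fd,
 *				  int efd, int tfd, unsigned int toff)
 *	{
 *		struct kcmp_epoll_slot slot = {
 *			.efd  = efd,
 *			.tfd  = tfd,
 *			.toff = toff,
 *		};
 *
 *		// 0 means fd in pid1 and the epoll target in pid2 share a
 *		// struct file; negative means error (errno is set)
 *		return syscall(SYS_kcmp, pid1, pid2, KCMP_EPOLL_TFD,
 *			       fd, (unsigned long)&slot);
 *	}
 */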

SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
		unsigned long, idx1, unsigned long, idx2)
{
	struct task_struct *task1, *task2;
	int ret;

	rcu_read_lock();

	/*
	 * Tasks are looked up in the caller's PID namespace only.
	 */
	task1 = find_task_by_vpid(pid1);
	task2 = find_task_by_vpid(pid2);
	if (!task1 || !task2)
		goto err_no_task;

	get_task_struct(task1);
	get_task_struct(task2);

	rcu_read_unlock();

	/*
	 * One should have enough rights to inspect task details.
	 */
	ret = kcmp_lock(&task1->signal->cred_guard_mutex,
			&task2->signal->cred_guard_mutex);
	if (ret)
		goto err;
	if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
	    !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
		ret = -EPERM;
		goto err_unlock;
	}

	switch (type) {
	case KCMP_FILE: {
		struct file *filp1, *filp2;

		filp1 = get_file_raw_ptr(task1, idx1);
		filp2 = get_file_raw_ptr(task2, idx2);

		if (filp1 && filp2)
			ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
		else
			ret = -EBADF;
		break;
	}
	case KCMP_VM:
		ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
		break;
	case KCMP_FILES:
		ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
		break;
	case KCMP_FS:
		ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
		break;
	case KCMP_SIGHAND:
		ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
		break;
	case KCMP_IO:
		ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
		break;
	case KCMP_SYSVSEM:
#ifdef CONFIG_SYSVIPC
		ret = kcmp_ptr(task1->sysvsem.undo_list,
			       task2->sysvsem.undo_list,
			       KCMP_SYSVSEM);
#else
		ret = -EOPNOTSUPP;
#endif
		break;
	case KCMP_EPOLL_TFD:
		ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2);
		break;
	default:
		ret = -EINVAL;
		break;
	}

err_unlock:
	kcmp_unlock(&task1->signal->cred_guard_mutex,
		    &task2->signal->cred_guard_mutex);
err:
	put_task_struct(task1);
	put_task_struct(task2);

	return ret;

err_no_task:
	rcu_read_unlock();
	return -ESRCH;
}
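
/*
 * Typical userspace consumption of the return convention above (0 equal,
 * 1 "less", 2 "greater", negative errno on failure), sketched here only for
 * illustration and assuming the raw syscall(2) interface; same_file() is a
 * made-up helper, not part of the kernel:
 *
 *	#include <linux/kcmp.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Returns 1 if fd1 in pid1 and fd2 in pid2 refer to the same
 *	// struct file, 0 if they don't, and -1 on error.
 *	static int same_file(pid_t pid1, pid_t pid2, int fd1, int fd2)
 *	{
 *		long ret = syscall(SYS_kcmp, pid1, pid2, KCMP_FILE, fd1, fd2);
 *
 *		if (ret < 0)
 *			return -1;
 *		return ret == 0;
 *	}
 */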

static __init int kcmp_cookies_init(void)
{
	int i;

	get_random_bytes(cookies, sizeof(cookies));

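	/*
	 * Force each multiplier cookie to be a large odd number: OR in the
	 * top bit and bit 0, so the multiplication in kptr_obfuscate() stays
	 * invertible modulo 2^BITS_PER_LONG.
	 */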
	for (i = 0; i < KCMP_TYPES; i++)
		cookies[i][1] |= (~(~0UL >>  1) | 1);

	return 0;
}
arch_initcall(kcmp_cookies_init);