// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock - Unique identification number generator
 *
 * Copyright © 2024-2025 Microsoft Corporation
 */

#include <kunit/test.h>
#include <linux/atomic.h>
#include <linux/random.h>
#include <linux/spinlock.h>

#include "common.h"
#include "id.h"

#define COUNTER_PRE_INIT 0

static atomic64_t next_id = ATOMIC64_INIT(COUNTER_PRE_INIT);

static void __init init_id(atomic64_t *const counter, const u32 random_32bits)
{
	u64 init;

	/*
	 * Ensures that 64-bit values are always used by user space (which
	 * may otherwise fail with -EOVERFLOW), and makes this testable.
	 */
	init = 1ULL << 32;

	/*
	 * Adds a large (2^32) random boot-time value to limit ID collisions
	 * in logs from different boots, and to limit the info leak about the
	 * number of initially (relative to the reader) created elements
	 * (e.g. domains).
	 */
	init += random_32bits;
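
	/*
	 * Illustration (example values): random_32bits == 3 yields
	 * init == (1ULL << 32) + 3, so the first ID always lands in
	 * [2^32, 2^33 - 1].
	 */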

	/*
	 * Sets the initial value once; subsequent calls are ignored.  This
	 * will be the first ID.
	 */
	atomic64_cmpxchg(counter, COUNTER_PRE_INIT, init);
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void __init test_init_min(struct kunit *const test)
{
	atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT);

	init_id(&counter, 0);
	KUNIT_EXPECT_EQ(test, atomic64_read(&counter), 1ULL + U32_MAX);
}

static void __init test_init_max(struct kunit *const test)
{
	atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT);

	init_id(&counter, ~0);
	KUNIT_EXPECT_EQ(test, atomic64_read(&counter), 1 + (2ULL * U32_MAX));
}

static void __init test_init_once(struct kunit *const test)
{
	const u64 first_init = 1ULL + U32_MAX;
	atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT);

	init_id(&counter, 0);
	KUNIT_EXPECT_EQ(test, atomic64_read(&counter), first_init);

	init_id(&counter, ~0);
	KUNIT_EXPECT_EQ_MSG(
		test, atomic64_read(&counter), first_init,
		"Should still have the same value after the subsequent init_id()");
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

void __init landlock_init_id(void)
{
	return init_id(&next_id, get_random_u32());
}

/*
 * It is not worth trying to hide the monotonic counter: it can still be
 * inferred (given N counter ranges), a reader allowed to see an inode number
 * should also be allowed to see its creation time, and a monotonic counter
 * makes it handy for user space to store and sort domain IDs.
 *
 * Returns the current value of next_id and increments it to leave room for
 * the next range.
 */
static u64 get_id_range(size_t number_of_ids, atomic64_t *const counter,
			u8 random_4bits)
{
	u64 id, step;

	/*
	 * We should return at least 1 ID, and we may need a set of
	 * consecutive ones (e.g. to generate a set of inodes).
	 */
	if (WARN_ON_ONCE(number_of_ids <= 0))
		number_of_ids = 1;

	/*
	 * Blurs the next ID guess with a 1/16 ratio.  We get 2^(64 - 4) -
	 * (2 * 2^32), so a bit less than 2^60 available IDs, which should be
	 * much more than enough considering the number of CPU cycles required
	 * to get a new ID (e.g. a full landlock_restrict_self() call), and
	 * the cost of draining all available IDs during the system's uptime.
	 */
	random_4bits = random_4bits % (1 << 4);
	step = number_of_ids + random_4bits;
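
	/*
	 * Illustration (example values): number_of_ids == 2 and
	 * random_4bits == 3 give step == 5; the caller gets IDs id and
	 * id + 1, and the next 3 IDs are skipped, which blurs the counter
	 * for observers.
	 */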

	/* It is safe to cast a signed atomic to an unsigned value. */
	id = atomic64_fetch_add(step, counter);

	/* Warns if landlock_init_id() was not called. */
	WARN_ON_ONCE(id == COUNTER_PRE_INIT);
	return id;
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_range1_rand0(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 0), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 1);
}

static void test_range1_rand1(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 1), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 2);
}

static void test_range1_rand16(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 16), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 1);
}

static void test_range2_rand0(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 0), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 2);
}

static void test_range2_rand1(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 1), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 3);
}

static void test_range2_rand2(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 2), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 4);
}

static void test_range2_rand16(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 16), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 2);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

/**
 * landlock_get_id_range - Get a range of unique IDs
 *
 * @number_of_ids: Number of IDs to hold.  Must be greater than zero.
 *
 * Returns: The first ID in the range.
 */
u64 landlock_get_id_range(size_t number_of_ids)
{
	return get_id_range(number_of_ids, &next_id, get_random_u8());
}
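
/*
 * Example use (hypothetical caller, not part of this file): reserving three
 * consecutive IDs, e.g. for a set of related objects:
 *
 *	const u64 first = landlock_get_id_range(3);
 *
 * IDs first, first + 1 and first + 2 then belong exclusively to the caller.
 */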

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static struct kunit_case __refdata test_cases[] = {
	/* clang-format off */
	KUNIT_CASE(test_init_min),
	KUNIT_CASE(test_init_max),
	KUNIT_CASE(test_init_once),
	KUNIT_CASE(test_range1_rand0),
	KUNIT_CASE(test_range1_rand1),
	KUNIT_CASE(test_range1_rand16),
	KUNIT_CASE(test_range2_rand0),
	KUNIT_CASE(test_range2_rand1),
	KUNIT_CASE(test_range2_rand2),
	KUNIT_CASE(test_range2_rand16),
	{}
	/* clang-format on */
};

static struct kunit_suite test_suite = {
	.name = "landlock_id",
	.test_cases = test_cases,
};

kunit_test_init_section_suite(test_suite);

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */