// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock - Unique identification number generator
 *
 * Copyright © 2024-2025 Microsoft Corporation
 */

#include <kunit/test.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/random.h>
#include <linux/spinlock.h>

#include "common.h"
#include "id.h"

#define COUNTER_PRE_INIT 0

static atomic64_t next_id = ATOMIC64_INIT(COUNTER_PRE_INIT);

static void __init init_id(atomic64_t *const counter, const u32 random_32bits)
{
	u64 init;

	/*
	 * Ensures that 64-bit values are always used by user space (or may
	 * fail with -EOVERFLOW), and makes this testable.
	 */
	init = BIT_ULL(32);

	/*
	 * Makes a large (2^32) boot-time value to limit ID collision in logs
	 * from different boots, and to limit info leak about the number of
	 * initially (relative to the reader) created elements (e.g. domains).
	 */
	init += random_32bits;

	/* Sets the value on the first call and ignores subsequent ones.  This
	 * will be the first ID. */
	atomic64_cmpxchg(counter, COUNTER_PRE_INIT, init);
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void __init test_init_min(struct kunit *const test)
{
	atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT);

	init_id(&counter, 0);
	KUNIT_EXPECT_EQ(test, atomic64_read(&counter), 1ULL + U32_MAX);
}

static void __init test_init_max(struct kunit *const test)
{
	atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT);

	init_id(&counter, ~0);
	KUNIT_EXPECT_EQ(test, atomic64_read(&counter), 1 + (2ULL * U32_MAX));
}

static void __init test_init_once(struct kunit *const test)
{
	const u64 first_init = 1ULL + U32_MAX;
	atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT);

	init_id(&counter, 0);
	KUNIT_EXPECT_EQ(test, atomic64_read(&counter), first_init);

	init_id(&counter, ~0);
	KUNIT_EXPECT_EQ_MSG(
		test, atomic64_read(&counter), first_init,
		"Should still have the same value after the subsequent init_id()");
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

void __init landlock_init_id(void)
{
	return init_id(&next_id, get_random_u32());
}

/*
 * It is not worth trying to hide the monotonic counter because it can still
 * be inferred (with N counter ranges), and if we are allowed to read the
 * inode number we should also be allowed to read the creation time anyway;
 * besides, it can be handy for user space to store and sort domain IDs.
 *
 * Returns the value of next_id and increments it to leave some space for the
 * next one.
 */
static u64 get_id_range(size_t number_of_ids, atomic64_t *const counter,
			u8 random_4bits)
{
	u64 id, step;

	/*
	 * We should return at least 1 ID, and we may need a set of consecutive
	 * ones (e.g. to generate a set of inodes).
	 */
	if (WARN_ON_ONCE(number_of_ids <= 0))
		number_of_ids = 1;

	/*
	 * Blurs the next ID guess with a 1/16 ratio.  We get 2^(64 - 4) -
	 * (2 * 2^32), so a bit less than 2^60 available IDs, which should be
	 * much more than enough considering the number of CPU cycles required
	 * to get a new ID (e.g. a full landlock_restrict_self() call), and the
	 * cost of draining all available IDs during the system's uptime.
	 */
	random_4bits &= 0b1111;
	step = number_of_ids + random_4bits;

	/* It is safe to cast a signed atomic to an unsigned value. */
	id = atomic64_fetch_add(step, counter);

	/* Warns if landlock_init_id() was not called. */
	WARN_ON_ONCE(id == COUNTER_PRE_INIT);
	return id;
}
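/*
 * Illustrative example (editor's sketch, not part of the original code):
 * if the counter holds 2^32 + 40, then get_id_range(2, &counter, 3) returns
 * 2^32 + 40 and advances the counter by 2 + 3 = 5, so the next caller starts
 * at 2^32 + 45.  The random 0-15 gap hides how many IDs the previous caller
 * actually reserved.
 */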
#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_range1_rand0(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 0), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 1);
}

static void test_range1_rand1(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 1), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 2);
}

static void test_range1_rand15(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 15), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 16);
}

static void test_range1_rand16(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 16), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 1);
}

static void test_range2_rand0(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 0), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 2);
}

static void test_range2_rand1(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 1), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 3);
}

static void test_range2_rand2(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 2), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 4);
}

static void test_range2_rand15(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 15), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 17);
}

static void test_range2_rand16(struct kunit *const test)
{
	atomic64_t counter;
	u64 init;

	init = get_random_u32();
	atomic64_set(&counter, init);
	KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 16), init);
	KUNIT_EXPECT_EQ(
		test, get_id_range(get_random_u8(), &counter, get_random_u8()),
		init + 2);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

/**
 * landlock_get_id_range - Get a range of unique IDs
 *
 * @number_of_ids: Number of IDs to hold.  Must be greater than zero.
 *
 * Returns: The first ID in the range.
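 *
 * Example (editor's sketch, not from the original source):
 *
 * .. code-block:: c
 *
 *	// Reserves three consecutive IDs, e.g. for a set of inodes.
 *	const u64 first = landlock_get_id_range(3);
 *	// first, first + 1 and first + 2 now belong to this caller; the
 *	// shared counter may advance further because of the random blur.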
 */
u64 landlock_get_id_range(size_t number_of_ids)
{
	return get_id_range(number_of_ids, &next_id, get_random_u8());
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static struct kunit_case __refdata test_cases[] = {
	/* clang-format off */
	KUNIT_CASE(test_init_min),
	KUNIT_CASE(test_init_max),
	KUNIT_CASE(test_init_once),
	KUNIT_CASE(test_range1_rand0),
	KUNIT_CASE(test_range1_rand1),
	KUNIT_CASE(test_range1_rand15),
	KUNIT_CASE(test_range1_rand16),
	KUNIT_CASE(test_range2_rand0),
	KUNIT_CASE(test_range2_rand1),
	KUNIT_CASE(test_range2_rand2),
	KUNIT_CASE(test_range2_rand15),
	KUNIT_CASE(test_range2_rand16),
	{}
	/* clang-format on */
};

static struct kunit_suite test_suite = {
	.name = "landlock_id",
	.test_cases = test_cases,
};

kunit_test_init_section_suite(test_suite);

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */