// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock - Domain management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 * Copyright © 2024-2025 Microsoft Corporation
 */

#include <kunit/test.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/path.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/uidgid.h>

#include "access.h"
#include "common.h"
#include "domain.h"
#include "id.h"

#ifdef CONFIG_AUDIT

/**
 * get_current_exe - Get the current task's executable path, if any
 *
 * @exe_str: Returned pointer to a path string with a lifetime tied to the
 *           returned buffer, if any.
 * @exe_size: Returned size of @exe_str (including the trailing null
 *            character), if any.
 *
 * Returns: A pointer to an allocated buffer where @exe_str points to, %NULL
 * if there is no executable path, or an error otherwise.
 */
static const void *get_current_exe(const char **const exe_str,
				   size_t *const exe_size)
{
	const size_t buffer_size = LANDLOCK_PATH_MAX_SIZE;
	struct mm_struct *mm = current->mm;
	/* Scope-based cleanup: released automatically unless ownership is transferred. */
	struct file *file __free(fput) = NULL;
	char *buffer __free(kfree) = NULL;
	const char *exe;
	ssize_t size;

	/* Kernel threads have no mm, hence no executable file. */
	if (!mm)
		return NULL;

	file = get_mm_exe_file(mm);
	if (!file)
		return NULL;

	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	/* d_path() writes the path at the end of @buffer and returns its start. */
	exe = d_path(&file->f_path, buffer, buffer_size);
	if (WARN_ON_ONCE(IS_ERR(exe)))
		/* Should never happen according to LANDLOCK_PATH_MAX_SIZE. */
		return ERR_CAST(exe);

	/* Length of the string from @exe to the end of @buffer, NUL included. */
	size = buffer + buffer_size - exe;
	if (WARN_ON_ONCE(size <= 0))
		return ERR_PTR(-ENAMETOOLONG);

	*exe_size = size;
	*exe_str = exe;
	/* Disarms the __free(kfree) cleanup: the caller now owns the buffer. */
	return no_free_ptr(buffer);
}

/*
 * Returns: A newly allocated object describing a domain, or an error
 * otherwise.
 */
static struct landlock_details *get_current_details(void)
{
	/* Cf. audit_log_d_path_exe() */
	static const char null_path[] = "(null)";
	const char *path_str = null_path;
	size_t path_size = sizeof(null_path);
	const void *buffer __free(kfree) = NULL;
	struct landlock_details *details;

	buffer = get_current_exe(&path_str, &path_size);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);
	/* On %NULL, path_str/path_size keep the "(null)" fallback values. */

	/*
	 * Create the new details according to the path's length. Do not
	 * allocate with GFP_KERNEL_ACCOUNT because it is independent from the
	 * caller.
	 */
	details =
		kzalloc(struct_size(details, exe_path, path_size), GFP_KERNEL);
	if (!details)
		return ERR_PTR(-ENOMEM);

	/* path_size includes the trailing NUL, so exe_path is terminated. */
	memcpy(details->exe_path, path_str, path_size);
	WARN_ON_ONCE(current_cred() != current_real_cred());
	details->pid = get_pid(task_pid(current));
	details->uid = from_kuid(&init_user_ns, current_uid());
	get_task_comm(details->comm, current);
	return details;
}

/**
 * landlock_init_hierarchy_log - Partially initialize landlock_hierarchy
 *
 * @hierarchy: The hierarchy to initialize.
 *
 * The current task is referenced as the domain that is enforcing the
 * restriction. The subjective credentials must not be in an overridden state.
 *
 * @hierarchy->parent and @hierarchy->usage should already be set.
 */
int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy)
{
	struct landlock_details *details;

	details = get_current_details();
	if (IS_ERR(details))
		return PTR_ERR(details);

	hierarchy->details = details;
	/* Reserves one unique ID for this new domain. */
	hierarchy->id = landlock_get_id_range(1);
	hierarchy->log_status = LANDLOCK_LOG_PENDING;
	hierarchy->log_same_exec = true;
	hierarchy->log_new_exec = false;
	atomic64_set(&hierarchy->num_denials, 0);
	return 0;
}

/*
 * Encodes @layer into the bitfield slot reserved for @access_bit: each
 * optional access right gets a field of HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1)
 * bits, and the field index is the rank of @access_bit among the optional
 * access rights (i.e. its "access weight").
 */
static deny_masks_t
get_layer_deny_mask(const access_mask_t all_existing_optional_access,
		    const unsigned long access_bit, const size_t layer)
{
	unsigned long access_weight;

	/* This may require change with new object types. */
	WARN_ON_ONCE(all_existing_optional_access !=
		     _LANDLOCK_ACCESS_FS_OPTIONAL);

	if (WARN_ON_ONCE(layer >= LANDLOCK_MAX_NUM_LAYERS))
		return 0;

	/* Rank of @access_bit among the optional access rights (1-based). */
	access_weight = hweight_long(all_existing_optional_access &
				     GENMASK(access_bit, 0));
	if (WARN_ON_ONCE(access_weight < 1))
		return 0;

	return layer
	       << ((access_weight - 1) * HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1));
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_get_layer_deny_mask(struct kunit *const test)
{
	const unsigned long truncate = BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE);
	const unsigned long ioctl_dev = BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV);

	KUNIT_EXPECT_EQ(test, 0,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    truncate, 0));
	KUNIT_EXPECT_EQ(test, 0x3,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    truncate, 3));

	KUNIT_EXPECT_EQ(test, 0,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    ioctl_dev, 0));
	KUNIT_EXPECT_EQ(test, 0xf0,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    ioctl_dev, 15));
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

deny_masks_t
landlock_get_deny_masks(const access_mask_t
				all_existing_optional_access,
			const access_mask_t optional_access,
			const layer_mask_t (*const layer_masks)[],
			const size_t layer_masks_size)
{
	const unsigned long access_opt = optional_access;
	unsigned long access_bit;
	deny_masks_t deny_masks = 0;

	/* This may require change with new object types. */
	WARN_ON_ONCE(access_opt !=
		     (optional_access & all_existing_optional_access));

	if (WARN_ON_ONCE(!layer_masks))
		return 0;

	if (WARN_ON_ONCE(!access_opt))
		return 0;

	/* Packs, per denied optional access, the highest denying layer. */
	for_each_set_bit(access_bit, &access_opt, layer_masks_size) {
		const layer_mask_t mask = (*layer_masks)[access_bit];

		/* Skips access rights not denied by any layer. */
		if (!mask)
			continue;

		/* __fls(1) == 0 */
		deny_masks |= get_layer_deny_mask(all_existing_optional_access,
						  access_bit, __fls(mask));
	}
	return deny_masks;
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_landlock_get_deny_masks(struct kunit *const test)
{
	const layer_mask_t layers1[BITS_PER_TYPE(access_mask_t)] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) |
							  BIT_ULL(9),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE)] = BIT_ULL(1),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV)] = BIT_ULL(2) |
							    BIT_ULL(0),
	};

	KUNIT_EXPECT_EQ(test, 0x1,
			landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
						LANDLOCK_ACCESS_FS_TRUNCATE,
						&layers1, ARRAY_SIZE(layers1)));
	KUNIT_EXPECT_EQ(test, 0x20,
			landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
						LANDLOCK_ACCESS_FS_IOCTL_DEV,
						&layers1, ARRAY_SIZE(layers1)));
	KUNIT_EXPECT_EQ(
		test, 0x21,
		landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
					LANDLOCK_ACCESS_FS_TRUNCATE |
						LANDLOCK_ACCESS_FS_IOCTL_DEV,
					&layers1, ARRAY_SIZE(layers1)));
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static struct kunit_case test_cases[] = {
	/* clang-format off */

	KUNIT_CASE(test_get_layer_deny_mask),
	KUNIT_CASE(test_landlock_get_deny_masks),
	{}
	/* clang-format on */
};

static struct kunit_suite test_suite = {
	.name = "landlock_domain",
	.test_cases = test_cases,
};

kunit_test_suite(test_suite);

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#endif /* CONFIG_AUDIT */