// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock - Domain management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 * Copyright © 2024-2025 Microsoft Corporation
 */

#include <kunit/test.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/path.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/uidgid.h>

#include "access.h"
#include "common.h"
#include "domain.h"
#include "id.h"

#ifdef CONFIG_AUDIT

/**
 * get_current_exe - Get the current's executable path, if any
 *
 * @exe_str: Returned pointer to a path string with a lifetime tied to the
 * returned buffer, if any.
 * @exe_size: Returned size of @exe_str (including the trailing null
 * character), if any.
 *
 * Return: A pointer to an allocated buffer where @exe_str point to, %NULL if
 * there is no executable path, or an error otherwise.
 */
static const void *get_current_exe(const char **const exe_str,
				   size_t *const exe_size)
{
	const size_t buffer_size = LANDLOCK_PATH_MAX_SIZE;
	struct mm_struct *mm = current->mm;
	/* Auto-released on every early return thanks to __free(). */
	struct file *file __free(fput) = NULL;
	char *buffer __free(kfree) = NULL;
	const char *exe;
	ssize_t size;

	/* Kernel threads have no mm, hence no executable path. */
	if (!mm)
		return NULL;

	file = get_mm_exe_file(mm);
	if (!file)
		return NULL;

	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	/* d_path() writes the string at the end of @buffer. */
	exe = d_path(&file->f_path, buffer, buffer_size);
	if (WARN_ON_ONCE(IS_ERR(exe)))
		/* Should never happen according to LANDLOCK_PATH_MAX_SIZE. */
		return ERR_CAST(exe);

	/*
	 * The string spans from @exe to the end of @buffer, so this size
	 * includes the trailing null character.
	 */
	size = buffer + buffer_size - exe;
	if (WARN_ON_ONCE(size <= 0))
		return ERR_PTR(-ENAMETOOLONG);

	*exe_size = size;
	*exe_str = exe;
	/* Disarms __free(kfree): ownership of @buffer moves to the caller. */
	return no_free_ptr(buffer);
}

/*
 * Return: A newly allocated object describing a domain, or an error
 * otherwise.
 */
static struct landlock_details *get_current_details(void)
{
	/* Cf. audit_log_d_path_exe() */
	static const char null_path[] = "(null)";
	const char *path_str = null_path;
	size_t path_size = sizeof(null_path);
	/* Owns the path string returned by get_current_exe(), if any. */
	const void *buffer __free(kfree) = NULL;
	struct landlock_details *details;

	buffer = get_current_exe(&path_str, &path_size);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	/*
	 * Create the new details according to the path's length. Do not
	 * allocate with GFP_KERNEL_ACCOUNT because it is independent from the
	 * caller.
	 */
	details = kzalloc_flex(*details, exe_path, path_size);
	if (!details)
		return ERR_PTR(-ENOMEM);

	/* @path_size counts the trailing null character, copied as well. */
	memcpy(details->exe_path, path_str, path_size);
	/* Pid reference is put by landlock_free_hierarchy_details(). */
	details->pid = get_pid(task_tgid(current));
	details->uid = from_kuid(&init_user_ns, current_uid());
	get_task_comm(details->comm, current);
	return details;
}

/**
 * landlock_init_hierarchy_log - Partially initialize landlock_hierarchy
 *
 * @hierarchy: The hierarchy to initialize.
 *
 * The current task is referenced as the domain that is enforcing the
 * restriction. The subjective credentials must not be in an overridden state.
 *
 * @hierarchy->parent and @hierarchy->usage should already be set.
 *
 * Return: 0 on success, -errno on failure.
 */
int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy)
{
	struct landlock_details *details;

	details = get_current_details();
	if (IS_ERR(details))
		return PTR_ERR(details);

	hierarchy->details = details;
	hierarchy->id = landlock_get_id_range(1);
	hierarchy->log_status = LANDLOCK_LOG_PENDING;
	hierarchy->log_same_exec = true;
	hierarchy->log_new_exec = false;
	atomic64_set(&hierarchy->num_denials, 0);
	return 0;
}

/*
 * Packs @layer into the deny_masks_t slot reserved for @access_bit: each
 * optional access right gets a fixed-width field (wide enough to store
 * LANDLOCK_MAX_NUM_LAYERS - 1), indexed by the rank of @access_bit among
 * the optional access rights.  Returns 0 on any inconsistency.
 */
static deny_masks_t
get_layer_deny_mask(const access_mask_t all_existing_optional_access,
		    const unsigned long access_bit, const size_t layer)
{
	unsigned long access_weight;

	/* This may require change with new object types. */
	WARN_ON_ONCE(all_existing_optional_access !=
		     _LANDLOCK_ACCESS_FS_OPTIONAL);

	if (WARN_ON_ONCE(layer >= LANDLOCK_MAX_NUM_LAYERS))
		return 0;

	/*
	 * Rank (1-based) of @access_bit among the optional access rights at
	 * or below it; selects which fixed-width field to shift @layer into.
	 */
	access_weight = hweight_long(all_existing_optional_access &
				     GENMASK(access_bit, 0));
	if (WARN_ON_ONCE(access_weight < 1))
		return 0;

	return layer
	       << ((access_weight - 1) * HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1));
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_get_layer_deny_mask(struct kunit *const test)
{
	const unsigned long truncate = BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE);
	const unsigned long ioctl_dev = BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV);

	/* TRUNCATE occupies the lowest field: layer stored at shift 0. */
	KUNIT_EXPECT_EQ(test, 0,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    truncate, 0));
	KUNIT_EXPECT_EQ(test, 0x3,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    truncate, 3));

	/* IOCTL_DEV occupies the next field: layer stored at shift 4. */
	KUNIT_EXPECT_EQ(test, 0,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    ioctl_dev, 0));
	KUNIT_EXPECT_EQ(test, 0xf0,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    ioctl_dev, 15));
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

/*
 * Builds the packed deny_masks_t for @optional_access: for each requested
 * optional access right, records the highest layer index that denies it
 * (the layer scan runs from the last layer downward, and only the first
 * match per access right is kept).
 */
deny_masks_t
landlock_get_deny_masks(const access_mask_t all_existing_optional_access,
			const access_mask_t optional_access,
			const struct layer_access_masks *const masks)
{
	const unsigned long access_opt = optional_access;
	unsigned long access_bit;
	deny_masks_t deny_masks = 0;
	/* Access rights already attributed to a (higher) layer. */
	access_mask_t all_denied = 0;

	/* This may require change with new object types. */
	WARN_ON_ONCE(!access_mask_subset(optional_access,
					 all_existing_optional_access));

	if (WARN_ON_ONCE(!masks))
		return 0;

	if (WARN_ON_ONCE(!access_opt))
		return 0;

	/* Walks layers from the last one down so the highest layer wins. */
	for (ssize_t i = ARRAY_SIZE(masks->access) - 1; i >= 0; i--) {
		const access_mask_t denied = masks->access[i] & optional_access;
		const unsigned long newly_denied = denied & ~all_denied;

		if (!newly_denied)
			continue;

		for_each_set_bit(access_bit, &newly_denied,
				 8 * sizeof(access_mask_t)) {
			deny_masks |= get_layer_deny_mask(
				all_existing_optional_access, access_bit, i);
		}
		all_denied |= denied;
	}
	return deny_masks;
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_landlock_get_deny_masks(struct kunit *const test)
{
	const struct layer_access_masks layers1 = {
		.access[0] = LANDLOCK_ACCESS_FS_EXECUTE |
			     LANDLOCK_ACCESS_FS_IOCTL_DEV,
		.access[1] = LANDLOCK_ACCESS_FS_TRUNCATE,
		.access[2] = LANDLOCK_ACCESS_FS_IOCTL_DEV,
		.access[9] = LANDLOCK_ACCESS_FS_EXECUTE,
	};

	/* TRUNCATE is only denied by layer 1: field value 1 at shift 0. */
	KUNIT_EXPECT_EQ(test, 0x1,
			landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
						LANDLOCK_ACCESS_FS_TRUNCATE,
						&layers1));
	/* IOCTL_DEV is denied by layers 0 and 2; the highest (2) is kept. */
	KUNIT_EXPECT_EQ(test, 0x20,
			landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
						LANDLOCK_ACCESS_FS_IOCTL_DEV,
						&layers1));
	/* Both requested: the two fields combine independently. */
	KUNIT_EXPECT_EQ(
		test, 0x21,
		landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
					LANDLOCK_ACCESS_FS_TRUNCATE |
						LANDLOCK_ACCESS_FS_IOCTL_DEV,
					&layers1));
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
/* KUnit cases for the deny-mask packing helpers above. */
static struct kunit_case test_cases[] = {
	/* clang-format off */
	KUNIT_CASE(test_get_layer_deny_mask),
	KUNIT_CASE(test_landlock_get_deny_masks),
	{}
	/* clang-format on */
};

static struct kunit_suite test_suite = {
	.name = "landlock_domain",
	.test_cases = test_cases,
};

/* Registers the suite; generated symbol names derive from "test_suite". */
kunit_test_suite(test_suite);

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#endif /* CONFIG_AUDIT */