// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock - Domain management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 * Copyright © 2024-2025 Microsoft Corporation
 */

#include <kunit/test.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/path.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/uidgid.h>

#include "access.h"
#include "common.h"
#include "domain.h"
#include "id.h"

#ifdef CONFIG_AUDIT

/**
 * get_current_exe - Get the current's executable path, if any
 *
 * @exe_str: Returned pointer to a path string with a lifetime tied to the
 *           returned buffer, if any.
 * @exe_size: Returned size of @exe_str (including the trailing null
 *            character), if any.
 *
 * Returns: A pointer to an allocated buffer where @exe_str point to, %NULL if
 * there is no executable path, or an error otherwise.
 *
 * Ownership of the returned buffer is transferred to the caller, which must
 * kfree() it; @exe_str points inside that buffer.
 */
static const void *get_current_exe(const char **const exe_str,
				   size_t *const exe_size)
{
	const size_t buffer_size = LANDLOCK_PATH_MAX_SIZE;
	struct mm_struct *mm = current->mm;
	/* __free(fput)/__free(kfree): auto-released on every early return. */
	struct file *file __free(fput) = NULL;
	char *buffer __free(kfree) = NULL;
	const char *exe;
	ssize_t size;

	/* No mm (e.g. kernel thread): no executable to report. */
	if (!mm)
		return NULL;

	file = get_mm_exe_file(mm);
	if (!file)
		return NULL;

	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	/*
	 * d_path() fills the buffer from its end and returns a pointer into
	 * it, or an error pointer on failure.
	 */
	exe = d_path(&file->f_path, buffer, buffer_size);
	if (WARN_ON_ONCE(IS_ERR(exe)))
		/* Should never happen according to LANDLOCK_PATH_MAX_SIZE. */
		return ERR_CAST(exe);

	/* Length of the path string plus its trailing null character. */
	size = buffer + buffer_size - exe;
	if (WARN_ON_ONCE(size <= 0))
		return ERR_PTR(-ENAMETOOLONG);

	*exe_size = size;
	*exe_str = exe;
	/* no_free_ptr() disarms __free(kfree): caller now owns the buffer. */
	return no_free_ptr(buffer);
}

/*
 * Returns: A newly allocated object describing a domain, or an error
 * otherwise.
 */
static struct landlock_details *get_current_details(void)
{
	/* Cf. audit_log_d_path_exe() */
	static const char null_path[] = "(null)";
	const char *path_str = null_path;
	size_t path_size = sizeof(null_path);
	const void *buffer __free(kfree) = NULL;
	struct landlock_details *details;

	/* On success, path_str/path_size point into (and size) the buffer. */
	buffer = get_current_exe(&path_str, &path_size);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	/*
	 * Create the new details according to the path's length. Do not
	 * allocate with GFP_KERNEL_ACCOUNT because it is independent from the
	 * caller.
	 */
	details =
		kzalloc(struct_size(details, exe_path, path_size), GFP_KERNEL);
	if (!details)
		return ERR_PTR(-ENOMEM);

	/* path_size includes the trailing null character. */
	memcpy(details->exe_path, path_str, path_size);
	/* get_pid() takes a reference; released when details are freed. */
	details->pid = get_pid(task_tgid(current));
	details->uid = from_kuid(&init_user_ns, current_uid());
	get_task_comm(details->comm, current);
	return details;
}

/**
 * landlock_init_hierarchy_log - Partially initialize landlock_hierarchy
 *
 * @hierarchy: The hierarchy to initialize.
 *
 * The current task is referenced as the domain that is enforcing the
 * restriction. The subjective credentials must not be in an overridden state.
 *
 * @hierarchy->parent and @hierarchy->usage should already be set.
 */
int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy)
{
	struct landlock_details *details;

	details = get_current_details();
	if (IS_ERR(details))
		return PTR_ERR(details);

	hierarchy->details = details;
	/* Reserves one unique ID for this domain. */
	hierarchy->id = landlock_get_id_range(1);
	hierarchy->log_status = LANDLOCK_LOG_PENDING;
	hierarchy->log_same_exec = true;
	hierarchy->log_new_exec = false;
	atomic64_set(&hierarchy->num_denials, 0);
	return 0;
}

/*
 * Packs @layer into the deny_masks_t slot of @access_bit: each optional
 * access gets HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1) bits, and the slot index
 * is the rank of @access_bit among the optional accesses (weight of the bits
 * up to and including @access_bit, minus one).
 *
 * Returns 0 (no contribution) on any inconsistent input, after warning once.
 */
static deny_masks_t
get_layer_deny_mask(const access_mask_t all_existing_optional_access,
		    const unsigned long access_bit, const size_t layer)
{
	unsigned long access_weight;

	/* This may require change with new object types. */
	WARN_ON_ONCE(all_existing_optional_access !=
		     _LANDLOCK_ACCESS_FS_OPTIONAL);

	if (WARN_ON_ONCE(layer >= LANDLOCK_MAX_NUM_LAYERS))
		return 0;

	/* Rank of @access_bit among the optional accesses (1-based). */
	access_weight = hweight_long(all_existing_optional_access &
				     GENMASK(access_bit, 0));
	if (WARN_ON_ONCE(access_weight < 1))
		return 0;

	return layer
	       << ((access_weight - 1) * HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1));
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_get_layer_deny_mask(struct kunit *const test)
{
	const unsigned long truncate = BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE);
	const unsigned long ioctl_dev = BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV);

	KUNIT_EXPECT_EQ(test, 0,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    truncate, 0));
	KUNIT_EXPECT_EQ(test, 0x3,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    truncate, 3));

	KUNIT_EXPECT_EQ(test, 0,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    ioctl_dev, 0));
	/* ioctl_dev is the second optional access: shifted by one slot. */
	KUNIT_EXPECT_EQ(test, 0xf0,
			get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
					    ioctl_dev, 15));
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

/*
 * Builds the packed per-access deny masks for @optional_access against
 * @masks.  Layers are walked from the highest index down; for each denied
 * access only the first (highest-index) denying layer is recorded —
 * presumably the most recently added layer, TODO confirm against callers.
 */
deny_masks_t
landlock_get_deny_masks(const access_mask_t all_existing_optional_access,
			const access_mask_t optional_access,
			const struct layer_access_masks *const masks)
{
	const unsigned long access_opt = optional_access;
	unsigned long access_bit;
	deny_masks_t deny_masks = 0;
	/* Accesses already attributed to a (higher) layer. */
	access_mask_t all_denied = 0;

	/* This may require change with new object types. */
	WARN_ON_ONCE(!access_mask_subset(optional_access,
					 all_existing_optional_access));

	if (WARN_ON_ONCE(!masks))
		return 0;

	if (WARN_ON_ONCE(!access_opt))
		return 0;

	for (ssize_t i = ARRAY_SIZE(masks->access) - 1; i >= 0; i--) {
		const access_mask_t denied = masks->access[i] & optional_access;
		/* Skip accesses already recorded for a higher layer. */
		const unsigned long newly_denied = denied & ~all_denied;

		if (!newly_denied)
			continue;

		for_each_set_bit(access_bit, &newly_denied,
				 8 * sizeof(access_mask_t)) {
			deny_masks |= get_layer_deny_mask(
				all_existing_optional_access, access_bit, i);
		}
		all_denied |= denied;
	}
	return deny_masks;
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_landlock_get_deny_masks(struct kunit *const test)
{
	const struct layer_access_masks layers1 = {
		.access[0] = LANDLOCK_ACCESS_FS_EXECUTE |
			     LANDLOCK_ACCESS_FS_IOCTL_DEV,
		.access[1] = LANDLOCK_ACCESS_FS_TRUNCATE,
		.access[2] = LANDLOCK_ACCESS_FS_IOCTL_DEV,
		.access[9] = LANDLOCK_ACCESS_FS_EXECUTE,
	};

	/* TRUNCATE denied only by layer 1: slot 0 holds 1. */
	KUNIT_EXPECT_EQ(test, 0x1,
			landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
						LANDLOCK_ACCESS_FS_TRUNCATE,
						&layers1));
	/* IOCTL_DEV denied by layers 0 and 2: highest (2) wins, slot 1. */
	KUNIT_EXPECT_EQ(test, 0x20,
			landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
						LANDLOCK_ACCESS_FS_IOCTL_DEV,
						&layers1));
	KUNIT_EXPECT_EQ(
		test, 0x21,
		landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
					LANDLOCK_ACCESS_FS_TRUNCATE |
						LANDLOCK_ACCESS_FS_IOCTL_DEV,
					&layers1));
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
/* KUnit registration for this file's test cases. */
static struct kunit_case test_cases[] = {
	/* clang-format off */
	KUNIT_CASE(test_get_layer_deny_mask),
	KUNIT_CASE(test_landlock_get_deny_masks),
	{}
	/* clang-format on */
};

static struct kunit_suite test_suite = {
	.name = "landlock_domain",
	.test_cases = test_cases,
};

kunit_test_suite(test_suite);

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#endif /* CONFIG_AUDIT */