// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock - Domain management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 * Copyright © 2024-2025 Microsoft Corporation
 */

#include <kunit/test.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/path.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/uidgid.h>

#include "access.h"
#include "common.h"
#include "domain.h"
#include "id.h"

#ifdef CONFIG_AUDIT

/**
 * get_current_exe - Get the current task's executable path, if any
 *
 * @exe_str: Returned pointer to a path string with a lifetime tied to the
 * returned buffer, if any.
 * @exe_size: Returned size of @exe_str (including the trailing null
 * character), if any.
 *
 * Returns: A pointer to an allocated buffer where @exe_str point to, %NULL if
 * there is no executable path (kernel thread, or no executable file), or an
 * error otherwise.  On success, the caller owns the buffer and must kfree()
 * it; @exe_str points inside that buffer.
 */
static const void *get_current_exe(const char **const exe_str,
				   size_t *const exe_size)
{
	const size_t buffer_size = LANDLOCK_PATH_MAX_SIZE;
	struct mm_struct *mm = current->mm;
	/* Scope-based cleanup: released automatically unless ownership moves. */
	struct file *file __free(fput) = NULL;
	char *buffer __free(kfree) = NULL;
	const char *exe;
	ssize_t size;

	/* Kernel threads have no mm, hence no executable path. */
	if (!mm)
		return NULL;

	file = get_mm_exe_file(mm);
	if (!file)
		return NULL;

	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	exe = d_path(&file->f_path, buffer, buffer_size);
	if (WARN_ON_ONCE(IS_ERR(exe)))
		/* Should never happen according to LANDLOCK_PATH_MAX_SIZE. */
		return ERR_CAST(exe);

	/*
	 * d_path() builds the string at the end of @buffer, so the string
	 * size (terminator included) is the distance from @exe to the end.
	 */
	size = buffer + buffer_size - exe;
	if (WARN_ON_ONCE(size <= 0))
		return ERR_PTR(-ENAMETOOLONG);

	*exe_size = size;
	*exe_str = exe;
	/* Disables the __free(kfree) cleanup: the caller now owns @buffer. */
	return no_free_ptr(buffer);
}

/*
 * Returns: A newly allocated object describing the current domain (exe path,
 * TGID reference, UID, comm), or an error otherwise.  The caller owns the
 * returned object.
 */
static struct landlock_details *get_current_details(void)
{
	/* Cf. audit_log_d_path_exe() */
	static const char null_path[] = "(null)";
	const char *path_str = null_path;
	size_t path_size = sizeof(null_path);
	const void *buffer __free(kfree) = NULL;
	struct landlock_details *details;

	/* Keeps the "(null)" fallback when there is no executable path. */
	buffer = get_current_exe(&path_str, &path_size);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	/*
	 * Create the new details according to the path's length. Do not
	 * allocate with GFP_KERNEL_ACCOUNT because it is independent from the
	 * caller.
	 *
	 * NOTE(review): kzalloc_flex() is expected to size the allocation for
	 * the exe_path[] flexible array (as struct_size() would) — confirm
	 * its GFP default matches the comment above.
	 */
	details = kzalloc_flex(*details, exe_path, path_size);
	if (!details)
		return ERR_PTR(-ENOMEM);

	memcpy(details->exe_path, path_str, path_size);
	/* Takes a reference on the thread-group PID, dropped with details. */
	details->pid = get_pid(task_tgid(current));
	details->uid = from_kuid(&init_user_ns, current_uid());
	get_task_comm(details->comm, current);
	return details;
}

/**
 * landlock_init_hierarchy_log - Partially initialize landlock_hierarchy
 *
 * @hierarchy: The hierarchy to initialize.
 *
 * The current task is referenced as the domain that is enforcing the
 * restriction. The subjective credentials must not be in an overridden state.
 *
 * @hierarchy->parent and @hierarchy->usage should already be set.
117 */ 118 int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy) 119 { 120 struct landlock_details *details; 121 122 details = get_current_details(); 123 if (IS_ERR(details)) 124 return PTR_ERR(details); 125 126 hierarchy->details = details; 127 hierarchy->id = landlock_get_id_range(1); 128 hierarchy->log_status = LANDLOCK_LOG_PENDING; 129 hierarchy->log_same_exec = true; 130 hierarchy->log_new_exec = false; 131 atomic64_set(&hierarchy->num_denials, 0); 132 return 0; 133 } 134 135 static deny_masks_t 136 get_layer_deny_mask(const access_mask_t all_existing_optional_access, 137 const unsigned long access_bit, const size_t layer) 138 { 139 unsigned long access_weight; 140 141 /* This may require change with new object types. */ 142 WARN_ON_ONCE(all_existing_optional_access != 143 _LANDLOCK_ACCESS_FS_OPTIONAL); 144 145 if (WARN_ON_ONCE(layer >= LANDLOCK_MAX_NUM_LAYERS)) 146 return 0; 147 148 access_weight = hweight_long(all_existing_optional_access & 149 GENMASK(access_bit, 0)); 150 if (WARN_ON_ONCE(access_weight < 1)) 151 return 0; 152 153 return layer 154 << ((access_weight - 1) * HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1)); 155 } 156 157 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST 158 159 static void test_get_layer_deny_mask(struct kunit *const test) 160 { 161 const unsigned long truncate = BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE); 162 const unsigned long ioctl_dev = BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV); 163 164 KUNIT_EXPECT_EQ(test, 0, 165 get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, 166 truncate, 0)); 167 KUNIT_EXPECT_EQ(test, 0x3, 168 get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, 169 truncate, 3)); 170 171 KUNIT_EXPECT_EQ(test, 0, 172 get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, 173 ioctl_dev, 0)); 174 KUNIT_EXPECT_EQ(test, 0xf0, 175 get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL, 176 ioctl_dev, 15)); 177 } 178 179 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ 180 181 deny_masks_t 182 landlock_get_deny_masks(const access_mask_t 
all_existing_optional_access, 183 const access_mask_t optional_access, 184 const struct layer_access_masks *const masks) 185 { 186 const unsigned long access_opt = optional_access; 187 unsigned long access_bit; 188 deny_masks_t deny_masks = 0; 189 access_mask_t all_denied = 0; 190 191 /* This may require change with new object types. */ 192 WARN_ON_ONCE(!access_mask_subset(optional_access, 193 all_existing_optional_access)); 194 195 if (WARN_ON_ONCE(!masks)) 196 return 0; 197 198 if (WARN_ON_ONCE(!access_opt)) 199 return 0; 200 201 for (ssize_t i = ARRAY_SIZE(masks->access) - 1; i >= 0; i--) { 202 const access_mask_t denied = masks->access[i] & optional_access; 203 const unsigned long newly_denied = denied & ~all_denied; 204 205 if (!newly_denied) 206 continue; 207 208 for_each_set_bit(access_bit, &newly_denied, 209 8 * sizeof(access_mask_t)) { 210 deny_masks |= get_layer_deny_mask( 211 all_existing_optional_access, access_bit, i); 212 } 213 all_denied |= denied; 214 } 215 return deny_masks; 216 } 217 218 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST 219 220 static void test_landlock_get_deny_masks(struct kunit *const test) 221 { 222 const struct layer_access_masks layers1 = { 223 .access[0] = LANDLOCK_ACCESS_FS_EXECUTE | 224 LANDLOCK_ACCESS_FS_IOCTL_DEV, 225 .access[1] = LANDLOCK_ACCESS_FS_TRUNCATE, 226 .access[2] = LANDLOCK_ACCESS_FS_IOCTL_DEV, 227 .access[9] = LANDLOCK_ACCESS_FS_EXECUTE, 228 }; 229 230 KUNIT_EXPECT_EQ(test, 0x1, 231 landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL, 232 LANDLOCK_ACCESS_FS_TRUNCATE, 233 &layers1)); 234 KUNIT_EXPECT_EQ(test, 0x20, 235 landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL, 236 LANDLOCK_ACCESS_FS_IOCTL_DEV, 237 &layers1)); 238 KUNIT_EXPECT_EQ( 239 test, 0x21, 240 landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL, 241 LANDLOCK_ACCESS_FS_TRUNCATE | 242 LANDLOCK_ACCESS_FS_IOCTL_DEV, 243 &layers1)); 244 } 245 246 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ 247 248 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST 249 
250 static struct kunit_case test_cases[] = { 251 /* clang-format off */ 252 KUNIT_CASE(test_get_layer_deny_mask), 253 KUNIT_CASE(test_landlock_get_deny_masks), 254 {} 255 /* clang-format on */ 256 }; 257 258 static struct kunit_suite test_suite = { 259 .name = "landlock_domain", 260 .test_cases = test_cases, 261 }; 262 263 kunit_test_suite(test_suite); 264 265 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ 266 267 #endif /* CONFIG_AUDIT */ 268