// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"

int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
		atomic_set(&s->rcache[i], -1);

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	atomic_set(&s->count, 0);

	s->convert = NULL;

	spin_lock_init(&s->lock);
	return 0;
}

int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *entry;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	entry = &s->isids[sid - 1];

	rc = context_cpy(&entry->context, context);
	if (rc)
		return rc;

	entry->set = 1;
	return 0;
}

static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}

static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}
	return 0;
}

static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES].context;
}

static struct context *sidtab_lookup(struct sidtab *s, u32 index)
{
	u32 count = (u32)atomic_read(&s->count);

	if (index >= count)
		return NULL;

	/* read entries after reading count */
	smp_rmb();

	return sidtab_do_lookup(s, index, 0);
}

static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].context : NULL;
}
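/*
 * Illustrative sketch (assumed value, not taken from the headers): SIDs
 * split into two ranges. SIDs 1..SECINITSID_NUM are the fixed initial
 * SIDs stored in isids[sid - 1]; any larger SID is a dynamic entry
 * stored in the tree at index sid - (SECINITSID_NUM + 1). Assuming,
 * purely for illustration, that SECINITSID_NUM were 27:
 *
 *	sid = 5    ->  isids[4]		(initial SID)
 *	sid = 28   ->  tree index 0	(first dynamic entry)
 *	sid = 100  ->  tree index 72
 *
 * sidtab_search_core() below implements exactly this split, falling
 * back to SECINITSID_UNLABELED when the SID is unknown or its context
 * is invalid (unless the caller forces a raw lookup).
 */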
static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
{
	struct context *context;

	if (sid != 0) {
		if (sid > SECINITSID_NUM)
			context = sidtab_lookup(s, sid - (SECINITSID_NUM + 1));
		else
			context = sidtab_lookup_initial(s, sid);
		if (context && (!context->len || force))
			return context;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct context *sidtab_search(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct context *sidtab_search_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}

static int sidtab_find_context(union sidtab_entry_inner entry,
			       u32 *pos, u32 count, u32 level,
			       struct context *context, u32 *index)
{
	int rc;
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_find_context(node->entries[i],
						 pos, count, level - 1,
						 context, index);
			if (rc == 0)
				return 0;
			i++;
		}
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			if (context_cmp(&node->entries[i].context, context)) {
				*index = *pos;
				return 0;
			}
			(*pos)++;
			i++;
		}
	}
	return -ENOENT;
}

static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
{
	while (pos > 0) {
		atomic_set(&s->rcache[pos], atomic_read(&s->rcache[pos - 1]));
		--pos;
	}
	atomic_set(&s->rcache[0], (int)index);
}

static void sidtab_rcache_push(struct sidtab *s, u32 index)
{
	sidtab_rcache_update(s, index, SIDTAB_RCACHE_SIZE - 1);
}

static int sidtab_rcache_search(struct sidtab *s, struct context *context,
				u32 *index)
{
	u32 i;

	for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
		int v = atomic_read(&s->rcache[i]);

		if (v < 0)
			continue;

		if (context_cmp(sidtab_do_lookup(s, (u32)v, 0), context)) {
			sidtab_rcache_update(s, (u32)v, i);
			*index = (u32)v;
			return 0;
		}
	}
	return -ENOENT;
}
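/*
 * Illustrative sketch of the rcache above (hypothetical contents,
 * assuming SIDTAB_RCACHE_SIZE were 3): the cache is a small
 * most-recently-used list of tree indices. A hit at position i is
 * moved to the front by shifting positions 0..i-1 down one slot:
 *
 *	rcache = { 7, 42, 13 }, lookup hits index 13 at pos 2
 *	sidtab_rcache_update(s, 13, 2)  ->  rcache = { 13, 7, 42 }
 *
 * A push of a new index behaves the same way with pos at the last
 * slot, dropping the least-recently-used entry.
 */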
static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
				 u32 *index)
{
	unsigned long flags;
	u32 count = (u32)atomic_read(&s->count);
	u32 count_locked, level, pos;
	struct sidtab_convert_params *convert;
	struct context *dst, *dst_convert;
	int rc;

	rc = sidtab_rcache_search(s, context, index);
	if (rc == 0)
		return 0;

	level = sidtab_level_from_count(count);

	/* read entries after reading count */
	smp_rmb();

	pos = 0;
	rc = sidtab_find_context(s->roots[level], &pos, count, level,
				 context, index);
	if (rc == 0) {
		sidtab_rcache_push(s, *index);
		return 0;
	}

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	convert = s->convert;
	count_locked = (u32)atomic_read(&s->count);
	level = sidtab_level_from_count(count_locked);

	/* if count has changed before we acquired the lock, then catch up */
	while (count < count_locked) {
		if (context_cmp(sidtab_do_lookup(s, count, 0), context)) {
			sidtab_rcache_push(s, count);
			*index = count;
			rc = 0;
			goto out_unlock;
		}
		++count;
	}

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	rc = context_cpy(dst, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	if (convert) {
		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(convert->target, count, 1);
		if (!dst_convert) {
			context_destroy(dst);
			goto out_unlock;
		}

		rc = convert->func(context, dst_convert, convert->args);
		if (rc) {
			context_destroy(dst);
			goto out_unlock;
		}

		/* at this point we know the insert won't fail */
		atomic_set(&convert->target->count, count + 1);
	}

	if (context->len)
		pr_info("SELinux: Context %s is not valid (left unmapped).\n",
			context->str);

	sidtab_rcache_push(s, count);
	*index = count;

	/* write entries before writing new count */
	smp_wmb();

	atomic_set(&s->count, count + 1);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}

int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid)
{
	int rc;
	u32 i;

	for (i = 0; i < SECINITSID_NUM; i++) {
		struct sidtab_isid_entry *entry = &s->isids[i];

		if (entry->set && context_cmp(context, &entry->context)) {
			*sid = i + 1;
			return 0;
		}
	}

	rc = sidtab_reverse_lookup(s, context, sid);
	if (rc)
		return rc;
	*sid += SECINITSID_NUM + 1;
	return 0;
}

static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
					   &edst->ptr_leaf->entries[i].context,
					   convert->args);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}
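/*
 * Conversion overview (summary of the scheme implemented below, with an
 * assumed timeline for illustration): sidtab_convert() walks the
 * existing entries into params->target while s->convert is set, so any
 * entry inserted concurrently by sidtab_reverse_lookup() is converted
 * into the target by the inserter itself ("live convert"):
 *
 *	CPU A (policy load)                CPU B (new SID request)
 *	-------------------                -----------------------
 *	lock; pre-allocate last
 *	  target leaf
 *	s->convert = params; unlock
 *	convert entries 0..count-1         lock; insert entry at index count
 *	  (tree walk, no lock held)        convert it into params->target
 *	                                   target->count = count + 1; unlock
 *
 * Pre-allocating the leaf that holds index count - 1 under the lock
 * ensures the unlocked tree walk and live convert never race to
 * allocate the same target node.
 */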
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = (u32)atomic_read(&s->count);
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	atomic_set(&params->target->count, count);

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely do the rest of the conversion outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux: Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
	}
	return rc;
}

static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			context_destroy(&node->entries[i].context);
		kfree(node);
	}
}

void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			context_destroy(&s->isids[i].context);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
}
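/*
 * Usage sketch (hypothetical caller, for illustration only; the real
 * callers live in the SELinux security server):
 *
 *	u32 sid;
 *	int rc = sidtab_context_to_sid(s, &ctx, &sid);
 *	if (rc)		// e.g. -ENOMEM
 *		return rc;
 *	...
 *	// later, translate back; returns the stored context, or the
 *	// unlabeled context if the SID is invalid or unmapped:
 *	struct context *c = sidtab_search(s, sid);
 */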