// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <stephen.smalley.work@gmail.com>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
#include "services.h"

struct sidtab_str_cache {
	struct rcu_head rcu_member;
	struct list_head lru_member;
	struct sidtab_entry *parent;
	u32 len;
	char str[] __counted_by(len);
};

#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
#define sid_to_index(sid)   ((sid) - (SECINITSID_NUM + 1))

int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	s->frozen = false;
	s->count = 0;
	s->convert = NULL;
	hash_init(s->context_to_sid);

	spin_lock_init(&s->lock);

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
	INIT_LIST_HEAD(&s->cache_lru_list);
	spin_lock_init(&s->cache_lock);
#endif

	return 0;
}

static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
{
	struct sidtab_entry *entry;
	u32 sid = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
		if (entry->hash != hash)
			continue;
		if (context_cmp(&entry->context, context)) {
			sid = entry->sid;
			break;
		}
	}
	rcu_read_unlock();
	return sid;
}

int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *isid;
	u32 hash;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	isid = &s->isids[sid - 1];

	rc = context_cpy(&isid->entry.context, context);
	if (rc)
		return rc;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	isid->entry.cache = NULL;
#endif
	isid->set = 1;

	hash = context_compute_hash(context);

	/*
	 * Multiple initial sids may map to the same context. Check that this
	 * context is not already represented in the context_to_sid hashtable
	 * to avoid duplicate entries and long linked lists upon hash
	 * collision.
	 */
	if (!context_to_sid(s, context, hash)) {
		isid->entry.sid = sid;
		isid->entry.hash = hash;
		hash_add(s->context_to_sid, &isid->entry.list, hash);
	}

	return 0;
}
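/*
 * Usage sketch for the two functions above (illustrative only; the real
 * callers live in the policy-load path in services.c/policydb code).
 * During policy load, the table is initialized once and then seeded with
 * each initial SID's context; get_initial_context() here is a
 * hypothetical helper standing in for however the caller obtains the
 * policy's context for a given initial SID:
 *
 *	rc = sidtab_init(s);
 *	for (sid = 1; sid <= SECINITSID_NUM && !rc; sid++)
 *		rc = sidtab_set_initial(s, sid, get_initial_context(sid));
 */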
int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
	int i;
	int chain_len = 0;
	int slots_used = 0;
	int entries = 0;
	int max_chain_len = 0;
	int cur_bucket = 0;
	struct sidtab_entry *entry;

	rcu_read_lock();
	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
		entries++;
		if (i == cur_bucket) {
			chain_len++;
			if (chain_len == 1)
				slots_used++;
		} else {
			cur_bucket = i;
			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
			/* this entry starts a new chain - count it, too */
			chain_len = 1;
			slots_used++;
		}
	}
	rcu_read_unlock();

	if (chain_len > max_chain_len)
		max_chain_len = chain_len;

	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
			 "longest chain: %d\n", entries,
			 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
}

static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}

static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}
	return 0;
}

static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
					     int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}

static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
	/* read entries only after reading count */
	u32 count = smp_load_acquire(&s->count);

	if (index >= count)
		return NULL;

	return sidtab_do_lookup(s, index, 0);
}

static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}
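/*
 * Worked example for sidtab_do_lookup() above (illustrative; the real
 * constants are derived in sidtab.h). Assuming SIDTAB_LEAF_ENTRIES == 512
 * and SIDTAB_INNER_SHIFT == 9, looking up index 70000 decomposes as:
 *
 *	level      = 1			(512 < 70001 <= 512 << 9)
 *	leaf_index = 70000 / 512 = 136	(slot in the level-1 inner node,
 *					 as capacity_shift drops 9 -> 0)
 *	leaf slot  = 70000 % 512 = 368
 *
 * i.e. the entry lives in slot 368 of the leaf hanging off slot 136 of
 * the level-1 root. Note that sidtab_lookup() pairs its
 * smp_load_acquire(&s->count) with the smp_store_release() in
 * sidtab_context_to_sid() below, so a lockless reader never dereferences
 * the nodes of an entry that has not yet been fully published.
 */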
static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
					       int force)
{
	if (sid != 0) {
		struct sidtab_entry *entry;

		if (sid > SECINITSID_NUM)
			entry = sidtab_lookup(s, sid_to_index(sid));
		else
			entry = sidtab_lookup_initial(s, sid);
		if (entry && (!entry->context.len || force))
			return entry;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}

int sidtab_context_to_sid(struct sidtab *s, struct context *context,
			  u32 *sid)
{
	unsigned long flags;
	u32 count, hash = context_compute_hash(context);
	struct sidtab_convert_params *convert;
	struct sidtab_entry *dst, *dst_convert;
	int rc;

	*sid = context_to_sid(s, context, hash);
	if (*sid)
		return 0;

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	rc = 0;
	*sid = context_to_sid(s, context, hash);
	if (*sid)
		goto out_unlock;

	if (unlikely(s->frozen)) {
		/*
		 * This sidtab is now frozen - tell the caller to abort and
		 * get the new one.
		 */
		rc = -ESTALE;
		goto out_unlock;
	}

	count = s->count;

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	dst->sid = index_to_sid(count);
	dst->hash = hash;

	rc = context_cpy(&dst->context, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	convert = s->convert;
	if (convert) {
		struct sidtab *target = convert->target;

		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(target, count, 1);
		if (!dst_convert) {
			context_destroy(&dst->context);
			goto out_unlock;
		}

		rc = services_convert_context(convert->args,
					      context, &dst_convert->context,
					      GFP_ATOMIC);
		if (rc) {
			context_destroy(&dst->context);
			goto out_unlock;
		}
		dst_convert->sid = index_to_sid(count);
		dst_convert->hash = context_compute_hash(&dst_convert->context);
		target->count = count + 1;

		hash_add_rcu(target->context_to_sid,
			     &dst_convert->list, dst_convert->hash);
	}

	if (context->len)
		pr_info("SELinux: Context %s is not valid (left unmapped).\n",
			context->str);

	*sid = index_to_sid(count);

	/* write entries before updating count */
	smp_store_release(&s->count, count + 1);
	hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}

static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
	struct sidtab_entry *entry;
	u32 i;

	for (i = 0; i < count; i++) {
		entry = sidtab_do_lookup(s, i, 0);
		entry->sid = index_to_sid(i);
		entry->hash = context_compute_hash(&entry->context);

		hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
	}
}
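/*
 * Caller-side sketch for the -ESTALE case in sidtab_context_to_sid()
 * (illustrative; the actual retry loops live in the callers in
 * services.c). Once a sidtab is frozen by a policy reload, the
 * translation is simply retried against the newly published policy's
 * sidtab:
 *
 *	retry:
 *		rcu_read_lock();
 *		sidtab = ...current policy's sidtab...;
 *		rc = sidtab_context_to_sid(sidtab, &context, sid);
 *		rcu_read_unlock();
 *		if (rc == -ESTALE)
 *			goto retry;
 */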
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = services_convert_context(convert->args,
						      &esrc->ptr_leaf->entries[i].context,
						      &edst->ptr_leaf->entries[i].context,
						      GFP_KERNEL);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}

int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely convert the tree outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux: Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}
	/*
	 * The hashtable can also be modified in sidtab_context_to_sid()
	 * so we must re-acquire the lock here.
	 */
	spin_lock_irqsave(&s->lock, flags);
	sidtab_convert_hashtable(params->target, count);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}
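/*
 * Live-convert protocol summary (descriptive): sidtab_convert() holds the
 * lock only long enough to (1) pre-allocate the target tree up to its
 * last leaf, so the tree walk above and a concurrent insertion via
 * sidtab_context_to_sid() can never both try to allocate the same node,
 * and (2) set s->convert, after which every newly inserted entry is
 * converted into the target at insertion time. The bulk of the existing
 * tree is then converted outside the lock, and the target's hashtable is
 * rebuilt under the lock at the end. On failure, clearing s->convert is
 * enough: the old table remains authoritative.
 */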
void sidtab_cancel_convert(struct sidtab *s)
{
	unsigned long flags;

	/* cancelling policy load - disable live convert of sidtab */
	spin_lock_irqsave(&s->lock, flags);
	s->convert = NULL;
	spin_unlock_irqrestore(&s->lock, flags);
}

void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
{
	spin_lock_irqsave(&s->lock, *flags);
	s->frozen = true;
	s->convert = NULL;
}

void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
{
	spin_unlock_irqrestore(&s->lock, *flags);
}

static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
	context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	kfree(rcu_dereference_raw(entry->cache));
#endif
}

static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			sidtab_destroy_entry(&node->entries[i]);
		kfree(node);
	}
}

void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			sidtab_destroy_entry(&s->isids[i].entry);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
	/*
	 * The context_to_sid hashtable's objects are all shared
	 * with the isids array and context tree, and so don't need
	 * to be cleaned up here.
	 */
}
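/*
 * sid2str cache overview (descriptive): the functions below keep up to
 * CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE recently used context
 * strings on an LRU list. Lookups are lockless under RCU; insertions and
 * evictions serialize on s->cache_lock, with evicted victims freed via
 * kfree_rcu() so concurrent readers can finish. A typical caller pattern
 * (illustrative) is:
 *
 *	if (sidtab_sid2str_get(s, entry, &str, &len)) {
 *		... build str from entry->context ...
 *		sidtab_sid2str_put(s, entry, str, len);
 *	}
 */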
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0

void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len)
{
	struct sidtab_str_cache *cache, *victim = NULL;
	unsigned long flags;

	/* do not cache invalid contexts */
	if (entry->context.len)
		return;

	spin_lock_irqsave(&s->cache_lock, flags);

	cache = rcu_dereference_protected(entry->cache,
					  lockdep_is_held(&s->cache_lock));
	if (cache) {
		/* entry in cache - just bump to the head of LRU list */
		list_move(&cache->lru_member, &s->cache_lru_list);
		goto out_unlock;
	}

	cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);
	if (!cache)
		goto out_unlock;

	if (s->cache_free_slots == 0) {
		/* pop a cache entry from the tail and free it */
		victim = container_of(s->cache_lru_list.prev,
				      struct sidtab_str_cache, lru_member);
		list_del(&victim->lru_member);
		rcu_assign_pointer(victim->parent->cache, NULL);
	} else {
		s->cache_free_slots--;
	}
	cache->parent = entry;
	cache->len = str_len;
	memcpy(cache->str, str, str_len);
	list_add(&cache->lru_member, &s->cache_lru_list);

	rcu_assign_pointer(entry->cache, cache);

out_unlock:
	spin_unlock_irqrestore(&s->cache_lock, flags);
	kfree_rcu(victim, rcu_member);
}

int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len)
{
	struct sidtab_str_cache *cache;
	int rc = 0;

	if (entry->context.len)
		return -ENOENT; /* do not cache invalid contexts */

	rcu_read_lock();

	cache = rcu_dereference(entry->cache);
	if (!cache) {
		rc = -ENOENT;
	} else {
		*out_len = cache->len;
		if (out) {
			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
			if (!*out)
				rc = -ENOMEM;
		}
	}

	rcu_read_unlock();

	if (!rc && out)
		sidtab_sid2str_put(s, entry, *out, *out_len);
	return rc;
}

#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */