/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/user_namespace.h>
#include "internal.h"

static struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* Any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name = "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
/*
 * Get the key quota record for a user, allocating a new record if one doesn't
 * already exist.
 */
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else if (user_ns < user->user_ns)
			p = &(*p)->rb_left;
		else if (user_ns > user->user_ns)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->user_ns = get_user_ns(user_ns);
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
out:
	return user;
}

/*
 * Dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);
		put_user_ns(user->user_ns);

		kfree(user);
	}
}
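/*
 * Illustrative caller-side sketch (not code from this file): a quota record
 * obtained with key_user_lookup() carries a reference that must be balanced
 * with key_user_put() once the caller has finished charging or examining it.
 * The uid variable here is hypothetical.
 *
 *	struct key_user *user;
 *
 *	user = key_user_lookup(uid, current_cred()->user->user_ns);
 *	if (!user)
 *		return -ENOMEM;
 *
 *	spin_lock(&user->lock);
 *	... inspect or adjust user->qnkeys / user->qnbytes ...
 *	spin_unlock(&user->lock);
 *
 *	key_user_put(user);
 */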
/*
 * Allocate a serial number for a key.  These are assigned randomly to avoid
 * security issues through covert channel problems.
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}
}

/**
 * key_alloc - Allocate a key of the specified type.
 * @type: The type of key to allocate.
 * @desc: The key description to allow the key to be searched out.
 * @uid: The owner of the new key.
 * @gid: The group ID for the new key's group permissions.
 * @cred: The credentials specifying UID namespace.
 * @perm: The permissions mask of the new key.
 * @flags: Flags specifying quota properties.
 *
 * Allocate a key of the specified type with the attributes given.  The key is
 * returned in an uninstantiated state and the caller needs to instantiate the
 * key before returning.
 *
 * The user's key count quota is updated to reflect the creation of the key and
 * the user's key data quota has the default for the key type reserved.  The
 * instantiation function should amend this as necessary.  If insufficient
 * quota is available, -EDQUOT will be returned.
 *
 * The LSM security modules can prevent a key being created, in which case
 * -EACCES will be returned.
 *
 * Returns a pointer to the new key if successful and an error code otherwise.
 *
 * Note that the caller needs to ensure the key type isn't unregistered whilst
 * this is in progress.  Internally this can be done by locking key_types_sem.
 * Externally, this can be done by either never unregistering the key type, or
 * making sure key_alloc() calls don't race with module unloading.
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, const struct cred *cred,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid, cred->user->user_ns);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		unsigned maxkeys = (uid == 0) ?
			key_quota_root_maxkeys : key_quota_maxkeys;
		unsigned maxbytes = (uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 >= maxkeys ||
			    user->qnbytes + quotalen >= maxbytes ||
			    user->qnbytes + quotalen < user->qnbytes)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmemdup(desc, desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, cred, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;
}
EXPORT_SYMBOL(key_alloc);
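/*
 * Illustrative sketch (not code from this file): a typical in-kernel caller
 * allocates an uninstantiated key and then instantiates it.  The description
 * and payload used here are hypothetical and error handling is trimmed.
 *
 *	const struct cred *cred = current_cred();
 *	struct key *key;
 *	int ret;
 *
 *	key = key_alloc(&key_type_user, "example:desc",
 *			cred->fsuid, cred->fsgid, cred,
 *			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
 *			KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *
 *	ret = key_instantiate_and_link(key, "hunter2", 7, NULL, NULL);
 *	if (ret < 0) {
 *		key_put(key);
 *		return ret;
 *	}
 */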
/**
 * key_payload_reserve - Adjust data quota reservation for the key's payload
 * @key: The key to make the reservation for.
 * @datalen: The amount of data payload the caller now wants.
 *
 * Adjust the amount of the owning user's key data quota that a key reserves.
 * If the amount is increased, then -EDQUOT may be returned if there isn't
 * enough free quota available.
 *
 * If successful, 0 is returned.
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int)datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = (key->user->uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		} else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;
}
EXPORT_SYMBOL(key_payload_reserve);
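/*
 * Illustrative sketch of a hypothetical key type's ->instantiate() op (not
 * code from this file): the default def_datalen reservation made by
 * key_alloc() is adjusted with key_payload_reserve() to match the real
 * payload before the data is attached.
 *
 *	static int example_instantiate(struct key *key,
 *				       const void *data, size_t datalen)
 *	{
 *		void *copy;
 *		int ret;
 *
 *		ret = key_payload_reserve(key, datalen);
 *		if (ret < 0)
 *			return ret;
 *
 *		copy = kmemdup(data, datalen, GFP_KERNEL);
 *		if (!copy)
 *			return -ENOMEM;
 *
 *		key->payload.data = copy;
 *		return 0;
 *	}
 */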
/*
 * Instantiate a key and link it into the target keyring atomically.  Must be
 * called with the target keyring's semaphore writelocked.  The target key's
 * semaphore need not be locked as instantiation is serialised by
 * key_construction_mutex.
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *authkey,
				      unsigned long *_prealloc)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				__key_link(keyring, key, _prealloc);

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;
}

/**
 * key_instantiate_and_link - Instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @data: The data to use to instantiate the key.
 * @datalen: The length of @data.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Instantiate a key that's in the uninstantiated state using the provided data
 * and, if successful, link it in to the destination keyring if one is
 * supplied.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up.  If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *authkey)
{
	unsigned long prealloc;
	int ret;

	if (keyring) {
		ret = __key_link_begin(keyring, key->type, key->description,
				       &prealloc);
		if (ret < 0)
			return ret;
	}

	ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey,
					 &prealloc);

	if (keyring)
		__key_link_end(keyring, key->type, prealloc);

	return ret;
}

EXPORT_SYMBOL(key_instantiate_and_link);

/**
 * key_negate_and_link - Negatively instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @timeout: The timeout on the negative key.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Negatively instantiate a key that's in the uninstantiated state and, if
 * successful, set its timeout and link it in to the destination keyring if one
 * is supplied.  The key and any links to the key will be automatically garbage
 * collected after the timeout expires.
 *
 * Negative keys are used to rate limit repeated request_key() calls by causing
 * them to return -ENOKEY until the negative key expires.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up.  If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *authkey)
{
	unsigned long prealloc;
	struct timespec now;
	int ret, awaken, link_ret = 0;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		link_ret = __key_link_begin(keyring, key->type,
					    key->description, &prealloc);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring && link_ret == 0)
			__key_link(keyring, key, &prealloc);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		__key_link_end(keyring, key->type, prealloc);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret == 0 ? link_ret : ret;
}

EXPORT_SYMBOL(key_negate_and_link);
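/*
 * Illustrative sketch (hypothetical helper, not code from this file): an
 * upcall handler that finishes key construction typically instantiates the
 * key on success and negatively instantiates it on failure, so that repeated
 * request_key() calls are rate limited rather than re-triggering the upcall.
 * The 60-second negative timeout is an arbitrary example value.
 *
 *	static int example_finish_construction(struct key *key,
 *					       struct key *authkey,
 *					       struct key *dest_keyring,
 *					       const void *data,
 *					       size_t datalen)
 *	{
 *		if (data)
 *			return key_instantiate_and_link(key, data, datalen,
 *							dest_keyring, authkey);
 *
 *		return key_negate_and_link(key, 60, dest_keyring, authkey);
 *	}
 */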
/*
 * Garbage collect keys in process context so that we don't have to disable
 * interrupts all over the place.
 *
 * key_put() schedules this rather than trying to do the cleanup itself, which
 * means key_put() doesn't have to sleep.
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;
}

/**
 * key_put - Discard a reference to a key.
 * @key: The key to discard a reference from.
 *
 * Discard a reference to a key, and when all the references are gone, we
 * schedule the cleanup task to come and pull it out of the tree in process
 * context at some later time.
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}
}
EXPORT_SYMBOL(key_put);

/*
 * Find a key by its serial number.
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it is awaiting deletion */
	if (atomic_read(&key->usage) == 0)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;
}
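/*
 * Illustrative sketch (not code from this file): key_lookup() returns the key
 * with its usage count already incremented, so a successful lookup must be
 * balanced with key_put().  The id variable here is hypothetical.
 *
 *	struct key *key;
 *
 *	key = key_lookup(id);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *
 *	... use the key ...
 *
 *	key_put(key);
 */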
/*
 * Find and lock the specified key type against removal.
 *
 * We return with the sem read-locked if successful.  If the type wasn't
 * available -ENOKEY is returned instead.
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;
}

/*
 * Unlock a key type locked by key_type_lookup().
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);
}

/*
 * Attempt to update an existing key.
 *
 * The key is given to us with an incremented refcount that we need to discard
 * if we get an error.
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;
}
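/*
 * Illustrative sketch (not code from this file): key_type_lookup() pins the
 * type by returning with key_types_sem read-locked, so a successful lookup
 * must be paired with key_type_put(); on failure the semaphore has already
 * been dropped.
 *
 *	struct key_type *ktype;
 *
 *	ktype = key_type_lookup("user");
 *	if (IS_ERR(ktype))
 *		return PTR_ERR(ktype);
 *
 *	... use ktype ...
 *
 *	key_type_put(ktype);
 */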
/**
 * key_create_or_update - Update or create and instantiate a key.
 * @keyring_ref: A pointer to the destination keyring with possession flag.
 * @type: The type of key.
 * @description: The searchable description for the key.
 * @payload: The data to use to instantiate or update the key.
 * @plen: The length of @payload.
 * @perm: The permissions mask for a new key.
 * @flags: The quota flags for a new key.
 *
 * Search the destination keyring for a key of the same description and if one
 * is found, update it, otherwise create and instantiate a new one and create a
 * link to it from that keyring.
 *
 * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
 * concocted.
 *
 * Returns a pointer to the new key if successful, -ENODEV if the key type
 * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
 * caller isn't permitted to modify the keyring or the LSM did not permit
 * creation of the key.
 *
 * On success, the possession flag from the keyring ref will be tacked on to
 * the key ref before it is returned.
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	unsigned long prealloc;
	const struct cred *cred = current_cred();
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	ret = __key_link_begin(keyring, ktype, description, &prealloc);
	if (ret < 0)
		goto error_2;

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;

		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
			perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL,
					 &prealloc);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
	__key_link_end(keyring, ktype, prealloc);
error_2:
	key_type_put(ktype);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	__key_link_end(keyring, ktype, prealloc);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;
}
EXPORT_SYMBOL(key_create_or_update);
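/*
 * Illustrative sketch (not code from this file): adding or updating a
 * user-defined key in a keyring the caller already holds a reference to.
 * The keyring, description and payload here are hypothetical.
 *
 *	key_ref_t kref;
 *
 *	kref = key_create_or_update(make_key_ref(keyring, 1),
 *				    "user", "example:desc",
 *				    "hunter2", 7,
 *				    KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(kref))
 *		return PTR_ERR(kref);
 *
 *	key_ref_put(kref);
 */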
/**
 * key_update - Update a key's contents.
 * @key_ref: The pointer (plus possession flag) to the key.
 * @payload: The data to be used to update the key.
 * @plen: The length of @payload.
 *
 * Attempt to update the contents of a key with the given payload data.  The
 * caller must be granted Write permission on the key.  Negative keys can be
 * instantiated by this method.
 *
 * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
 * type does not support updating.  The key type may return other errors.
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);

		ret = key->type->update(key, payload, plen);
		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

error:
	return ret;
}
EXPORT_SYMBOL(key_update);

/**
 * key_revoke - Revoke a key.
 * @key: The key to be revoked.
 *
 * Mark a key as being revoked and ask the type to free up its resources.  The
 * revocation timeout is set and the key and all its links will be
 * automatically garbage collected after key_gc_delay amount of time if they
 * are not manually dealt with first.
 */
void key_revoke(struct key *key)
{
	struct timespec now;
	time_t time;

	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	/* set the death time to no more than the expiry time */
	now = current_kernel_time();
	time = now.tv_sec;
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}

	up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);

/**
 * register_key_type - Register a type of key.
 * @ktype: The new key type.
 *
 * Register a new key type.
 *
 * Returns 0 on success or -EEXIST if a type of this name already exists.
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;
}
EXPORT_SYMBOL(register_key_type);
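/*
 * Illustrative sketch of a hypothetical "example" key type module (not code
 * from this file): the type is registered on module init and unregistered on
 * exit, at which point any remaining keys of the type are re-typed to "dead"
 * and garbage collected (see unregister_key_type() below).
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.def_datalen	= 0,
 *		.instantiate	= example_instantiate,
 *		.match		= example_match,
 *		.destroy	= example_destroy,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_key_type(&key_type_example);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		unregister_key_type(&key_type_example);
 *	}
 *
 *	module_init(example_module_init);
 *	module_exit(example_module_exit);
 */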
/**
 * unregister_key_type - Unregister a type of key.
 * @ktype: The key type.
 *
 * Unregister a key type and mark all the extant keys of this type as dead.
 * Those keys of this type are then destroyed to get rid of their payloads and
 * they and their links will be garbage collected as soon as possible.
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			key->type = &key_type_dead;
			set_bit(KEY_FLAG_DEAD, &key->flags);
		}
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

	key_schedule_gc(0);
}
EXPORT_SYMBOL(unregister_key_type);

/*
 * Initialise the key management state.
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
				    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);
}