/* key.c: basic authentication token and access key management
 *
 * Copyright (C) 2004-6 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

static kmem_cache_t	*key_jar;
struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(void *data);
static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);

/* we serialise key instantiation and link */
DECLARE_RWSEM(key_construction_sem);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name		= "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

 try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	INIT_LIST_HEAD(&candidate->consq);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
 found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
 out:
	return user;

} /* end key_user_lookup() */

/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}

} /* end key_user_put() */

/*****************************************************************************/
/*
 * insert a key with a fixed serial number
 */
static void __init __key_insert_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

} /* end __key_insert_serial() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}
	goto insert_here;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
 serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 2)
			key->serial = 2;

		if (!rb_parent(parent))
			p = &key_serial_tree.rb_node;
		else if (rb_parent(parent)->rb_left == parent)
			p = &(rb_parent(parent)->rb_left);
		else
			p = &(rb_parent(parent)->rb_right);

		parent = rb_next(parent);
		if (!parent)
			break;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto insert_here;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
 insert_here:
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);

} /* end key_alloc_serial() */

/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, struct task_struct *ctx,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
			    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
			    )
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmalloc(desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;

		memcpy(key->description, desc, desclen);
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, ctx, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

 error:
	return key;

 security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

 no_memory_3:
	kmem_cache_free(key_jar, key);
 no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
 no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

 no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);

/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
		    ) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *instkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (instkey)
				key_revoke(instkey);
		}
	}

	up_write(&key_construction_sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *instkey)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);

/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *instkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (instkey)
			key_revoke(instkey);
	}

	up_write(&key_construction_sem);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);

/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(void *data)
{
	struct rb_node *_n;
	struct key *key;

 go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

 found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);

/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

 not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

 found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

 error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

 found_kernel_type:
	return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
 out:
	return key_ref;

 error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       unsigned long flags)
{
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_perm_t perm;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* decide on the permissions we want */
	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
	perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

	if (ktype->read)
		perm |= KEY_POS_READ | KEY_USR_READ;

	if (ktype == &key_type_keyring || ktype->update)
		perm |= KEY_USR_WRITE;

	/* allocate a new key */
	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
			current, perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_PTR(PTR_ERR(key));
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

 error_3:
	up_write(&keyring->sem);
 error_2:
	key_type_put(ktype);
 error:
	return key_ref;

 found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);

/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);

		ret = key->type->update(key, payload, plen);
		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

 error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
	key_check(key);

	/* make sure no one's trying to change or use the key when we mark
	 * it */
	down_write(&key->sem);
	set_bit(KEY_FLAG_REVOKED, &key->flags);

	if (key->type->revoke)
		key->type->revoke(key);

	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

 out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);

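/*
 * The sketch below is purely illustrative and not part of this file: it shows
 * roughly how an in-kernel user might define a simple key type and register
 * it with register_key_type()/unregister_key_type(). The names
 * key_type_example, example_instantiate, example_match and example_destroy
 * are hypothetical, as is the 1024-byte payload limit.
 */
#if 0
static int example_instantiate(struct key *key, const void *data,
			       size_t datalen)
{
	char *blob;
	int ret;

	ret = -EINVAL;
	if (datalen == 0 || datalen > 1024)
		goto error;

	/* account the payload against the owner's quota first */
	ret = key_payload_reserve(key, datalen);
	if (ret < 0)
		goto error;

	ret = -ENOMEM;
	blob = kmalloc(datalen, GFP_KERNEL);
	if (!blob)
		goto error;

	memcpy(blob, data, datalen);
	key->payload.data = blob;
	ret = 0;

 error:
	return ret;
}

static int example_match(const struct key *key, const void *description)
{
	return strcmp(key->description, description) == 0;
}

static void example_destroy(struct key *key)
{
	kfree(key->payload.data);
}

static struct key_type key_type_example = {
	.name		= "example",
	.instantiate	= example_instantiate,
	.match		= example_match,
	.destroy	= example_destroy,
};

/* registration would typically be done from module init/exit functions */
static int __init example_register(void)
{
	return register_key_type(&key_type_example);
}

static void __exit example_unregister(void)
{
	unregister_key_type(&key_type_example);
}
#endif
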
/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype)
			key->type = &key_type_dead;
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);

/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

	/* record root's user standard keyrings */
	key_check(&root_user_keyring);
	key_check(&root_session_keyring);

	__key_insert_serial(&root_user_keyring);
	__key_insert_serial(&root_session_keyring);

	keyring_publish_name(&root_user_keyring);
	keyring_publish_name(&root_session_keyring);

	/* link the two root keyrings together */
	key_link(&root_session_keyring, &root_user_keyring);

} /* end key_init() */
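
/*
 * Another purely illustrative sketch, not part of this file: it shows the
 * usual lifetime of a key made with key_alloc() - instantiate it and link it
 * into a keyring atomically with key_instantiate_and_link(), then drop the
 * local reference with key_put(). The type key_type_example, the description
 * string and the destination keyring argument are hypothetical.
 */
#if 0
static int example_make_key(struct key *dest_keyring,
			    const void *payload, size_t plen)
{
	struct key *key;
	int ret;

	key = key_alloc(&key_type_example, "example:demo",
			current->fsuid, current->fsgid, current,
			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
			KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(key))
		return PTR_ERR(key);

	/* instantiate the key and link it into the keyring atomically */
	ret = key_instantiate_and_link(key, payload, plen, dest_keyring, NULL);

	/* on success the keyring holds its own reference; drop ours */
	key_put(key);
	return ret;
}
#endif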