/* key.c: basic authentication token and access key management
 *
 * Copyright (C) 2004-6 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

static struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DECLARE_RWSEM(key_construction_sem);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name		= "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	INIT_LIST_HEAD(&candidate->consq);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
out:
	return user;

} /* end key_user_lookup() */
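/*
 * Illustrative sketch, not part of this file: the lookup above allocates its
 * candidate record outside the spinlock (kmalloc may sleep) and then repeats
 * the search, since another task may have inserted the same UID in the
 * meantime.  The hypothetical userspace analogue below - names such as
 * urec_lookup() are invented for the example - shows the same
 * search/unlock/allocate/retry shape with a pthread mutex and a sorted list.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

struct urec {
	unsigned int uid;
	int refcount;
	struct urec *next;
};

static struct urec *urec_list;
static pthread_mutex_t urec_lock = PTHREAD_MUTEX_INITIALIZER;

static struct urec *urec_lookup(unsigned int uid)
{
	struct urec *candidate = NULL, *rec, **pp;

try_again:
	pthread_mutex_lock(&urec_lock);

	/* search the sorted list whilst holding the lock */
	for (pp = &urec_list; (rec = *pp) && rec->uid < uid; pp = &rec->next)
		;

	if (rec && rec->uid == uid) {
		/* found - take a reference and discard any spare candidate */
		rec->refcount++;
		pthread_mutex_unlock(&urec_lock);
		free(candidate);
		return rec;
	}

	if (!candidate) {
		/* allocate outside the lock, then repeat the search in case
		 * someone else added the record whilst we were away */
		pthread_mutex_unlock(&urec_lock);
		candidate = malloc(sizeof(*candidate));
		if (!candidate)
			return NULL;
		goto try_again;
	}

	/* still absent on the second pass - use the candidate record */
	candidate->uid = uid;
	candidate->refcount = 1;
	candidate->next = rec;
	*pp = candidate;
	pthread_mutex_unlock(&urec_lock);
	return candidate;
}
#endif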
/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}

} /* end key_user_put() */

/*****************************************************************************/
/*
 * insert a key with a fixed serial number
 */
static void __init __key_insert_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

} /* end __key_insert_serial() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}
	goto insert_here;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 2)
			key->serial = 2;

		if (!rb_parent(parent))
			p = &key_serial_tree.rb_node;
		else if (rb_parent(parent)->rb_left == parent)
			p = &(rb_parent(parent)->rb_left);
		else
			p = &(rb_parent(parent)->rb_right);

		parent = rb_next(parent);
		if (!parent)
			break;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto insert_here;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
insert_here:
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);

} /* end key_alloc_serial() */
/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, struct task_struct *ctx,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
			    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmemdup(desc, desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, ctx, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);
/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *instkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (instkey)
				key_revoke(instkey);
		}
	}

	up_write(&key_construction_sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *instkey)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);
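/*
 * Illustrative sketch, not part of this file: one way an in-kernel caller
 * might combine key_alloc() and key_instantiate_and_link().  The keyring
 * argument, description and payload are invented for the example; flags of 0
 * mean the key is accounted against the owner's quota (see key_alloc()
 * above), and the "user" key type is assumed to accept an arbitrary blob.
 */
#if 0
static int example_add_token(struct key *keyring)
{
	struct key *key;
	int ret;

	key = key_alloc(&key_type_user, "example:token",
			current->fsuid, current->fsgid, current,
			KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
			KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH,
			0);
	if (IS_ERR(key))
		return PTR_ERR(key);

	/* the key comes back uninstantiated; instantiate it with a payload
	 * and link it into the destination keyring in one atomic step */
	ret = key_instantiate_and_link(key, "secret", 6, keyring, NULL);

	/* drop our local reference; on success the keyring's link keeps the
	 * key alive, on failure the key is left to be cleaned up */
	key_put(key);
	return ret;
}
#endif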
/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *instkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (instkey)
			key_revoke(instkey);
	}

	up_write(&key_construction_sem);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);

/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come
 *   and pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);
/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       unsigned long flags)
{
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_perm_t perm;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* decide on the permissions we want */
	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
	perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

	if (ktype->read)
		perm |= KEY_POS_READ | KEY_USR_READ;

	if (ktype == &key_type_keyring || ktype->update)
		perm |= KEY_USR_WRITE;

	/* allocate a new key */
	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
			current, perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_PTR(PTR_ERR(key));
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
	up_write(&keyring->sem);
error_2:
	key_type_put(ktype);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
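/*
 * Illustrative sketch, not part of this file: a hypothetical in-kernel caller
 * adding or refreshing a "user"-type key in a keyring it holds a reference
 * to.  The description and payload are invented for the example; the second
 * argument to make_key_ref() marks the keyring as possessed by the caller,
 * and flags of 0 leave the new key subject to quota.
 */
#if 0
static int example_upsert_token(struct key *keyring)
{
	key_ref_t kref;

	kref = key_create_or_update(make_key_ref(keyring, 1),
				    "user", "example:token",
				    "new-secret", 10, 0);
	if (IS_ERR(kref))
		return PTR_ERR(kref);

	/* key_create_or_update() returns a pinned reference to the new or
	 * updated key; we don't need it any further */
	key_put(key_ref_to_ptr(kref));
	return 0;
}
#endif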
/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);

		ret = key->type->update(key, payload, plen);
		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
	key_check(key);

	/* make sure no one's trying to change or use the key when we mark
	 * it */
	down_write(&key->sem);
	set_bit(KEY_FLAG_REVOKED, &key->flags);

	if (key->type->revoke)
		key->type->revoke(key);

	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);
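/*
 * Illustrative sketch, not part of this file: a minimal key type registered
 * from module init.  key_create_or_update() above requires both ->match and
 * ->instantiate, so they are the least a useful type provides; the ->match
 * signature and the payload handling shown here are assumptions for the
 * example rather than anything defined in this file.
 */
#if 0
static int example_instantiate(struct key *key, const void *data,
			       size_t datalen)
{
	/* a real type would stash the payload somewhere reachable from
	 * key->payload and reserve quota with key_payload_reserve() */
	return datalen <= 64 ? 0 : -EINVAL;
}

static int example_match(const struct key *key, const void *description)
{
	return strcmp(key->description, description) == 0;
}

static struct key_type key_type_example = {
	.name		= "example",
	.def_datalen	= 0,
	.instantiate	= example_instantiate,
	.match		= example_match,
};

static int __init example_init(void)
{
	/* fails with -EEXIST if a type of the same name already exists */
	return register_key_type(&key_type_example);
}

static void __exit example_exit(void)
{
	unregister_key_type(&key_type_example);
}

module_init(example_init);
module_exit(example_exit);
#endif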
/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype)
			key->type = &key_type_dead;
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);

/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
				    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

	/* record root's user standard keyrings */
	key_check(&root_user_keyring);
	key_check(&root_session_keyring);

	__key_insert_serial(&root_user_keyring);
	__key_insert_serial(&root_session_keyring);

	keyring_publish_name(&root_user_keyring);
	keyring_publish_name(&root_session_keyring);

	/* link the two root keyrings together */
	key_link(&root_session_keyring, &root_user_keyring);

} /* end key_init() */