/* key.c: basic authentication token and access key management
 *
 * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/err.h>
#include "internal.h"

static kmem_cache_t	*key_jar;
static key_serial_t	key_serial_next = 3;
struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(void *data);
static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);

/* we serialise key instantiation and link */
DECLARE_RWSEM(key_construction_sem);

/* any key whose type gets unregistered will be re-typed to this */
struct key_type key_type_dead = {
	.name		= "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	INIT_LIST_HEAD(&candidate->consq);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	if (candidate)
		kfree(candidate);
out:
	return user;

} /* end key_user_lookup() */
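/* Illustrative sketch (not part of this file): key_user_lookup() returns a
 * refcounted quota record, so a hypothetical caller pairs it with
 * key_user_put() once the quota bookkeeping is done, along the lines of:
 *
 *	struct key_user *user;
 *
 *	user = key_user_lookup(current->fsuid);
 *	if (!user)
 *		return -ENOMEM;
 *	... adjust user->qnkeys / user->qnbytes under user->lock ...
 *	key_user_put(user);
 */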
/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}

} /* end key_user_put() */

/*****************************************************************************/
/*
 * insert a key with a fixed serial number
 */
static void __init __key_insert_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

} /* end __key_insert_serial() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - we work through all the serial numbers between 2 and 2^31-1 in turn and
 *   then wrap
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	spin_lock(&key_serial_lock);

	/* propose a likely serial number and look for a hole for it in the
	 * serial number tree */
	key->serial = key_serial_next;
	if (key->serial < 3)
		key->serial = 3;
	key_serial_next = key->serial + 1;

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}
	goto insert_here;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial = key_serial_next;
		if (key->serial < 2)
			key->serial = 2;
		key_serial_next = key->serial + 1;

		if (!parent->rb_parent)
			p = &key_serial_tree.rb_node;
		else if (parent->rb_parent->rb_left == parent)
			p = &parent->rb_parent->rb_left;
		else
			p = &parent->rb_parent->rb_right;

		parent = rb_next(parent);
		if (!parent)
			break;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto insert_here;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
insert_here:
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);

} /* end key_alloc_serial() */
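/* Illustrative sketch (not part of this file): the serial number assigned
 * above is the handle that is later presented back to the kernel, and
 * key_lookup() further down resolves it to a key again, taking a reference
 * that the caller must drop, roughly:
 *
 *	struct key *key = key_lookup(id);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);	 returns -ENOKEY if absent or dead
 *	... use the key ...
 *	key_put(key);
 */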
/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by either
 *   key_create_or_update() or by key_duplicate(); this prevents unregistration
 *   of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, key_perm_t perm,
		      int not_in_quota)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!not_in_quota) {
		spin_lock(&user->lock);
		if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
		    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
		    )
			goto no_quota;

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmalloc(desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;

		memcpy(key->description, desc, desclen);
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;

	if (!not_in_quota)
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!not_in_quota) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);

/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
		    ) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);
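/* Illustrative sketch (not part of this file): a key type's ->instantiate()
 * op would typically reserve quota for its payload before committing to it.
 * A hypothetical "example" type might do something like:
 *
 *	static int example_instantiate(struct key *key, const void *data,
 *				       size_t datalen)
 *	{
 *		void *copy;
 *		int ret;
 *
 *		ret = key_payload_reserve(key, datalen);
 *		if (ret < 0)
 *			return ret;	 fails with -EDQUOT if over quota
 *
 *		copy = kmalloc(datalen, GFP_KERNEL);
 *		if (!copy)
 *			return -ENOMEM;
 *		memcpy(copy, data, datalen);
 *		key->payload.data = copy;
 *		return 0;
 *	}
 */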
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *instkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (instkey)
				key_revoke(instkey);
		}
	}

	up_write(&key_construction_sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *instkey)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);

/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *instkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (instkey)
			key_revoke(instkey);
	}

	up_write(&key_construction_sem);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
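/* Illustrative sketch (not part of this file): once construction of a key
 * under request_key() completes, the result lands in one of the two exported
 * helpers above.  A hypothetical instantiation handler would do roughly:
 *
 *	ret = key_instantiate_and_link(key, payload, plen, dest_keyring,
 *				       instkey);
 *
 * on success, or cache the failure for "timeout" seconds with:
 *
 *	ret = key_negate_and_link(key, timeout, dest_keyring, instkey);
 *
 * Either call also revokes the authorisation key (instkey) and wakes anyone
 * sleeping on request_key_conswq.
 */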
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(void *data)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);
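/* Illustrative sketch (not part of this file): because key_put() only drops
 * the refcount and defers the actual teardown to the workqueue above, a
 * hypothetical caller simply brackets its use of a key with a reference:
 *
 *	struct key *key = key_get(some_key);
 *	... use the key ...
 *	key_put(key);
 *
 * The last key_put() schedules key_cleanup(), which then destroys the key in
 * process context.
 */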
/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */
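/* Illustrative sketch (not part of this file): key_type_lookup() returns with
 * key_types_sem held for reading, so a hypothetical caller must balance it
 * with key_type_put() once it has finished with the type:
 *
 *	struct key_type *ktype = key_type_lookup("user");
 *	if (IS_ERR(ktype))
 *		return PTR_ERR(ktype);	 -ENOKEY if the type isn't registered
 *	... the type cannot be unregistered while the sem is held ...
 *	key_type_put(ktype);
 */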
/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = -EACCES;
	if (!key_permission(key_ref, KEY_WRITE))
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);

	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       int not_in_quota)
{
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_perm_t perm;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	key_ref = ERR_PTR(-EACCES);
	if (!key_permission(keyring_ref, KEY_WRITE))
		goto error_3;

	/* search for an existing key of the same type and description in the
	 * destination keyring
	 */
	key_ref = __keyring_search_one(keyring_ref, ktype, description, 0);
	if (!IS_ERR(key_ref))
		goto found_matching_key;

	/* decide on the permissions we want */
	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK;
	perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK;

	if (ktype->read)
		perm |= KEY_POS_READ | KEY_USR_READ;

	if (ktype == &key_type_keyring || ktype->update)
		perm |= KEY_USR_WRITE;

	/* allocate a new key */
	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
			perm, not_in_quota);
	if (IS_ERR(key)) {
		key_ref = ERR_PTR(PTR_ERR(key));
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
	up_write(&keyring->sem);
error_2:
	key_type_put(ktype);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);

/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = -EACCES;
	if (!key_permission(key_ref, KEY_WRITE))
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);
		ret = key->type->update(key, payload, plen);

		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * duplicate a key, potentially with a revised description
 * - must be supported by the key type (keyrings for instance can be
 *   duplicated)
 */
struct key *key_duplicate(struct key *source, const char *desc)
{
	struct key *key;
	int ret;

	key_check(source);

	if (!desc)
		desc = source->description;

	down_read(&key_types_sem);

	ret = -EINVAL;
	if (!source->type->duplicate)
		goto error;

	/* allocate and instantiate a key */
	key = key_alloc(source->type, desc, current->fsuid, current->fsgid,
			source->perm, 0);
	if (IS_ERR(key))
		goto error_k;

	down_read(&source->sem);
	ret = key->type->duplicate(key, source);
	up_read(&source->sem);
	if (ret < 0)
		goto error2;

	atomic_inc(&key->user->nikeys);
	set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

error_k:
	up_read(&key_types_sem);
out:
	return key;

error2:
	key_put(key);
error:
	up_read(&key_types_sem);
	key = ERR_PTR(ret);
	goto out;

} /* end key_duplicate() */

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
	key_check(key);

	/* make sure no one's trying to change or use the key when we mark
	 * it */
	down_write(&key->sem);
	set_bit(KEY_FLAG_REVOKED, &key->flags);
	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);
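/* Illustrative sketch (not part of this file): key_create_or_update() is the
 * workhorse behind add_key(2).  Given a writable keyring reference, a
 * hypothetical caller goes roughly:
 *
 *	key_ref_t key_ref;
 *
 *	key_ref = key_create_or_update(keyring_ref, type, description,
 *				       payload, plen, 0);
 *	if (IS_ERR(key_ref))
 *		return PTR_ERR(key_ref);
 *	serial = key_ref_to_ptr(key_ref)->serial;
 *	key_ref_put(key_ref);
 */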
/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);

/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype)
			key->type = &key_type_dead;
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, 0xbd, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);

/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
				    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

	/* record root's user standard keyrings */
	key_check(&root_user_keyring);
	key_check(&root_session_keyring);

	__key_insert_serial(&root_user_keyring);
	__key_insert_serial(&root_session_keyring);

	keyring_publish_name(&root_user_keyring);
	keyring_publish_name(&root_session_keyring);

	/* link the two root keyrings together */
	key_link(&root_session_keyring, &root_user_keyring);

} /* end key_init() */
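/* Illustrative sketch (not part of this file): a hypothetical module would
 * pair register_key_type() and unregister_key_type() from its init/exit
 * hooks, supplying at least ->instantiate and ->match in its key_type:
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.def_datalen	= 0,
 *		.instantiate	= example_instantiate,
 *		.match		= example_match,
 *		.destroy	= example_destroy,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_key_type(&key_type_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_key_type(&key_type_example);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */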