xref: /linux/security/keys/key.c (revision d8327c784b51b57dac2c26cfad87dce0d68dfd98)
1 /* key.c: basic authentication token and access key management
2  *
3  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/security.h>
17 #include <linux/workqueue.h>
18 #include <linux/err.h>
19 #include "internal.h"
20 
21 static kmem_cache_t	*key_jar;
22 static key_serial_t	key_serial_next = 3;
23 struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
24 DEFINE_SPINLOCK(key_serial_lock);
25 
26 struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
27 DEFINE_SPINLOCK(key_user_lock);
28 
29 static LIST_HEAD(key_types_list);
30 static DECLARE_RWSEM(key_types_sem);
31 
32 static void key_cleanup(void *data);
33 static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);
34 
35 /* we serialise key instantiation and link */
36 DECLARE_RWSEM(key_construction_sem);
37 
38 /* any key whose type gets unregistered will be re-typed to this */
39 static struct key_type key_type_dead = {
40 	.name		= "dead",
41 };
42 
43 #ifdef KEY_DEBUGGING
44 void __key_check(const struct key *key)
45 {
46 	printk("__key_check: key %p {%08x} should be {%08x}\n",
47 	       key, key->magic, KEY_DEBUG_MAGIC);
48 	BUG();
49 }
50 #endif
51 
52 /*****************************************************************************/
53 /*
54  * get the key quota record for a user, allocating a new record if one doesn't
55  * already exist
56  */
57 struct key_user *key_user_lookup(uid_t uid)
58 {
59 	struct key_user *candidate = NULL, *user;
60 	struct rb_node *parent = NULL;
61 	struct rb_node **p;
62 
63  try_again:
64 	p = &key_user_tree.rb_node;
65 	spin_lock(&key_user_lock);
66 
67 	/* search the tree for a user record with a matching UID */
68 	while (*p) {
69 		parent = *p;
70 		user = rb_entry(parent, struct key_user, node);
71 
72 		if (uid < user->uid)
73 			p = &(*p)->rb_left;
74 		else if (uid > user->uid)
75 			p = &(*p)->rb_right;
76 		else
77 			goto found;
78 	}
79 
80 	/* if we get here, we failed to find a match in the tree */
81 	if (!candidate) {
82 		/* allocate a candidate user record if we don't already have
83 		 * one */
84 		spin_unlock(&key_user_lock);
85 
86 		user = NULL;
87 		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
88 		if (unlikely(!candidate))
89 			goto out;
90 
91 		/* the allocation may have slept, so we need to repeat the
92 		 * search in case someone else added the record whilst we
93 		 * were asleep */
94 		goto try_again;
95 	}
96 
97 	/* if we get here, then the user record still hadn't appeared on the
98 	 * second pass - so we use the candidate record */
99 	atomic_set(&candidate->usage, 1);
100 	atomic_set(&candidate->nkeys, 0);
101 	atomic_set(&candidate->nikeys, 0);
102 	candidate->uid = uid;
103 	candidate->qnkeys = 0;
104 	candidate->qnbytes = 0;
105 	spin_lock_init(&candidate->lock);
106 	INIT_LIST_HEAD(&candidate->consq);
107 
108 	rb_link_node(&candidate->node, parent, p);
109 	rb_insert_color(&candidate->node, &key_user_tree);
110 	spin_unlock(&key_user_lock);
111 	user = candidate;
112 	goto out;
113 
114 	/* okay - we found a user record for this UID */
115  found:
116 	atomic_inc(&user->usage);
117 	spin_unlock(&key_user_lock);
118 	kfree(candidate);
119  out:
120 	return user;
121 
122 } /* end key_user_lookup() */
123 
124 /*****************************************************************************/
125 /*
126  * dispose of a user structure
127  */
128 void key_user_put(struct key_user *user)
129 {
130 	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
131 		rb_erase(&user->node, &key_user_tree);
132 		spin_unlock(&key_user_lock);
133 
134 		kfree(user);
135 	}
136 
137 } /* end key_user_put() */
138 
139 /*****************************************************************************/
140 /*
141  * insert a key with a fixed serial number
142  */
143 static void __init __key_insert_serial(struct key *key)
144 {
145 	struct rb_node *parent, **p;
146 	struct key *xkey;
147 
148 	parent = NULL;
149 	p = &key_serial_tree.rb_node;
150 
151 	while (*p) {
152 		parent = *p;
153 		xkey = rb_entry(parent, struct key, serial_node);
154 
155 		if (key->serial < xkey->serial)
156 			p = &(*p)->rb_left;
157 		else if (key->serial > xkey->serial)
158 			p = &(*p)->rb_right;
159 		else
160 			BUG();
161 	}
162 
163 	/* we've found a suitable hole - arrange for this key to occupy it */
164 	rb_link_node(&key->serial_node, parent, p);
165 	rb_insert_color(&key->serial_node, &key_serial_tree);
166 
167 } /* end __key_insert_serial() */
168 
169 /*****************************************************************************/
170 /*
171  * assign a key the next unique serial number
172  * - we work through all the serial numbers between 2 and 2^31-1 in turn and
173  *   then wrap
174  */
175 static inline void key_alloc_serial(struct key *key)
176 {
177 	struct rb_node *parent, **p;
178 	struct key *xkey;
179 
180 	spin_lock(&key_serial_lock);
181 
182 	/* propose a likely serial number and look for a hole for it in the
183 	 * serial number tree */
184 	key->serial = key_serial_next;
185 	if (key->serial < 3)
186 		key->serial = 3;
187 	key_serial_next = key->serial + 1;
188 
189 	parent = NULL;
190 	p = &key_serial_tree.rb_node;
191 
192 	while (*p) {
193 		parent = *p;
194 		xkey = rb_entry(parent, struct key, serial_node);
195 
196 		if (key->serial < xkey->serial)
197 			p = &(*p)->rb_left;
198 		else if (key->serial > xkey->serial)
199 			p = &(*p)->rb_right;
200 		else
201 			goto serial_exists;
202 	}
203 	goto insert_here;
204 
205 	/* we found a key with the proposed serial number - walk the tree from
206 	 * that point looking for the next unused serial number */
207  serial_exists:
208 	for (;;) {
209 		key->serial = key_serial_next;
210 		if (key->serial < 2)
211 			key->serial = 2;
212 		key_serial_next = key->serial + 1;
213 
214 		if (!parent->rb_parent)
215 			p = &key_serial_tree.rb_node;
216 		else if (parent->rb_parent->rb_left == parent)
217 			p = &parent->rb_parent->rb_left;
218 		else
219 			p = &parent->rb_parent->rb_right;
220 
221 		parent = rb_next(parent);
222 		if (!parent)
223 			break;
224 
225 		xkey = rb_entry(parent, struct key, serial_node);
226 		if (key->serial < xkey->serial)
227 			goto insert_here;
228 	}
229 
230 	/* we've found a suitable hole - arrange for this key to occupy it */
231  insert_here:
232 	rb_link_node(&key->serial_node, parent, p);
233 	rb_insert_color(&key->serial_node, &key_serial_tree);
234 
235 	spin_unlock(&key_serial_lock);
236 
237 } /* end key_alloc_serial() */
238 
239 /*****************************************************************************/
240 /*
241  * allocate a key of the specified type
242  * - update the user's quota to reflect the existence of the key
243  * - called from a key-type operation with key_types_sem read-locked by
244  *   key_create_or_update()
245  *   - this prevents unregistration of the key type
246  * - upon return the key is as yet uninstantiated; the caller needs to either
247  *   instantiate the key or discard it before returning
248  */
249 struct key *key_alloc(struct key_type *type, const char *desc,
250 		      uid_t uid, gid_t gid, key_perm_t perm,
251 		      int not_in_quota)
252 {
253 	struct key_user *user = NULL;
254 	struct key *key;
255 	size_t desclen, quotalen;
256 	int ret;
257 
258 	key = ERR_PTR(-EINVAL);
259 	if (!desc || !*desc)
260 		goto error;
261 
262 	desclen = strlen(desc) + 1;
263 	quotalen = desclen + type->def_datalen;
264 
265 	/* get hold of the key tracking for this user */
266 	user = key_user_lookup(uid);
267 	if (!user)
268 		goto no_memory_1;
269 
270 	/* check that the user's quota permits allocation of another key and
271 	 * its description */
272 	if (!not_in_quota) {
273 		spin_lock(&user->lock);
274 		if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
275 		    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
276 		    )
277 			goto no_quota;
278 
279 		user->qnkeys++;
280 		user->qnbytes += quotalen;
281 		spin_unlock(&user->lock);
282 	}
283 
284 	/* allocate and initialise the key and its description */
285 	key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
286 	if (!key)
287 		goto no_memory_2;
288 
289 	if (desc) {
290 		key->description = kmalloc(desclen, GFP_KERNEL);
291 		if (!key->description)
292 			goto no_memory_3;
293 
294 		memcpy(key->description, desc, desclen);
295 	}
296 
297 	atomic_set(&key->usage, 1);
298 	init_rwsem(&key->sem);
299 	key->type = type;
300 	key->user = user;
301 	key->quotalen = quotalen;
302 	key->datalen = type->def_datalen;
303 	key->uid = uid;
304 	key->gid = gid;
305 	key->perm = perm;
306 	key->flags = 0;
307 	key->expiry = 0;
308 	key->payload.data = NULL;
309 	key->security = NULL;
310 
311 	if (!not_in_quota)
312 		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
313 
314 	memset(&key->type_data, 0, sizeof(key->type_data));
315 
316 #ifdef KEY_DEBUGGING
317 	key->magic = KEY_DEBUG_MAGIC;
318 #endif
319 
320 	/* let the security module know about the key */
321 	ret = security_key_alloc(key);
322 	if (ret < 0)
323 		goto security_error;
324 
325 	/* publish the key by giving it a serial number */
326 	atomic_inc(&user->nkeys);
327 	key_alloc_serial(key);
328 
329 error:
330 	return key;
331 
332 security_error:
333 	kfree(key->description);
334 	kmem_cache_free(key_jar, key);
335 	if (!not_in_quota) {
336 		spin_lock(&user->lock);
337 		user->qnkeys--;
338 		user->qnbytes -= quotalen;
339 		spin_unlock(&user->lock);
340 	}
341 	key_user_put(user);
342 	key = ERR_PTR(ret);
343 	goto error;
344 
345 no_memory_3:
346 	kmem_cache_free(key_jar, key);
347 no_memory_2:
348 	if (!not_in_quota) {
349 		spin_lock(&user->lock);
350 		user->qnkeys--;
351 		user->qnbytes -= quotalen;
352 		spin_unlock(&user->lock);
353 	}
354 	key_user_put(user);
355 no_memory_1:
356 	key = ERR_PTR(-ENOMEM);
357 	goto error;
358 
359 no_quota:
360 	spin_unlock(&user->lock);
361 	key_user_put(user);
362 	key = ERR_PTR(-EDQUOT);
363 	goto error;
364 
365 } /* end key_alloc() */
366 
367 EXPORT_SYMBOL(key_alloc);
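
#if 0
/*
 * Illustrative sketch, not part of the original file: one plausible way a
 * caller might pair key_alloc() with key_instantiate_and_link() and
 * key_put().  The choice of the "user" key type, the permission mask and
 * the function/parameter names are assumptions made for demonstration.
 */
static struct key *example_make_key(const char *desc,
				    const void *payload, size_t plen,
				    struct key *dest_keyring)
{
	struct key *key;
	int ret;

	/* the key comes back uninstantiated; it must be instantiated or
	 * discarded before being returned to the caller */
	key = key_alloc(&key_type_user, desc,
			current->fsuid, current->fsgid,
			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
			0 /* charge it to the owner's quota */);
	if (IS_ERR(key))
		return key;

	/* instantiate the key and link it into the destination keyring */
	ret = key_instantiate_and_link(key, payload, plen, dest_keyring, NULL);
	if (ret < 0) {
		key_put(key);
		return ERR_PTR(ret);
	}

	return key;
}
#endif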
368 
369 /*****************************************************************************/
370 /*
371  * reserve an amount of quota for the key's payload
372  */
373 int key_payload_reserve(struct key *key, size_t datalen)
374 {
375 	int delta = (int) datalen - key->datalen;
376 	int ret = 0;
377 
378 	key_check(key);
379 
380 	/* contemplate the quota adjustment */
381 	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
382 		spin_lock(&key->user->lock);
383 
384 		if (delta > 0 &&
385 		    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
386 		    ) {
387 			ret = -EDQUOT;
388 		}
389 		else {
390 			key->user->qnbytes += delta;
391 			key->quotalen += delta;
392 		}
393 		spin_unlock(&key->user->lock);
394 	}
395 
396 	/* change the recorded data length if that didn't generate an error */
397 	if (ret == 0)
398 		key->datalen = datalen;
399 
400 	return ret;
401 
402 } /* end key_payload_reserve() */
403 
404 EXPORT_SYMBOL(key_payload_reserve);
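
#if 0
/*
 * Illustrative sketch, not part of the original file: a key type's
 * ->update() op would typically call key_payload_reserve() so that the
 * owner's quota charge tracks the new payload size.  The payload layout
 * and the function name are assumptions made for demonstration.
 */
static int example_update(struct key *key, const void *data, size_t datalen)
{
	void *zap, *copy;
	int ret;

	if (!data || datalen == 0)
		return -EINVAL;

	copy = kmalloc(datalen, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, data, datalen);

	/* adjust the owner's quota charge to the new payload size; the
	 * caller holds key->sem for writing */
	ret = key_payload_reserve(key, datalen);
	if (ret < 0) {
		kfree(copy);
		return ret;
	}

	/* a real key type may also need RCU here if it has lock-free
	 * readers of the payload */
	zap = key->payload.data;
	key->payload.data = copy;
	kfree(zap);
	return 0;
}
#endif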
405 
406 /*****************************************************************************/
407 /*
408  * instantiate a key and link it into the target keyring atomically
409  * - called with the target keyring's semaphore writelocked
410  */
411 static int __key_instantiate_and_link(struct key *key,
412 				      const void *data,
413 				      size_t datalen,
414 				      struct key *keyring,
415 				      struct key *instkey)
416 {
417 	int ret, awaken;
418 
419 	key_check(key);
420 	key_check(keyring);
421 
422 	awaken = 0;
423 	ret = -EBUSY;
424 
425 	down_write(&key_construction_sem);
426 
427 	/* can't instantiate twice */
428 	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
429 		/* instantiate the key */
430 		ret = key->type->instantiate(key, data, datalen);
431 
432 		if (ret == 0) {
433 			/* mark the key as being instantiated */
434 			atomic_inc(&key->user->nikeys);
435 			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
436 
437 			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
438 				awaken = 1;
439 
440 			/* and link it into the destination keyring */
441 			if (keyring)
442 				ret = __key_link(keyring, key);
443 
444 			/* disable the authorisation key */
445 			if (instkey)
446 				key_revoke(instkey);
447 		}
448 	}
449 
450 	up_write(&key_construction_sem);
451 
452 	/* wake up anyone waiting for a key to be constructed */
453 	if (awaken)
454 		wake_up_all(&request_key_conswq);
455 
456 	return ret;
457 
458 } /* end __key_instantiate_and_link() */
459 
460 /*****************************************************************************/
461 /*
462  * instantiate a key and link it into the target keyring atomically
463  */
464 int key_instantiate_and_link(struct key *key,
465 			     const void *data,
466 			     size_t datalen,
467 			     struct key *keyring,
468 			     struct key *instkey)
469 {
470 	int ret;
471 
472 	if (keyring)
473 		down_write(&keyring->sem);
474 
475 	ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);
476 
477 	if (keyring)
478 		up_write(&keyring->sem);
479 
480 	return ret;
481 
482 } /* end key_instantiate_and_link() */
483 
484 EXPORT_SYMBOL(key_instantiate_and_link);
485 
486 /*****************************************************************************/
487 /*
488  * negatively instantiate a key and link it into the target keyring atomically
489  */
490 int key_negate_and_link(struct key *key,
491 			unsigned timeout,
492 			struct key *keyring,
493 			struct key *instkey)
494 {
495 	struct timespec now;
496 	int ret, awaken;
497 
498 	key_check(key);
499 	key_check(keyring);
500 
501 	awaken = 0;
502 	ret = -EBUSY;
503 
504 	if (keyring)
505 		down_write(&keyring->sem);
506 
507 	down_write(&key_construction_sem);
508 
509 	/* can't instantiate twice */
510 	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
511 		/* mark the key as being negatively instantiated */
512 		atomic_inc(&key->user->nikeys);
513 		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
514 		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
515 		now = current_kernel_time();
516 		key->expiry = now.tv_sec + timeout;
517 
518 		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
519 			awaken = 1;
520 
521 		ret = 0;
522 
523 		/* and link it into the destination keyring */
524 		if (keyring)
525 			ret = __key_link(keyring, key);
526 
527 		/* disable the authorisation key */
528 		if (instkey)
529 			key_revoke(instkey);
530 	}
531 
532 	up_write(&key_construction_sem);
533 
534 	if (keyring)
535 		up_write(&keyring->sem);
536 
537 	/* wake up anyone waiting for a key to be constructed */
538 	if (awaken)
539 		wake_up_all(&request_key_conswq);
540 
541 	return ret;
542 
543 } /* end key_negate_and_link() */
544 
545 EXPORT_SYMBOL(key_negate_and_link);
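
#if 0
/*
 * Illustrative sketch, not part of the original file: a routine servicing
 * a request_key() upcall might negate a key it failed to construct, so
 * that concurrent requesters see -ENOKEY for a while instead of
 * re-triggering the upcall.  The 60 second timeout and the
 * function/parameter names are assumptions made for demonstration.
 */
static int example_construction_failed(struct key *key,
				       struct key *dest_keyring,
				       struct key *authkey)
{
	/* mark the key negatively instantiated with a 60s expiry, link it
	 * into the destination keyring and revoke the authorisation key */
	return key_negate_and_link(key, 60, dest_keyring, authkey);
}
#endif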
546 
547 /*****************************************************************************/
548 /*
549  * do the cleanup in process context so that we don't have to disable
550  * interrupts all over the place
551  */
552 static void key_cleanup(void *data)
553 {
554 	struct rb_node *_n;
555 	struct key *key;
556 
557  go_again:
558 	/* look for a dead key in the tree */
559 	spin_lock(&key_serial_lock);
560 
561 	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
562 		key = rb_entry(_n, struct key, serial_node);
563 
564 		if (atomic_read(&key->usage) == 0)
565 			goto found_dead_key;
566 	}
567 
568 	spin_unlock(&key_serial_lock);
569 	return;
570 
571  found_dead_key:
572 	/* we found a dead key - once we've removed it from the tree, we can
573 	 * drop the lock */
574 	rb_erase(&key->serial_node, &key_serial_tree);
575 	spin_unlock(&key_serial_lock);
576 
577 	key_check(key);
578 
579 	security_key_free(key);
580 
581 	/* deal with the user's key tracking and quota */
582 	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
583 		spin_lock(&key->user->lock);
584 		key->user->qnkeys--;
585 		key->user->qnbytes -= key->quotalen;
586 		spin_unlock(&key->user->lock);
587 	}
588 
589 	atomic_dec(&key->user->nkeys);
590 	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
591 		atomic_dec(&key->user->nikeys);
592 
593 	key_user_put(key->user);
594 
595 	/* now throw away the key memory */
596 	if (key->type->destroy)
597 		key->type->destroy(key);
598 
599 	kfree(key->description);
600 
601 #ifdef KEY_DEBUGGING
602 	key->magic = KEY_DEBUG_MAGIC_X;
603 #endif
604 	kmem_cache_free(key_jar, key);
605 
606 	/* there may, of course, be more than one key to destroy */
607 	goto go_again;
608 
609 } /* end key_cleanup() */
610 
611 /*****************************************************************************/
612 /*
613  * dispose of a reference to a key
614  * - when all the references are gone, we schedule the cleanup task to come and
615  *   pull it out of the tree in definite process context
616  */
617 void key_put(struct key *key)
618 {
619 	if (key) {
620 		key_check(key);
621 
622 		if (atomic_dec_and_test(&key->usage))
623 			schedule_work(&key_cleanup_task);
624 	}
625 
626 } /* end key_put() */
627 
628 EXPORT_SYMBOL(key_put);
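
#if 0
/*
 * Illustrative sketch, not part of the original file: a subsystem caching
 * a key pointer takes its own reference with key_get() and drops it with
 * key_put() when done; the final put schedules key_cleanup() to reap the
 * key.  The structure and function names are assumptions made for
 * demonstration.
 */
struct example_cache {
	struct key *key;
};

static void example_cache_set(struct example_cache *cache, struct key *key)
{
	/* take an extra reference for the cache */
	cache->key = key_get(key);
}

static void example_cache_clear(struct example_cache *cache)
{
	/* may schedule the cleanup task if this was the last reference */
	key_put(cache->key);
	cache->key = NULL;
}
#endif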
629 
630 /*****************************************************************************/
631 /*
632  * find a key by its serial number
633  */
634 struct key *key_lookup(key_serial_t id)
635 {
636 	struct rb_node *n;
637 	struct key *key;
638 
639 	spin_lock(&key_serial_lock);
640 
641 	/* search the tree for the specified key */
642 	n = key_serial_tree.rb_node;
643 	while (n) {
644 		key = rb_entry(n, struct key, serial_node);
645 
646 		if (id < key->serial)
647 			n = n->rb_left;
648 		else if (id > key->serial)
649 			n = n->rb_right;
650 		else
651 			goto found;
652 	}
653 
654  not_found:
655 	key = ERR_PTR(-ENOKEY);
656 	goto error;
657 
658  found:
659 	/* pretend it doesn't exist if it's dead */
660 	if (atomic_read(&key->usage) == 0 ||
661 	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
662 	    key->type == &key_type_dead)
663 		goto not_found;
664 
665 	/* this races with key_put(), but that doesn't matter since key_put()
666 	 * doesn't actually change the key
667 	 */
668 	atomic_inc(&key->usage);
669 
670  error:
671 	spin_unlock(&key_serial_lock);
672 	return key;
673 
674 } /* end key_lookup() */
675 
676 /*****************************************************************************/
677 /*
678  * find and lock the specified key type against removal
679  * - we return with the sem readlocked
680  */
681 struct key_type *key_type_lookup(const char *type)
682 {
683 	struct key_type *ktype;
684 
685 	down_read(&key_types_sem);
686 
687 	/* look up the key type to see if it's one of the registered kernel
688 	 * types */
689 	list_for_each_entry(ktype, &key_types_list, link) {
690 		if (strcmp(ktype->name, type) == 0)
691 			goto found_kernel_type;
692 	}
693 
694 	up_read(&key_types_sem);
695 	ktype = ERR_PTR(-ENOKEY);
696 
697  found_kernel_type:
698 	return ktype;
699 
700 } /* end key_type_lookup() */
701 
702 /*****************************************************************************/
703 /*
704  * unlock a key type
705  */
706 void key_type_put(struct key_type *ktype)
707 {
708 	up_read(&key_types_sem);
709 
710 } /* end key_type_put() */
711 
712 /*****************************************************************************/
713 /*
714  * attempt to update an existing key
715  * - the key has an incremented refcount
716  * - we need to put the key if we get an error
717  */
718 static inline key_ref_t __key_update(key_ref_t key_ref,
719 				     const void *payload, size_t plen)
720 {
721 	struct key *key = key_ref_to_ptr(key_ref);
722 	int ret;
723 
724 	/* need write permission on the key to update it */
725 	ret = key_permission(key_ref, KEY_WRITE);
726 	if (ret < 0)
727 		goto error;
728 
729 	ret = -EEXIST;
730 	if (!key->type->update)
731 		goto error;
732 
733 	down_write(&key->sem);
734 
735 	ret = key->type->update(key, payload, plen);
736 	if (ret == 0)
737 		/* updating a negative key instantiates it */
738 		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
739 
740 	up_write(&key->sem);
741 
742 	if (ret < 0)
743 		goto error;
744 out:
745 	return key_ref;
746 
747 error:
748 	key_put(key);
749 	key_ref = ERR_PTR(ret);
750 	goto out;
751 
752 } /* end __key_update() */
753 
754 /*****************************************************************************/
755 /*
756  * search the specified keyring for a key of the same description; if one is
757  * found, update it, otherwise add a new one
758  */
759 key_ref_t key_create_or_update(key_ref_t keyring_ref,
760 			       const char *type,
761 			       const char *description,
762 			       const void *payload,
763 			       size_t plen,
764 			       int not_in_quota)
765 {
766 	struct key_type *ktype;
767 	struct key *keyring, *key = NULL;
768 	key_perm_t perm;
769 	key_ref_t key_ref;
770 	int ret;
771 
772 	/* look up the key type to see if it's one of the registered kernel
773 	 * types */
774 	ktype = key_type_lookup(type);
775 	if (IS_ERR(ktype)) {
776 		key_ref = ERR_PTR(-ENODEV);
777 		goto error;
778 	}
779 
780 	key_ref = ERR_PTR(-EINVAL);
781 	if (!ktype->match || !ktype->instantiate)
782 		goto error_2;
783 
784 	keyring = key_ref_to_ptr(keyring_ref);
785 
786 	key_check(keyring);
787 
788 	down_write(&keyring->sem);
789 
790 	/* if we're going to allocate a new key, we're going to have
791 	 * to modify the keyring */
792 	ret = key_permission(keyring_ref, KEY_WRITE);
793 	if (ret < 0) {
794 		key_ref = ERR_PTR(ret);
795 		goto error_3;
796 	}
797 
798 	/* search for an existing key of the same type and description in the
799 	 * destination keyring
800 	 */
801 	key_ref = __keyring_search_one(keyring_ref, ktype, description, 0);
802 	if (!IS_ERR(key_ref))
803 		goto found_matching_key;
804 
805 	/* decide on the permissions we want */
806 	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
807 	perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
808 
809 	if (ktype->read)
810 		perm |= KEY_POS_READ | KEY_USR_READ;
811 
812 	if (ktype == &key_type_keyring || ktype->update)
813 		perm |= KEY_USR_WRITE;
814 
815 	/* allocate a new key */
816 	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
817 			perm, not_in_quota);
818 	if (IS_ERR(key)) {
819 		key_ref = ERR_PTR(PTR_ERR(key));
820 		goto error_3;
821 	}
822 
823 	/* instantiate it and link it into the target keyring */
824 	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
825 	if (ret < 0) {
826 		key_put(key);
827 		key_ref = ERR_PTR(ret);
828 		goto error_3;
829 	}
830 
831 	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
832 
833  error_3:
834 	up_write(&keyring->sem);
835  error_2:
836 	key_type_put(ktype);
837  error:
838 	return key_ref;
839 
840  found_matching_key:
841 	/* we found a matching key, so we're going to try to update it
842 	 * - we can drop the locks first as we have the key pinned
843 	 */
844 	up_write(&keyring->sem);
845 	key_type_put(ktype);
846 
847 	key_ref = __key_update(key_ref, payload, plen);
848 	goto error;
849 
850 } /* end key_create_or_update() */
851 
852 EXPORT_SYMBOL(key_create_or_update);
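
#if 0
/*
 * Illustrative sketch, not part of the original file: adding or updating
 * a "user" key in a keyring the caller already holds.  Treating the
 * keyring as possessed, the description "example:desc" and the function
 * name are assumptions made for demonstration.
 */
static int example_add_or_update(struct key *keyring,
				 const void *payload, size_t plen)
{
	key_ref_t kref;

	kref = key_create_or_update(make_key_ref(keyring, 1), "user",
				    "example:desc", payload, plen,
				    0 /* charge it to the owner's quota */);
	if (IS_ERR(kref))
		return PTR_ERR(kref);

	/* drop the reference returned on the new or updated key */
	key_ref_put(kref);
	return 0;
}
#endif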
853 
854 /*****************************************************************************/
855 /*
856  * update a key
857  */
858 int key_update(key_ref_t key_ref, const void *payload, size_t plen)
859 {
860 	struct key *key = key_ref_to_ptr(key_ref);
861 	int ret;
862 
863 	key_check(key);
864 
865 	/* the key must be writable */
866 	ret = key_permission(key_ref, KEY_WRITE);
867 	if (ret < 0)
868 		goto error;
869 
870 	/* attempt to update it if supported */
871 	ret = -EOPNOTSUPP;
872 	if (key->type->update) {
873 		down_write(&key->sem);
874 
875 		ret = key->type->update(key, payload, plen);
876 		if (ret == 0)
877 			/* updating a negative key instantiates it */
878 			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
879 
880 		up_write(&key->sem);
881 	}
882 
883  error:
884 	return ret;
885 
886 } /* end key_update() */
887 
888 EXPORT_SYMBOL(key_update);
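
#if 0
/*
 * Illustrative sketch, not part of the original file: updating a key the
 * caller already holds a pointer to.  Treating the key as possessed and
 * the function name are assumptions made for demonstration.
 */
static int example_refresh_key(struct key *key,
			       const void *payload, size_t plen)
{
	/* key_update() checks KEY_WRITE permission against this reference
	 * and takes key->sem itself */
	return key_update(make_key_ref(key, 1), payload, plen);
}
#endif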
889 
890 /*****************************************************************************/
891 /*
892  * revoke a key
893  */
894 void key_revoke(struct key *key)
895 {
896 	key_check(key);
897 
898 	/* make sure no one's trying to change or use the key when we mark
899 	 * it */
900 	down_write(&key->sem);
901 	set_bit(KEY_FLAG_REVOKED, &key->flags);
902 	up_write(&key->sem);
903 
904 } /* end key_revoke() */
905 
906 EXPORT_SYMBOL(key_revoke);
907 
908 /*****************************************************************************/
909 /*
910  * register a type of key
911  */
912 int register_key_type(struct key_type *ktype)
913 {
914 	struct key_type *p;
915 	int ret;
916 
917 	ret = -EEXIST;
918 	down_write(&key_types_sem);
919 
920 	/* disallow key types with the same name */
921 	list_for_each_entry(p, &key_types_list, link) {
922 		if (strcmp(p->name, ktype->name) == 0)
923 			goto out;
924 	}
925 
926 	/* store the type */
927 	list_add(&ktype->link, &key_types_list);
928 	ret = 0;
929 
930  out:
931 	up_write(&key_types_sem);
932 	return ret;
933 
934 } /* end register_key_type() */
935 
936 EXPORT_SYMBOL(register_key_type);
937 
938 /*****************************************************************************/
939 /*
940  * unregister a type of key
941  */
942 void unregister_key_type(struct key_type *ktype)
943 {
944 	struct rb_node *_n;
945 	struct key *key;
946 
947 	down_write(&key_types_sem);
948 
949 	/* withdraw the key type */
950 	list_del_init(&ktype->link);
951 
952 	/* mark all the keys of this type dead */
953 	spin_lock(&key_serial_lock);
954 
955 	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
956 		key = rb_entry(_n, struct key, serial_node);
957 
958 		if (key->type == ktype)
959 			key->type = &key_type_dead;
960 	}
961 
962 	spin_unlock(&key_serial_lock);
963 
964 	/* make sure everyone revalidates their keys */
965 	synchronize_rcu();
966 
967 	/* we should now be able to destroy the payloads of all the keys of
968 	 * this type with impunity */
969 	spin_lock(&key_serial_lock);
970 
971 	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
972 		key = rb_entry(_n, struct key, serial_node);
973 
974 		if (key->type == ktype) {
975 			if (ktype->destroy)
976 				ktype->destroy(key);
977 			memset(&key->payload, 0xbd, sizeof(key->payload));
978 		}
979 	}
980 
981 	spin_unlock(&key_serial_lock);
982 	up_write(&key_types_sem);
983 
984 } /* end unregister_key_type() */
985 
986 EXPORT_SYMBOL(unregister_key_type);
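
#if 0
/*
 * Illustrative sketch, not part of the original file: a minimal key type
 * that a module might pass to register_key_type() and later to
 * unregister_key_type().  The type name "example", the payload handling
 * and the function names are assumptions made for demonstration.
 */
static int example_instantiate(struct key *key, const void *data,
			       size_t datalen)
{
	void *copy;
	int ret;

	if (!data || datalen == 0)
		return -EINVAL;

	/* charge the payload to the owner's key quota */
	ret = key_payload_reserve(key, datalen);
	if (ret < 0)
		return ret;

	copy = kmalloc(datalen, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, data, datalen);
	key->payload.data = copy;
	return 0;
}

static int example_match(const struct key *key, const void *description)
{
	return strcmp(key->description, description) == 0;
}

static void example_destroy(struct key *key)
{
	kfree(key->payload.data);
}

static struct key_type key_type_example = {
	.name		= "example",
	.instantiate	= example_instantiate,
	.match		= example_match,
	.destroy	= example_destroy,
};

static int __init example_register(void)
{
	/* fails with -EEXIST if a type of the same name already exists */
	return register_key_type(&key_type_example);
}

static void __exit example_unregister(void)
{
	/* any remaining keys of this type are re-typed to "dead" and their
	 * payloads destroyed */
	unregister_key_type(&key_type_example);
}
#endif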
987 
988 /*****************************************************************************/
989 /*
990  * initialise the key management stuff
991  */
992 void __init key_init(void)
993 {
994 	/* allocate a slab in which we can store keys */
995 	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
996 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
997 
998 	/* add the special key types */
999 	list_add_tail(&key_type_keyring.link, &key_types_list);
1000 	list_add_tail(&key_type_dead.link, &key_types_list);
1001 	list_add_tail(&key_type_user.link, &key_types_list);
1002 
1003 	/* record the root user tracking */
1004 	rb_link_node(&root_key_user.node,
1005 		     NULL,
1006 		     &key_user_tree.rb_node);
1007 
1008 	rb_insert_color(&root_key_user.node,
1009 			&key_user_tree);
1010 
1011 	/* record root's user standard keyrings */
1012 	key_check(&root_user_keyring);
1013 	key_check(&root_session_keyring);
1014 
1015 	__key_insert_serial(&root_user_keyring);
1016 	__key_insert_serial(&root_session_keyring);
1017 
1018 	keyring_publish_name(&root_user_keyring);
1019 	keyring_publish_name(&root_session_keyring);
1020 
1021 	/* link the two root keyrings together */
1022 	key_link(&root_session_keyring, &root_user_keyring);
1023 
1024 } /* end key_init() */
1025