xref: /linux/security/keys/key.c (revision a67ff6a54095e27093ea501fb143fefe51a536c2)
1 /* Basic authentication token and access key management
2  *
3  * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/poison.h>
15 #include <linux/sched.h>
16 #include <linux/slab.h>
17 #include <linux/security.h>
18 #include <linux/workqueue.h>
19 #include <linux/random.h>
20 #include <linux/err.h>
21 #include <linux/user_namespace.h>
22 #include "internal.h"
23 
24 struct kmem_cache *key_jar;
25 struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
26 DEFINE_SPINLOCK(key_serial_lock);
27 
28 struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
29 DEFINE_SPINLOCK(key_user_lock);
30 
31 unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
32 unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
33 unsigned int key_quota_maxkeys = 200;		/* general key count quota */
34 unsigned int key_quota_maxbytes = 20000;	/* general key space quota */
35 
36 static LIST_HEAD(key_types_list);
37 static DECLARE_RWSEM(key_types_sem);
38 
39 /* We serialise key instantiation and link */
40 DEFINE_MUTEX(key_construction_mutex);
41 
42 #ifdef KEY_DEBUGGING
43 void __key_check(const struct key *key)
44 {
45 	printk("__key_check: key %p {%08x} should be {%08x}\n",
46 	       key, key->magic, KEY_DEBUG_MAGIC);
47 	BUG();
48 }
49 #endif
50 
51 /*
52  * Get the key quota record for a user, allocating a new record if one doesn't
53  * already exist.
54  */
55 struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
56 {
57 	struct key_user *candidate = NULL, *user;
58 	struct rb_node *parent = NULL;
59 	struct rb_node **p;
60 
61 try_again:
62 	p = &key_user_tree.rb_node;
63 	spin_lock(&key_user_lock);
64 
65 	/* search the tree for a user record with a matching UID */
66 	while (*p) {
67 		parent = *p;
68 		user = rb_entry(parent, struct key_user, node);
69 
70 		if (uid < user->uid)
71 			p = &(*p)->rb_left;
72 		else if (uid > user->uid)
73 			p = &(*p)->rb_right;
74 		else if (user_ns < user->user_ns)
75 			p = &(*p)->rb_left;
76 		else if (user_ns > user->user_ns)
77 			p = &(*p)->rb_right;
78 		else
79 			goto found;
80 	}
81 
82 	/* if we get here, we failed to find a match in the tree */
83 	if (!candidate) {
84 		/* allocate a candidate user record if we don't already have
85 		 * one */
86 		spin_unlock(&key_user_lock);
87 
88 		user = NULL;
89 		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
90 		if (unlikely(!candidate))
91 			goto out;
92 
93 		/* the allocation may have scheduled, so we need to repeat the
94 		 * search lest someone else added the record whilst we were
95 		 * asleep */
96 		goto try_again;
97 	}
98 
99 	/* if we get here, then the user record still hadn't appeared on the
100 	 * second pass - so we use the candidate record */
101 	atomic_set(&candidate->usage, 1);
102 	atomic_set(&candidate->nkeys, 0);
103 	atomic_set(&candidate->nikeys, 0);
104 	candidate->uid = uid;
105 	candidate->user_ns = get_user_ns(user_ns);
106 	candidate->qnkeys = 0;
107 	candidate->qnbytes = 0;
108 	spin_lock_init(&candidate->lock);
109 	mutex_init(&candidate->cons_lock);
110 
111 	rb_link_node(&candidate->node, parent, p);
112 	rb_insert_color(&candidate->node, &key_user_tree);
113 	spin_unlock(&key_user_lock);
114 	user = candidate;
115 	goto out;
116 
117 	/* okay - we found a user record for this UID */
118 found:
119 	atomic_inc(&user->usage);
120 	spin_unlock(&key_user_lock);
121 	kfree(candidate);
122 out:
123 	return user;
124 }
125 
126 /*
127  * Dispose of a user structure
128  */
129 void key_user_put(struct key_user *user)
130 {
131 	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
132 		rb_erase(&user->node, &key_user_tree);
133 		spin_unlock(&key_user_lock);
134 		put_user_ns(user->user_ns);
135 
136 		kfree(user);
137 	}
138 }
139 
140 /*
141  * Allocate a serial number for a key.  These are assigned randomly to avoid
142  * the covert channel that predictable, sequential assignment would provide.
143  */
144 static inline void key_alloc_serial(struct key *key)
145 {
146 	struct rb_node *parent, **p;
147 	struct key *xkey;
148 
149 	/* propose a random serial number and look for a hole for it in the
150 	 * serial number tree */
151 	do {
152 		get_random_bytes(&key->serial, sizeof(key->serial));
153 
154 		key->serial >>= 1; /* negative numbers are not permitted */
155 	} while (key->serial < 3);
156 
157 	spin_lock(&key_serial_lock);
158 
159 attempt_insertion:
160 	parent = NULL;
161 	p = &key_serial_tree.rb_node;
162 
163 	while (*p) {
164 		parent = *p;
165 		xkey = rb_entry(parent, struct key, serial_node);
166 
167 		if (key->serial < xkey->serial)
168 			p = &(*p)->rb_left;
169 		else if (key->serial > xkey->serial)
170 			p = &(*p)->rb_right;
171 		else
172 			goto serial_exists;
173 	}
174 
175 	/* we've found a suitable hole - arrange for this key to occupy it */
176 	rb_link_node(&key->serial_node, parent, p);
177 	rb_insert_color(&key->serial_node, &key_serial_tree);
178 
179 	spin_unlock(&key_serial_lock);
180 	return;
181 
182 	/* we found a key with the proposed serial number - walk the tree from
183 	 * that point looking for the next unused serial number */
184 serial_exists:
185 	for (;;) {
186 		key->serial++;
187 		if (key->serial < 3) {
188 			key->serial = 3;
189 			goto attempt_insertion;
190 		}
191 
192 		parent = rb_next(parent);
193 		if (!parent)
194 			goto attempt_insertion;
195 
196 		xkey = rb_entry(parent, struct key, serial_node);
197 		if (key->serial < xkey->serial)
198 			goto attempt_insertion;
199 	}
200 }
201 
202 /**
203  * key_alloc - Allocate a key of the specified type.
204  * @type: The type of key to allocate.
205  * @desc: The key description to allow the key to be searched out.
206  * @uid: The owner of the new key.
207  * @gid: The group ID for the new key's group permissions.
208  * @cred: The credentials specifying UID namespace.
209  * @perm: The permissions mask of the new key.
210  * @flags: Flags specifying quota properties.
211  *
212  * Allocate a key of the specified type with the attributes given.  The key is
213  * returned in an uninstantiated state and the caller needs to instantiate the
214  * key before returning.
215  *
216  * The user's key count quota is updated to reflect the creation of the key and
217  * the user's key data quota has the default for the key type reserved.  The
218  * instantiation function should amend this as necessary.  If insufficient
219  * quota is available, -EDQUOT will be returned.
220  *
221  * The LSM security modules can prevent a key being created, in which case
222  * -EACCES will be returned.
223  *
224  * Returns a pointer to the new key if successful and an error code otherwise.
225  *
226  * Note that the caller needs to ensure the key type isn't unregistered.
227  * Internally this can be done by locking key_types_sem.  Externally, this can
228  * be done by either never unregistering the key type, or making sure
229  * key_alloc() calls don't race with module unloading.
230  */
231 struct key *key_alloc(struct key_type *type, const char *desc,
232 		      uid_t uid, gid_t gid, const struct cred *cred,
233 		      key_perm_t perm, unsigned long flags)
234 {
235 	struct key_user *user = NULL;
236 	struct key *key;
237 	size_t desclen, quotalen;
238 	int ret;
239 
240 	key = ERR_PTR(-EINVAL);
241 	if (!desc || !*desc)
242 		goto error;
243 
244 	if (type->vet_description) {
245 		ret = type->vet_description(desc);
246 		if (ret < 0) {
247 			key = ERR_PTR(ret);
248 			goto error;
249 		}
250 	}
251 
252 	desclen = strlen(desc) + 1;
253 	quotalen = desclen + type->def_datalen;
254 
255 	/* get hold of the key tracking for this user */
256 	user = key_user_lookup(uid, cred->user->user_ns);
257 	if (!user)
258 		goto no_memory_1;
259 
260 	/* check that the user's quota permits allocation of another key and
261 	 * its description */
262 	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
263 		unsigned maxkeys = (uid == 0) ?
264 			key_quota_root_maxkeys : key_quota_maxkeys;
265 		unsigned maxbytes = (uid == 0) ?
266 			key_quota_root_maxbytes : key_quota_maxbytes;
267 
268 		spin_lock(&user->lock);
269 		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
270 			if (user->qnkeys + 1 >= maxkeys ||
271 			    user->qnbytes + quotalen >= maxbytes ||
272 			    user->qnbytes + quotalen < user->qnbytes)
273 				goto no_quota;
274 		}
275 
276 		user->qnkeys++;
277 		user->qnbytes += quotalen;
278 		spin_unlock(&user->lock);
279 	}
280 
281 	/* allocate and initialise the key and its description */
282 	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
283 	if (!key)
284 		goto no_memory_2;
285 
286 	if (desc) {
287 		key->description = kmemdup(desc, desclen, GFP_KERNEL);
288 		if (!key->description)
289 			goto no_memory_3;
290 	}
291 
292 	atomic_set(&key->usage, 1);
293 	init_rwsem(&key->sem);
294 	key->type = type;
295 	key->user = user;
296 	key->quotalen = quotalen;
297 	key->datalen = type->def_datalen;
298 	key->uid = uid;
299 	key->gid = gid;
300 	key->perm = perm;
301 	key->flags = 0;
302 	key->expiry = 0;
303 	key->payload.data = NULL;
304 	key->security = NULL;
305 
306 	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
307 		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
308 
309 	memset(&key->type_data, 0, sizeof(key->type_data));
310 
311 #ifdef KEY_DEBUGGING
312 	key->magic = KEY_DEBUG_MAGIC;
313 #endif
314 
315 	/* let the security module know about the key */
316 	ret = security_key_alloc(key, cred, flags);
317 	if (ret < 0)
318 		goto security_error;
319 
320 	/* publish the key by giving it a serial number */
321 	atomic_inc(&user->nkeys);
322 	key_alloc_serial(key);
323 
324 error:
325 	return key;
326 
327 security_error:
328 	kfree(key->description);
329 	kmem_cache_free(key_jar, key);
330 	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
331 		spin_lock(&user->lock);
332 		user->qnkeys--;
333 		user->qnbytes -= quotalen;
334 		spin_unlock(&user->lock);
335 	}
336 	key_user_put(user);
337 	key = ERR_PTR(ret);
338 	goto error;
339 
340 no_memory_3:
341 	kmem_cache_free(key_jar, key);
342 no_memory_2:
343 	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
344 		spin_lock(&user->lock);
345 		user->qnkeys--;
346 		user->qnbytes -= quotalen;
347 		spin_unlock(&user->lock);
348 	}
349 	key_user_put(user);
350 no_memory_1:
351 	key = ERR_PTR(-ENOMEM);
352 	goto error;
353 
354 no_quota:
355 	spin_unlock(&user->lock);
356 	key_user_put(user);
357 	key = ERR_PTR(-EDQUOT);
358 	goto error;
359 }
360 EXPORT_SYMBOL(key_alloc);
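
/*
 * Illustrative sketch (not part of this file): a minimal in-kernel caller of
 * key_alloc().  The "user" key type (key_type_user from <keys/user-type.h>),
 * the description and the helper name are assumptions made for the example.
 *
 *	static struct key *example_alloc_key(const struct cred *cred)
 *	{
 *		struct key *key;
 *
 *		key = key_alloc(&key_type_user, "example:token",
 *				cred->fsuid, cred->fsgid, cred,
 *				KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
 *				KEY_ALLOC_IN_QUOTA);
 *		if (IS_ERR(key))
 *			return key;	// -EDQUOT, -EACCES or -ENOMEM
 *
 *		// The key is still uninstantiated here; it must be given a
 *		// payload (see key_instantiate_and_link() below) or dropped
 *		// again with key_put().
 *		return key;
 *	}
 */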
361 
362 /**
363  * key_payload_reserve - Adjust data quota reservation for the key's payload
364  * @key: The key to make the reservation for.
365  * @datalen: The amount of data payload the caller now wants.
366  *
367  * Adjust the amount of the owning user's key data quota that a key reserves.
368  * If the amount is increased, then -EDQUOT may be returned if there isn't
369  * enough free quota available.
370  *
371  * If successful, 0 is returned.
372  */
373 int key_payload_reserve(struct key *key, size_t datalen)
374 {
375 	int delta = (int)datalen - key->datalen;
376 	int ret = 0;
377 
378 	key_check(key);
379 
380 	/* contemplate the quota adjustment */
381 	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
382 		unsigned maxbytes = (key->user->uid == 0) ?
383 			key_quota_root_maxbytes : key_quota_maxbytes;
384 
385 		spin_lock(&key->user->lock);
386 
387 		if (delta > 0 &&
388 		    (key->user->qnbytes + delta >= maxbytes ||
389 		     key->user->qnbytes + delta < key->user->qnbytes)) {
390 			ret = -EDQUOT;
391 		}
392 		else {
393 			key->user->qnbytes += delta;
394 			key->quotalen += delta;
395 		}
396 		spin_unlock(&key->user->lock);
397 	}
398 
399 	/* change the recorded data length if that didn't generate an error */
400 	if (ret == 0)
401 		key->datalen = datalen;
402 
403 	return ret;
404 }
405 EXPORT_SYMBOL(key_payload_reserve);
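
/*
 * Illustrative sketch (not part of this file): key_payload_reserve() is
 * normally called from a key type's ->instantiate() or ->update() operation
 * before the new payload is attached.  The operation below is hypothetical.
 *
 *	static int example_instantiate(struct key *key,
 *				       const void *data, size_t datalen)
 *	{
 *		void *copy;
 *		int ret;
 *
 *		ret = key_payload_reserve(key, datalen);
 *		if (ret < 0)
 *			return ret;	// -EDQUOT: quota can't cover it
 *
 *		copy = kmemdup(data, datalen, GFP_KERNEL);
 *		if (!copy)
 *			return -ENOMEM;
 *
 *		key->payload.data = copy;
 *		return 0;
 *	}
 */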
406 
407 /*
408  * Instantiate a key and link it into the target keyring atomically.  Must be
409  * called with the target keyring's semaphore writelocked.  The target key's
410  * semaphore need not be locked as instantiation is serialised by
411  * key_construction_mutex.
412  */
413 static int __key_instantiate_and_link(struct key *key,
414 				      const void *data,
415 				      size_t datalen,
416 				      struct key *keyring,
417 				      struct key *authkey,
418 				      unsigned long *_prealloc)
419 {
420 	int ret, awaken;
421 
422 	key_check(key);
423 	key_check(keyring);
424 
425 	awaken = 0;
426 	ret = -EBUSY;
427 
428 	mutex_lock(&key_construction_mutex);
429 
430 	/* can't instantiate twice */
431 	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
432 		/* instantiate the key */
433 		ret = key->type->instantiate(key, data, datalen);
434 
435 		if (ret == 0) {
436 			/* mark the key as being instantiated */
437 			atomic_inc(&key->user->nikeys);
438 			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
439 
440 			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
441 				awaken = 1;
442 
443 			/* and link it into the destination keyring */
444 			if (keyring)
445 				__key_link(keyring, key, _prealloc);
446 
447 			/* disable the authorisation key */
448 			if (authkey)
449 				key_revoke(authkey);
450 		}
451 	}
452 
453 	mutex_unlock(&key_construction_mutex);
454 
455 	/* wake up anyone waiting for a key to be constructed */
456 	if (awaken)
457 		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
458 
459 	return ret;
460 }
461 
462 /**
463  * key_instantiate_and_link - Instantiate a key and link it into the keyring.
464  * @key: The key to instantiate.
465  * @data: The data to use to instantiate the key.
466  * @datalen: The length of @data.
467  * @keyring: Keyring to create a link in on success (or NULL).
468  * @authkey: The authorisation token permitting instantiation.
469  *
470  * Instantiate a key that's in the uninstantiated state using the provided data
471  * and, if successful, link it in to the destination keyring if one is
472  * supplied.
473  *
474  * If successful, 0 is returned, the authorisation token is revoked and anyone
475  * waiting for the key is woken up.  If the key was already instantiated,
476  * -EBUSY will be returned.
477  */
478 int key_instantiate_and_link(struct key *key,
479 			     const void *data,
480 			     size_t datalen,
481 			     struct key *keyring,
482 			     struct key *authkey)
483 {
484 	unsigned long prealloc;
485 	int ret;
486 
487 	if (keyring) {
488 		ret = __key_link_begin(keyring, key->type, key->description,
489 				       &prealloc);
490 		if (ret < 0)
491 			return ret;
492 	}
493 
494 	ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey,
495 					 &prealloc);
496 
497 	if (keyring)
498 		__key_link_end(keyring, key->type, prealloc);
499 
500 	return ret;
501 }
502 
503 EXPORT_SYMBOL(key_instantiate_and_link);
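
/*
 * Illustrative sketch (not part of this file): instantiating a freshly
 * allocated key with a payload blob and linking it into a keyring the caller
 * already holds.  The helper name and error handling are assumptions.
 *
 *	static int example_publish(struct key *key, struct key *keyring,
 *				   const void *blob, size_t len)
 *	{
 *		int ret;
 *
 *		ret = key_instantiate_and_link(key, blob, len, keyring, NULL);
 *		if (ret < 0)
 *			key_put(key);	// drop our reference on failure
 *		return ret;		// -EBUSY if already instantiated
 *	}
 */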
504 
505 /**
506  * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
507  * @key: The key to instantiate.
508  * @timeout: The timeout on the negative key.
509  * @error: The error to return when the key is hit.
510  * @keyring: Keyring to create a link in on success (or NULL).
511  * @authkey: The authorisation token permitting instantiation.
512  *
513  * Negatively instantiate a key that's in the uninstantiated state and, if
514  * successful, set its timeout and stored error and link it in to the
515  * destination keyring if one is supplied.  The key and any links to the key
516  * will be automatically garbage collected after the timeout expires.
517  *
518  * Negative keys are used to rate limit repeated request_key() calls by causing
519  * them to return the stored error code (typically ENOKEY) until the negative
520  * key expires.
521  *
522  * If successful, 0 is returned, the authorisation token is revoked and anyone
523  * waiting for the key is woken up.  If the key was already instantiated,
524  * -EBUSY will be returned.
525  */
526 int key_reject_and_link(struct key *key,
527 			unsigned timeout,
528 			unsigned error,
529 			struct key *keyring,
530 			struct key *authkey)
531 {
532 	unsigned long prealloc;
533 	struct timespec now;
534 	int ret, awaken, link_ret = 0;
535 
536 	key_check(key);
537 	key_check(keyring);
538 
539 	awaken = 0;
540 	ret = -EBUSY;
541 
542 	if (keyring)
543 		link_ret = __key_link_begin(keyring, key->type,
544 					    key->description, &prealloc);
545 
546 	mutex_lock(&key_construction_mutex);
547 
548 	/* can't instantiate twice */
549 	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
550 		/* mark the key as being negatively instantiated */
551 		atomic_inc(&key->user->nikeys);
552 		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
553 		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
554 		key->type_data.reject_error = -error;
555 		now = current_kernel_time();
556 		key->expiry = now.tv_sec + timeout;
557 		key_schedule_gc(key->expiry + key_gc_delay);
558 
559 		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
560 			awaken = 1;
561 
562 		ret = 0;
563 
564 		/* and link it into the destination keyring */
565 		if (keyring && link_ret == 0)
566 			__key_link(keyring, key, &prealloc);
567 
568 		/* disable the authorisation key */
569 		if (authkey)
570 			key_revoke(authkey);
571 	}
572 
573 	mutex_unlock(&key_construction_mutex);
574 
575 	if (keyring)
576 		__key_link_end(keyring, key->type, prealloc);
577 
578 	/* wake up anyone waiting for a key to be constructed */
579 	if (awaken)
580 		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
581 
582 	return ret == 0 ? link_ret : ret;
583 }
584 EXPORT_SYMBOL(key_reject_and_link);
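
/*
 * Illustrative sketch (not part of this file): how an instantiation path
 * might negatively instantiate a key after a failed upcall so that repeated
 * request_key() calls back off.  The 60-second timeout and helper name are
 * assumptions.
 *
 *	static void example_reject(struct key *key, struct key *dest_keyring,
 *				   struct key *authkey)
 *	{
 *		// Cache -ENOKEY against the key for 60 seconds and revoke the
 *		// authorisation token; searches that hit the key within that
 *		// window fail immediately with the stored error.
 *		key_reject_and_link(key, 60, ENOKEY, dest_keyring, authkey);
 *	}
 */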
585 
586 /**
587  * key_put - Discard a reference to a key.
588  * @key: The key to discard a reference from.
589  *
590  * Discard a reference to a key, and when all the references are gone, we
591  * schedule the cleanup task to come and pull it out of the tree in process
592  * context at some later time.
593  */
594 void key_put(struct key *key)
595 {
596 	if (key) {
597 		key_check(key);
598 
599 		if (atomic_dec_and_test(&key->usage))
600 			queue_work(system_nrt_wq, &key_gc_work);
601 	}
602 }
603 EXPORT_SYMBOL(key_put);
604 
605 /*
606  * Find a key by its serial number.
607  */
608 struct key *key_lookup(key_serial_t id)
609 {
610 	struct rb_node *n;
611 	struct key *key;
612 
613 	spin_lock(&key_serial_lock);
614 
615 	/* search the tree for the specified key */
616 	n = key_serial_tree.rb_node;
617 	while (n) {
618 		key = rb_entry(n, struct key, serial_node);
619 
620 		if (id < key->serial)
621 			n = n->rb_left;
622 		else if (id > key->serial)
623 			n = n->rb_right;
624 		else
625 			goto found;
626 	}
627 
628 not_found:
629 	key = ERR_PTR(-ENOKEY);
630 	goto error;
631 
632 found:
633 	/* pretend it doesn't exist if it is awaiting deletion */
634 	if (atomic_read(&key->usage) == 0)
635 		goto not_found;
636 
637 	/* this races with key_put(), but that doesn't matter since key_put()
638 	 * doesn't actually change the key
639 	 */
640 	atomic_inc(&key->usage);
641 
642 error:
643 	spin_unlock(&key_serial_lock);
644 	return key;
645 }
646 
647 /*
648  * Find and lock the specified key type against removal.
649  *
650  * We return with the sem read-locked if successful.  If the type wasn't
651  * available -ENOKEY is returned instead.
652  */
653 struct key_type *key_type_lookup(const char *type)
654 {
655 	struct key_type *ktype;
656 
657 	down_read(&key_types_sem);
658 
659 	/* look up the key type to see if it's one of the registered kernel
660 	 * types */
661 	list_for_each_entry(ktype, &key_types_list, link) {
662 		if (strcmp(ktype->name, type) == 0)
663 			goto found_kernel_type;
664 	}
665 
666 	up_read(&key_types_sem);
667 	ktype = ERR_PTR(-ENOKEY);
668 
669 found_kernel_type:
670 	return ktype;
671 }
672 
673 /*
674  * Unlock a key type locked by key_type_lookup().
675  */
676 void key_type_put(struct key_type *ktype)
677 {
678 	up_read(&key_types_sem);
679 }
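
/*
 * Illustrative sketch (not part of this file): key_type_lookup() returns with
 * key_types_sem read-locked, so each successful lookup must be balanced by
 * key_type_put().  These helpers are internal, so only callers within
 * security/keys can use this pattern; the type name is an assumption.
 *
 *	static int example_with_type(void)
 *	{
 *		struct key_type *ktype;
 *
 *		ktype = key_type_lookup("user");
 *		if (IS_ERR(ktype))
 *			return PTR_ERR(ktype);	// -ENOKEY: no such type
 *
 *		// ... use ktype; it can't be unregistered while we hold the
 *		// read lock ...
 *
 *		key_type_put(ktype);
 *		return 0;
 *	}
 */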
680 
681 /*
682  * Attempt to update an existing key.
683  *
684  * The key is given to us with an incremented refcount that we need to discard
685  * if we get an error.
686  */
687 static inline key_ref_t __key_update(key_ref_t key_ref,
688 				     const void *payload, size_t plen)
689 {
690 	struct key *key = key_ref_to_ptr(key_ref);
691 	int ret;
692 
693 	/* need write permission on the key to update it */
694 	ret = key_permission(key_ref, KEY_WRITE);
695 	if (ret < 0)
696 		goto error;
697 
698 	ret = -EEXIST;
699 	if (!key->type->update)
700 		goto error;
701 
702 	down_write(&key->sem);
703 
704 	ret = key->type->update(key, payload, plen);
705 	if (ret == 0)
706 		/* updating a negative key instantiates it */
707 		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
708 
709 	up_write(&key->sem);
710 
711 	if (ret < 0)
712 		goto error;
713 out:
714 	return key_ref;
715 
716 error:
717 	key_put(key);
718 	key_ref = ERR_PTR(ret);
719 	goto out;
720 }
721 
722 /**
723  * key_create_or_update - Update or create and instantiate a key.
724  * @keyring_ref: A pointer to the destination keyring with possession flag.
725  * @type: The type of key.
726  * @description: The searchable description for the key.
727  * @payload: The data to use to instantiate or update the key.
728  * @plen: The length of @payload.
729  * @perm: The permissions mask for a new key.
730  * @flags: The quota flags for a new key.
731  *
732  * Search the destination keyring for a key of the same description and if one
733  * is found, update it; otherwise create and instantiate a new one and link it
734  * into that keyring.
735  *
736  * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
737  * concocted.
738  *
739  * Returns a pointer to the new key if successful, -ENODEV if the key type
740  * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
741  * caller isn't permitted to modify the keyring or the LSM did not permit
742  * creation of the key.
743  *
744  * On success, the possession flag from the keyring ref will be tacked on to
745  * the key ref before it is returned.
746  */
747 key_ref_t key_create_or_update(key_ref_t keyring_ref,
748 			       const char *type,
749 			       const char *description,
750 			       const void *payload,
751 			       size_t plen,
752 			       key_perm_t perm,
753 			       unsigned long flags)
754 {
755 	unsigned long prealloc;
756 	const struct cred *cred = current_cred();
757 	struct key_type *ktype;
758 	struct key *keyring, *key = NULL;
759 	key_ref_t key_ref;
760 	int ret;
761 
762 	/* look up the key type to see if it's one of the registered kernel
763 	 * types */
764 	ktype = key_type_lookup(type);
765 	if (IS_ERR(ktype)) {
766 		key_ref = ERR_PTR(-ENODEV);
767 		goto error;
768 	}
769 
770 	key_ref = ERR_PTR(-EINVAL);
771 	if (!ktype->match || !ktype->instantiate)
772 		goto error_2;
773 
774 	keyring = key_ref_to_ptr(keyring_ref);
775 
776 	key_check(keyring);
777 
778 	key_ref = ERR_PTR(-ENOTDIR);
779 	if (keyring->type != &key_type_keyring)
780 		goto error_2;
781 
782 	ret = __key_link_begin(keyring, ktype, description, &prealloc);
783 	if (ret < 0)
784 		goto error_2;
785 
786 	/* if we're going to allocate a new key, we're going to have
787 	 * to modify the keyring */
788 	ret = key_permission(keyring_ref, KEY_WRITE);
789 	if (ret < 0) {
790 		key_ref = ERR_PTR(ret);
791 		goto error_3;
792 	}
793 
794 	/* if it's possible to update this type of key, search for an existing
795 	 * key of the same type and description in the destination keyring and
796 	 * update that instead if possible
797 	 */
798 	if (ktype->update) {
799 		key_ref = __keyring_search_one(keyring_ref, ktype, description,
800 					       0);
801 		if (!IS_ERR(key_ref))
802 			goto found_matching_key;
803 	}
804 
805 	/* if the client didn't supply permissions, decide on the ones we want */
806 	if (perm == KEY_PERM_UNDEF) {
807 		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
808 		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
809 
810 		if (ktype->read)
811 			perm |= KEY_POS_READ | KEY_USR_READ;
812 
813 		if (ktype == &key_type_keyring || ktype->update)
814 			perm |= KEY_USR_WRITE;
815 	}
816 
817 	/* allocate a new key */
818 	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
819 			perm, flags);
820 	if (IS_ERR(key)) {
821 		key_ref = ERR_CAST(key);
822 		goto error_3;
823 	}
824 
825 	/* instantiate it and link it into the target keyring */
826 	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL,
827 					 &prealloc);
828 	if (ret < 0) {
829 		key_put(key);
830 		key_ref = ERR_PTR(ret);
831 		goto error_3;
832 	}
833 
834 	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
835 
836  error_3:
837 	__key_link_end(keyring, ktype, prealloc);
838  error_2:
839 	key_type_put(ktype);
840  error:
841 	return key_ref;
842 
843  found_matching_key:
844 	/* we found a matching key, so we're going to try to update it
845 	 * - we can drop the locks first as we have the key pinned
846 	 */
847 	__key_link_end(keyring, ktype, prealloc);
848 	key_type_put(ktype);
849 
850 	key_ref = __key_update(key_ref, payload, plen);
851 	goto error;
852 }
853 EXPORT_SYMBOL(key_create_or_update);
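
/*
 * Illustrative sketch (not part of this file): the usual in-kernel pattern
 * for dropping a named blob into a keyring, updating any existing key of the
 * same description.  The keyring, description and payload are assumptions.
 *
 *	static int example_store(struct key *keyring,
 *				 const void *blob, size_t len)
 *	{
 *		key_ref_t kref;
 *
 *		kref = key_create_or_update(make_key_ref(keyring, 1),
 *					    "user", "example:blob",
 *					    blob, len,
 *					    KEY_PERM_UNDEF,
 *					    KEY_ALLOC_IN_QUOTA);
 *		if (IS_ERR(kref))
 *			return PTR_ERR(kref);
 *
 *		key_ref_put(kref);	// drop the reference handed back to us
 *		return 0;
 *	}
 */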
854 
855 /**
856  * key_update - Update a key's contents.
857  * @key_ref: The pointer (plus possession flag) to the key.
858  * @payload: The data to be used to update the key.
859  * @plen: The length of @payload.
860  *
861  * Attempt to update the contents of a key with the given payload data.  The
862  * caller must be granted Write permission on the key.  Negative keys can be
863  * instantiated by this method.
864  *
865  * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
866  * type does not support updating.  The key type may return other errors.
867  */
868 int key_update(key_ref_t key_ref, const void *payload, size_t plen)
869 {
870 	struct key *key = key_ref_to_ptr(key_ref);
871 	int ret;
872 
873 	key_check(key);
874 
875 	/* the key must be writable */
876 	ret = key_permission(key_ref, KEY_WRITE);
877 	if (ret < 0)
878 		goto error;
879 
880 	/* attempt to update it if supported */
881 	ret = -EOPNOTSUPP;
882 	if (key->type->update) {
883 		down_write(&key->sem);
884 
885 		ret = key->type->update(key, payload, plen);
886 		if (ret == 0)
887 			/* updating a negative key instantiates it */
888 			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
889 
890 		up_write(&key->sem);
891 	}
892 
893  error:
894 	return ret;
895 }
896 EXPORT_SYMBOL(key_update);
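
/*
 * Illustrative sketch (not part of this file): replacing the payload of a key
 * the caller already holds.  The helper name is an assumption; asserting
 * possession via make_key_ref(key, 1) is only appropriate if the caller
 * really does possess the key.
 *
 *	static int example_replace(struct key *key,
 *				   const void *blob, size_t len)
 *	{
 *		return key_update(make_key_ref(key, 1), blob, len);
 *	}
 */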
897 
898 /**
899  * key_revoke - Revoke a key.
900  * @key: The key to be revoked.
901  *
902  * Mark a key as being revoked and ask the type to free up its resources.  The
903  * revocation timeout is set and the key and all its links will be
904  * automatically garbage collected key_gc_delay seconds later if they
905  * are not manually dealt with first.
906  */
907 void key_revoke(struct key *key)
908 {
909 	struct timespec now;
910 	time_t time;
911 
912 	key_check(key);
913 
914 	/* make sure no one's trying to change or use the key when we mark it
915 	 * - we tell lockdep that we might nest because we might be revoking an
916 	 *   authorisation key whilst holding the sem on a key we've just
917 	 *   instantiated
918 	 */
919 	down_write_nested(&key->sem, 1);
920 	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
921 	    key->type->revoke)
922 		key->type->revoke(key);
923 
924 	/* set the death time to no more than the expiry time */
925 	now = current_kernel_time();
926 	time = now.tv_sec;
927 	if (key->revoked_at == 0 || key->revoked_at > time) {
928 		key->revoked_at = time;
929 		key_schedule_gc(key->revoked_at + key_gc_delay);
930 	}
931 
932 	up_write(&key->sem);
933 }
934 EXPORT_SYMBOL(key_revoke);
935 
936 /**
937  * register_key_type - Register a type of key.
938  * @ktype: The new key type.
939  *
940  * Register a new key type.
941  *
942  * Returns 0 on success or -EEXIST if a type of this name already exists.
943  */
944 int register_key_type(struct key_type *ktype)
945 {
946 	struct key_type *p;
947 	int ret;
948 
949 	ret = -EEXIST;
950 	down_write(&key_types_sem);
951 
952 	/* disallow key types with the same name */
953 	list_for_each_entry(p, &key_types_list, link) {
954 		if (strcmp(p->name, ktype->name) == 0)
955 			goto out;
956 	}
957 
958 	/* store the type */
959 	list_add(&ktype->link, &key_types_list);
960 	ret = 0;
961 
962 out:
963 	up_write(&key_types_sem);
964 	return ret;
965 }
966 EXPORT_SYMBOL(register_key_type);
967 
968 /**
969  * unregister_key_type - Unregister a type of key.
970  * @ktype: The key type.
971  *
972  * Unregister a key type and mark all the extant keys of this type as dead.
973  * Those keys of this type are then destroyed to get rid of their payloads and
974  * they and their links will be garbage collected as soon as possible.
975  */
976 void unregister_key_type(struct key_type *ktype)
977 {
978 	down_write(&key_types_sem);
979 	list_del_init(&ktype->link);
980 	downgrade_write(&key_types_sem);
981 	key_gc_keytype(ktype);
982 	up_read(&key_types_sem);
983 }
984 EXPORT_SYMBOL(unregister_key_type);
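
/*
 * Illustrative sketch (not part of this file): a minimal key type as a module
 * might define and register it.  All names are hypothetical; a real type
 * usually also supplies ->update(), ->describe() and ->read() operations.
 *
 *	static int example_instantiate(struct key *key,
 *				       const void *data, size_t datalen);
 *
 *	static int example_match(const struct key *key, const void *desc)
 *	{
 *		return strcmp(key->description, desc) == 0;
 *	}
 *
 *	static void example_destroy(struct key *key)
 *	{
 *		kfree(key->payload.data);
 *	}
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.instantiate	= example_instantiate,
 *		.match		= example_match,
 *		.destroy	= example_destroy,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_key_type(&key_type_example); // -EEXIST if taken
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_key_type(&key_type_example);	// kills extant keys
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */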
985 
986 /*
987  * Initialise the key management state.
988  */
989 void __init key_init(void)
990 {
991 	/* allocate a slab in which we can store keys */
992 	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
993 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
994 
995 	/* add the special key types */
996 	list_add_tail(&key_type_keyring.link, &key_types_list);
997 	list_add_tail(&key_type_dead.link, &key_types_list);
998 	list_add_tail(&key_type_user.link, &key_types_list);
999 
1000 	/* record the root user tracking */
1001 	rb_link_node(&root_key_user.node,
1002 		     NULL,
1003 		     &key_user_tree.rb_node);
1004 
1005 	rb_insert_color(&root_key_user.node,
1006 			&key_user_tree);
1007 }
1008