xref: /linux/security/keys/keyring.c (revision d44a62742decca5ae5688a562584dc0fe9fc63f6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Keyring handling
3  *
4  * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
8 #include <linux/export.h>
9 #include <linux/init.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/security.h>
13 #include <linux/seq_file.h>
14 #include <linux/err.h>
15 #include <keys/keyring-type.h>
16 #include <keys/user-type.h>
17 #include <linux/assoc_array_priv.h>
18 #include <linux/uaccess.h>
19 #include "internal.h"
20 
21 /*
22  * When plumbing the depths of the key tree, this sets a hard limit
23  * on how deep we're willing to go.
24  */
25 #define KEYRING_SEARCH_MAX_DEPTH 6
26 
27 /*
28  * We keep all named keyrings in a hash to speed looking them up.
29  */
30 #define KEYRING_NAME_HASH_SIZE	(1 << 5)
31 
32 /*
33  * We mark pointers we pass to the associative array with bit 1 set if
34  * they're keyrings and clear otherwise.
35  */
36 #define KEYRING_PTR_SUBTYPE	0x2UL
37 
38 static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
39 {
40 	return (unsigned long)x & KEYRING_PTR_SUBTYPE;
41 }
42 static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
43 {
44 	void *object = assoc_array_ptr_to_leaf(x);
45 	return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
46 }
47 static inline void *keyring_key_to_ptr(struct key *key)
48 {
49 	if (key->type == &key_type_keyring)
50 		return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
51 	return key;
52 }
53 
54 static struct list_head	keyring_name_hash[KEYRING_NAME_HASH_SIZE];
55 static DEFINE_RWLOCK(keyring_name_lock);
56 
57 static inline unsigned keyring_hash(const char *desc)
58 {
59 	unsigned bucket = 0;
60 
61 	for (; *desc; desc++)
62 		bucket += (unsigned char)*desc;
63 
64 	return bucket & (KEYRING_NAME_HASH_SIZE - 1);
65 }
66 
67 /*
68  * The keyring key type definition.  Keyrings are simply keys of this type and
69  * can be treated as ordinary keys in addition to having their own special
70  * operations.
71  */
72 static int keyring_preparse(struct key_preparsed_payload *prep);
73 static void keyring_free_preparse(struct key_preparsed_payload *prep);
74 static int keyring_instantiate(struct key *keyring,
75 			       struct key_preparsed_payload *prep);
76 static void keyring_revoke(struct key *keyring);
77 static void keyring_destroy(struct key *keyring);
78 static void keyring_describe(const struct key *keyring, struct seq_file *m);
79 static long keyring_read(const struct key *keyring,
80 			 char __user *buffer, size_t buflen);
81 
82 struct key_type key_type_keyring = {
83 	.name		= "keyring",
84 	.def_datalen	= 0,
85 	.preparse	= keyring_preparse,
86 	.free_preparse	= keyring_free_preparse,
87 	.instantiate	= keyring_instantiate,
88 	.revoke		= keyring_revoke,
89 	.destroy	= keyring_destroy,
90 	.describe	= keyring_describe,
91 	.read		= keyring_read,
92 };
93 EXPORT_SYMBOL(key_type_keyring);
94 
95 /*
96  * Mutex to serialise link/link calls to prevent two link calls in parallel
97  * introducing a cycle.
98  */
99 static DEFINE_MUTEX(keyring_serialise_link_lock);
100 
101 /*
102  * Publish the name of a keyring so that it can be found by name (if it has
103  * one).
104  */
105 static void keyring_publish_name(struct key *keyring)
106 {
107 	int bucket;
108 
109 	if (keyring->description) {
110 		bucket = keyring_hash(keyring->description);
111 
112 		write_lock(&keyring_name_lock);
113 
114 		if (!keyring_name_hash[bucket].next)
115 			INIT_LIST_HEAD(&keyring_name_hash[bucket]);
116 
117 		list_add_tail(&keyring->name_link,
118 			      &keyring_name_hash[bucket]);
119 
120 		write_unlock(&keyring_name_lock);
121 	}
122 }
123 
124 /*
125  * Preparse a keyring payload
126  */
127 static int keyring_preparse(struct key_preparsed_payload *prep)
128 {
129 	return prep->datalen != 0 ? -EINVAL : 0;
130 }
131 
132 /*
133  * Free a preparse of a keyring payload
134  */
135 static void keyring_free_preparse(struct key_preparsed_payload *prep)
136 {
137 }
138 
139 /*
140  * Initialise a keyring.
141  *
142  * Returns 0; a non-empty payload is rejected with -EINVAL by keyring_preparse().
143  */
144 static int keyring_instantiate(struct key *keyring,
145 			       struct key_preparsed_payload *prep)
146 {
147 	assoc_array_init(&keyring->keys);
148 	/* make the keyring available by name if it has one */
149 	keyring_publish_name(keyring);
150 	return 0;
151 }
152 
153 /*
154  * Multiply 64 bits by 32 bits to 96 bits and fold back to 64 bits.  Ideally we'd
155  * fold the carry back too, but that requires inline asm.
156  */
157 static u64 mult_64x32_and_fold(u64 x, u32 y)
158 {
159 	u64 hi = (u64)(u32)(x >> 32) * y;
160 	u64 lo = (u64)(u32)(x) * y;
161 	return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
162 }
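
/*
 * Equivalently: writing x = xh * 2^32 + xl, the 96-bit product is
 *
 *	x * y = (xh * y) * 2^32 + (xl * y) = hi * 2^32 + lo
 *
 * and the 32 bits of hi that spill past 2^64 (hi >> 32) are folded back into
 * the bottom of the 64-bit result; only the carry from those additions is
 * dropped, as noted above.
 */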
163 
164 /*
165  * Hash a key type and description.
166  */
167 static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
168 {
169 	const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
170 	const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
171 	const char *description = index_key->description;
172 	unsigned long hash, type;
173 	u32 piece;
174 	u64 acc;
175 	int n, desc_len = index_key->desc_len;
176 
177 	type = (unsigned long)index_key->type;
178 
179 	acc = mult_64x32_and_fold(type, desc_len + 13);
180 	acc = mult_64x32_and_fold(acc, 9207);
181 	for (;;) {
182 		n = desc_len;
183 		if (n <= 0)
184 			break;
185 		if (n > 4)
186 			n = 4;
187 		piece = 0;
188 		memcpy(&piece, description, n);
189 		description += n;
190 		desc_len -= n;
191 		acc = mult_64x32_and_fold(acc, piece);
192 		acc = mult_64x32_and_fold(acc, 9207);
193 	}
194 
195 	/* Fold the hash down to 32 bits if need be. */
196 	hash = acc;
197 	if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
198 		hash ^= acc >> 32;
199 
200 	/* Squidge all the keyrings into a separate part of the tree from
201 	 * ordinary keys by making sure the lowest level segment in the hash is
202 	 * zero for keyrings and non-zero otherwise.
203 	 */
204 	if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
205 		return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
206 	if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
207 		return (hash + (hash << level_shift)) & ~fan_mask;
208 	return hash;
209 }
210 
211 /*
212  * Build the next index key chunk.
213  *
214  * On 32-bit systems the index key is laid out as:
215  *
216  *	0	4	5	9...
217  *	hash	desclen	typeptr	desc[]
218  *
219  * On 64-bit systems:
220  *
221  *	0	8	9	17...
222  *	hash	desclen	typeptr	desc[]
223  *
224  * We return it one word-sized chunk at a time.
225  */
226 static unsigned long keyring_get_key_chunk(const void *data, int level)
227 {
228 	const struct keyring_index_key *index_key = data;
229 	unsigned long chunk = 0;
230 	long offset = 0;
231 	int desc_len = index_key->desc_len, n = sizeof(chunk);
232 
233 	level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
234 	switch (level) {
235 	case 0:
236 		return hash_key_type_and_desc(index_key);
237 	case 1:
238 		return ((unsigned long)index_key->type << 8) | desc_len;
239 	case 2:
240 		if (desc_len == 0)
241 			return (u8)((unsigned long)index_key->type >>
242 				    (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
243 		n--;
244 		offset = 1;
245 		/* fall through */
246 	default:
247 		offset += sizeof(chunk) - 1;
248 		offset += (level - 3) * sizeof(chunk);
249 		if (offset >= desc_len)
250 			return 0;
251 		desc_len -= offset;
252 		if (desc_len > n)
253 			desc_len = n;
254 		offset += desc_len;
255 		do {
256 			chunk <<= 8;
257 			chunk |= ((u8*)index_key->description)[--offset];
258 		} while (--desc_len > 0);
259 
260 		if (level == 2) {
261 			chunk <<= 8;
262 			chunk |= (u8)((unsigned long)index_key->type >>
263 				      (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
264 		}
265 		return chunk;
266 	}
267 }
268 
269 static unsigned long keyring_get_object_key_chunk(const void *object, int level)
270 {
271 	const struct key *key = keyring_ptr_to_key(object);
272 	return keyring_get_key_chunk(&key->index_key, level);
273 }
274 
275 static bool keyring_compare_object(const void *object, const void *data)
276 {
277 	const struct keyring_index_key *index_key = data;
278 	const struct key *key = keyring_ptr_to_key(object);
279 
280 	return key->index_key.type == index_key->type &&
281 		key->index_key.desc_len == index_key->desc_len &&
282 		memcmp(key->index_key.description, index_key->description,
283 		       index_key->desc_len) == 0;
284 }
285 
286 /*
287  * Compare the index keys of a pair of objects and determine the bit position
288  * at which they differ - if they differ.
289  */
290 static int keyring_diff_objects(const void *object, const void *data)
291 {
292 	const struct key *key_a = keyring_ptr_to_key(object);
293 	const struct keyring_index_key *a = &key_a->index_key;
294 	const struct keyring_index_key *b = data;
295 	unsigned long seg_a, seg_b;
296 	int level, i;
297 
298 	level = 0;
299 	seg_a = hash_key_type_and_desc(a);
300 	seg_b = hash_key_type_and_desc(b);
301 	if ((seg_a ^ seg_b) != 0)
302 		goto differ;
303 
304 	/* The number of bits contributed by the hash is controlled by a
305 	 * constant in the assoc_array headers.  Everything else thereafter we
306 	 * can deal with as being machine word-size dependent.
307 	 */
308 	level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
309 	seg_a = a->desc_len;
310 	seg_b = b->desc_len;
311 	if ((seg_a ^ seg_b) != 0)
312 		goto differ;
313 
314 	/* The next bit may not work on big endian */
315 	level++;
316 	seg_a = (unsigned long)a->type;
317 	seg_b = (unsigned long)b->type;
318 	if ((seg_a ^ seg_b) != 0)
319 		goto differ;
320 
321 	level += sizeof(unsigned long);
322 	if (a->desc_len == 0)
323 		goto same;
324 
325 	i = 0;
326 	if (((unsigned long)a->description | (unsigned long)b->description) &
327 	    (sizeof(unsigned long) - 1)) {
328 		do {
329 			seg_a = *(unsigned long *)(a->description + i);
330 			seg_b = *(unsigned long *)(b->description + i);
331 			if ((seg_a ^ seg_b) != 0)
332 				goto differ_plus_i;
333 			i += sizeof(unsigned long);
334 		} while (i < (a->desc_len & (sizeof(unsigned long) - 1)));
335 	}
336 
337 	for (; i < a->desc_len; i++) {
338 		seg_a = *(unsigned char *)(a->description + i);
339 		seg_b = *(unsigned char *)(b->description + i);
340 		if ((seg_a ^ seg_b) != 0)
341 			goto differ_plus_i;
342 	}
343 
344 same:
345 	return -1;
346 
347 differ_plus_i:
348 	level += i;
349 differ:
350 	i = level * 8 + __ffs(seg_a ^ seg_b);
351 	return i;
352 }
353 
354 /*
355  * Free an object after stripping the keyring flag off the pointer.
356  */
357 static void keyring_free_object(void *object)
358 {
359 	key_put(keyring_ptr_to_key(object));
360 }
361 
362 /*
363  * Operations for keyring management by the index-tree routines.
364  */
365 static const struct assoc_array_ops keyring_assoc_array_ops = {
366 	.get_key_chunk		= keyring_get_key_chunk,
367 	.get_object_key_chunk	= keyring_get_object_key_chunk,
368 	.compare_object		= keyring_compare_object,
369 	.diff_objects		= keyring_diff_objects,
370 	.free_object		= keyring_free_object,
371 };
372 
373 /*
374  * Clean up a keyring when it is destroyed.  Unpublish its name if it had one
375  * and dispose of its data.
376  *
377  * The garbage collector detects the final key_put(), removes the keyring from
378  * the serial number tree and then does RCU synchronisation before coming here,
379  * so we shouldn't need to worry about code poking around here with the RCU
380  * readlock held by this time.
381  */
382 static void keyring_destroy(struct key *keyring)
383 {
384 	if (keyring->description) {
385 		write_lock(&keyring_name_lock);
386 
387 		if (keyring->name_link.next != NULL &&
388 		    !list_empty(&keyring->name_link))
389 			list_del(&keyring->name_link);
390 
391 		write_unlock(&keyring_name_lock);
392 	}
393 
394 	if (keyring->restrict_link) {
395 		struct key_restriction *keyres = keyring->restrict_link;
396 
397 		key_put(keyres->key);
398 		kfree(keyres);
399 	}
400 
401 	assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
402 }
403 
404 /*
405  * Describe a keyring for /proc.
406  */
407 static void keyring_describe(const struct key *keyring, struct seq_file *m)
408 {
409 	if (keyring->description)
410 		seq_puts(m, keyring->description);
411 	else
412 		seq_puts(m, "[anon]");
413 
414 	if (key_is_positive(keyring)) {
415 		if (keyring->keys.nr_leaves_on_tree != 0)
416 			seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
417 		else
418 			seq_puts(m, ": empty");
419 	}
420 }
421 
422 struct keyring_read_iterator_context {
423 	size_t			buflen;
424 	size_t			count;
425 	key_serial_t __user	*buffer;
426 };
427 
428 static int keyring_read_iterator(const void *object, void *data)
429 {
430 	struct keyring_read_iterator_context *ctx = data;
431 	const struct key *key = keyring_ptr_to_key(object);
432 	int ret;
433 
434 	kenter("{%s,%d},,{%zu/%zu}",
435 	       key->type->name, key->serial, ctx->count, ctx->buflen);
436 
437 	if (ctx->count >= ctx->buflen)
438 		return 1;
439 
440 	ret = put_user(key->serial, ctx->buffer);
441 	if (ret < 0)
442 		return ret;
443 	ctx->buffer++;
444 	ctx->count += sizeof(key->serial);
445 	return 0;
446 }
447 
448 /*
449  * Read a list of key IDs from the keyring's contents in binary form
450  *
451  * The keyring's semaphore is read-locked by the caller.  This prevents someone
452  * from modifying it under us - which could cause us to read key IDs multiple
453  * times.
454  */
455 static long keyring_read(const struct key *keyring,
456 			 char __user *buffer, size_t buflen)
457 {
458 	struct keyring_read_iterator_context ctx;
459 	long ret;
460 
461 	kenter("{%d},,%zu", key_serial(keyring), buflen);
462 
463 	if (buflen & (sizeof(key_serial_t) - 1))
464 		return -EINVAL;
465 
466 	/* Copy as many key IDs as fit into the buffer */
467 	if (buffer && buflen) {
468 		ctx.buffer = (key_serial_t __user *)buffer;
469 		ctx.buflen = buflen;
470 		ctx.count = 0;
471 		ret = assoc_array_iterate(&keyring->keys,
472 					  keyring_read_iterator, &ctx);
473 		if (ret < 0) {
474 			kleave(" = %ld [iterate]", ret);
475 			return ret;
476 		}
477 	}
478 
479 	/* Return the size of the buffer needed */
480 	ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
481 	if (ret <= buflen)
482 		kleave("= %ld [ok]", ret);
483 	else
484 		kleave("= %ld [buffer too small]", ret);
485 	return ret;
486 }
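
/*
 * Userspace reaches the same payload through KEYCTL_READ.  A minimal sketch,
 * assuming the keyutils library and a keyring the caller may read:
 *
 *	key_serial_t ids[64];
 *	long n = keyctl_read(keyring_id, (char *)ids, sizeof(ids));
 *
 * n is the size in bytes needed to hold all of the IDs; min(n, sizeof(ids))
 * bytes of ids[] have been filled in, one key_serial_t per linked key.
 */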
487 
488 /*
489  * Allocate a keyring and link it into the destination keyring.
490  */
491 struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
492 			  const struct cred *cred, key_perm_t perm,
493 			  unsigned long flags,
494 			  struct key_restriction *restrict_link,
495 			  struct key *dest)
496 {
497 	struct key *keyring;
498 	int ret;
499 
500 	keyring = key_alloc(&key_type_keyring, description,
501 			    uid, gid, cred, perm, flags, restrict_link);
502 	if (!IS_ERR(keyring)) {
503 		ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
504 		if (ret < 0) {
505 			key_put(keyring);
506 			keyring = ERR_PTR(ret);
507 		}
508 	}
509 
510 	return keyring;
511 }
512 EXPORT_SYMBOL(keyring_alloc);
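
/*
 * A minimal in-kernel usage sketch (the description and permission mask are
 * illustrative, not prescriptive): allocate an unquoted keyring owned by root
 * with no restriction and no destination keyring:
 *
 *	struct key *kr;
 *
 *	kr = keyring_alloc("_example", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
 *			   current_cred(),
 *			   (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
 *			   KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
 *	if (IS_ERR(kr))
 *		return PTR_ERR(kr);
 *	...
 *	key_put(kr);
 */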
513 
514 /**
515  * restrict_link_reject - Give -EPERM to restrict link
516  * @keyring: The keyring being added to.
517  * @type: The type of key being added.
518  * @payload: The payload of the key intended to be added.
519  * @restriction_key: Keys providing additional data for evaluating restriction.
520  *
521  * Reject the addition of any links to a keyring.  It can be overridden by
522  * passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when
523  * adding a key to a keyring.
524  *
525  * This is meant to be stored in a key_restriction structure which is passed
526  * in the restrict_link parameter to keyring_alloc().
527  */
528 int restrict_link_reject(struct key *keyring,
529 			 const struct key_type *type,
530 			 const union key_payload *payload,
531 			 struct key *restriction_key)
532 {
533 	return -EPERM;
534 }
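
/*
 * A sketch of wiring the rejection into keyring_alloc() (names illustrative;
 * uid, gid, cred and perm are whatever the caller would normally pass):
 *
 *	struct key_restriction *keyres;
 *
 *	keyres = kzalloc(sizeof(*keyres), GFP_KERNEL);
 *	if (!keyres)
 *		return -ENOMEM;
 *	keyres->check = restrict_link_reject;
 *	kr = keyring_alloc("_sealed", uid, gid, cred, perm,
 *			   KEY_ALLOC_NOT_IN_QUOTA, keyres, NULL);
 *
 * On success, keyring_destroy() will later free the restriction along with
 * the keyring.
 */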
535 
536 /*
537  * By default, keys are found by an exact match on their descriptions.
538  */
539 bool key_default_cmp(const struct key *key,
540 		     const struct key_match_data *match_data)
541 {
542 	return strcmp(key->description, match_data->raw_data) == 0;
543 }
544 
545 /*
546  * Iteration function to consider each key found.
547  */
548 static int keyring_search_iterator(const void *object, void *iterator_data)
549 {
550 	struct keyring_search_context *ctx = iterator_data;
551 	const struct key *key = keyring_ptr_to_key(object);
552 	unsigned long kflags = READ_ONCE(key->flags);
553 	short state = READ_ONCE(key->state);
554 
555 	kenter("{%d}", key->serial);
556 
557 	/* ignore keys not of this type */
558 	if (key->type != ctx->index_key.type) {
559 		kleave(" = 0 [!type]");
560 		return 0;
561 	}
562 
563 	/* skip invalidated, revoked and expired keys */
564 	if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
565 		time64_t expiry = READ_ONCE(key->expiry);
566 
567 		if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
568 			      (1 << KEY_FLAG_REVOKED))) {
569 			ctx->result = ERR_PTR(-EKEYREVOKED);
570 			kleave(" = %d [invrev]", ctx->skipped_ret);
571 			goto skipped;
572 		}
573 
574 		if (expiry && ctx->now >= expiry) {
575 			if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
576 				ctx->result = ERR_PTR(-EKEYEXPIRED);
577 			kleave(" = %d [expire]", ctx->skipped_ret);
578 			goto skipped;
579 		}
580 	}
581 
582 	/* keys that don't match */
583 	if (!ctx->match_data.cmp(key, &ctx->match_data)) {
584 		kleave(" = 0 [!match]");
585 		return 0;
586 	}
587 
588 	/* key must have search permissions */
589 	if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
590 	    key_task_permission(make_key_ref(key, ctx->possessed),
591 				ctx->cred, KEY_NEED_SEARCH) < 0) {
592 		ctx->result = ERR_PTR(-EACCES);
593 		kleave(" = %d [!perm]", ctx->skipped_ret);
594 		goto skipped;
595 	}
596 
597 	if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
598 		/* we set a different error code if we pass a negative key */
599 		if (state < 0) {
600 			ctx->result = ERR_PTR(state);
601 			kleave(" = %d [neg]", ctx->skipped_ret);
602 			goto skipped;
603 		}
604 	}
605 
606 	/* Found */
607 	ctx->result = make_key_ref(key, ctx->possessed);
608 	kleave(" = 1 [found]");
609 	return 1;
610 
611 skipped:
612 	return ctx->skipped_ret;
613 }
614 
615 /*
616  * Search inside a keyring for a key.  We can search by walking to it
617  * directly based on its index-key or we can iterate over the entire
618  * tree looking for it, based on the match function.
619  */
620 static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
621 {
622 	if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) {
623 		const void *object;
624 
625 		object = assoc_array_find(&keyring->keys,
626 					  &keyring_assoc_array_ops,
627 					  &ctx->index_key);
628 		return object ? ctx->iterator(object, ctx) : 0;
629 	}
630 	return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
631 }
632 
633 /*
634  * Search a tree of keyrings that point to other keyrings up to the maximum
635  * depth.
636  */
637 static bool search_nested_keyrings(struct key *keyring,
638 				   struct keyring_search_context *ctx)
639 {
640 	struct {
641 		struct key *keyring;
642 		struct assoc_array_node *node;
643 		int slot;
644 	} stack[KEYRING_SEARCH_MAX_DEPTH];
645 
646 	struct assoc_array_shortcut *shortcut;
647 	struct assoc_array_node *node;
648 	struct assoc_array_ptr *ptr;
649 	struct key *key;
650 	int sp = 0, slot;
651 
652 	kenter("{%d},{%s,%s}",
653 	       keyring->serial,
654 	       ctx->index_key.type->name,
655 	       ctx->index_key.description);
656 
657 #define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
658 	BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
659 	       (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
660 
661 	/* Check to see if this top-level keyring is what we are looking for
662 	 * and whether it is valid or not.
663 	 */
664 	if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
665 	    keyring_compare_object(keyring, &ctx->index_key)) {
666 		ctx->skipped_ret = 2;
667 		switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
668 		case 1:
669 			goto found;
670 		case 2:
671 			return false;
672 		default:
673 			break;
674 		}
675 	}
676 
677 	ctx->skipped_ret = 0;
678 
679 	/* Start processing a new keyring */
680 descend_to_keyring:
681 	kdebug("descend to %d", keyring->serial);
682 	if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
683 			      (1 << KEY_FLAG_REVOKED)))
684 		goto not_this_keyring;
685 
686 	/* Search through the keys in this keyring before searching its
687 	 * subtrees.
688 	 */
689 	if (search_keyring(keyring, ctx))
690 		goto found;
691 
692 	/* Then manually iterate through the keyrings nested in this one.
693 	 *
694 	 * Start from the root node of the index tree.  Because of the way the
695 	 * hash function has been set up, keyrings cluster on the leftmost
696 	 * branch of the root node (root slot 0) or in the root node itself.
697 	 * Non-keyrings avoid the leftmost branch of the root entirely (root
698 	 * slots 1-15).
699 	 */
700 	ptr = READ_ONCE(keyring->keys.root);
701 	if (!ptr)
702 		goto not_this_keyring;
703 
704 	if (assoc_array_ptr_is_shortcut(ptr)) {
705 		/* If the root is a shortcut, either the keyring only contains
706 		 * keyring pointers (everything clusters behind root slot 0) or
707 		 * doesn't contain any keyring pointers.
708 		 */
709 		shortcut = assoc_array_ptr_to_shortcut(ptr);
710 		if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
711 			goto not_this_keyring;
712 
713 		ptr = READ_ONCE(shortcut->next_node);
714 		node = assoc_array_ptr_to_node(ptr);
715 		goto begin_node;
716 	}
717 
718 	node = assoc_array_ptr_to_node(ptr);
719 	ptr = node->slots[0];
720 	if (!assoc_array_ptr_is_meta(ptr))
721 		goto begin_node;
722 
723 descend_to_node:
724 	/* Descend to a more distal node in this keyring's content tree and go
725 	 * through that.
726 	 */
727 	kdebug("descend");
728 	if (assoc_array_ptr_is_shortcut(ptr)) {
729 		shortcut = assoc_array_ptr_to_shortcut(ptr);
730 		ptr = READ_ONCE(shortcut->next_node);
731 		BUG_ON(!assoc_array_ptr_is_node(ptr));
732 	}
733 	node = assoc_array_ptr_to_node(ptr);
734 
735 begin_node:
736 	kdebug("begin_node");
737 	slot = 0;
738 ascend_to_node:
739 	/* Go through the slots in a node */
740 	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
741 		ptr = READ_ONCE(node->slots[slot]);
742 
743 		if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
744 			goto descend_to_node;
745 
746 		if (!keyring_ptr_is_keyring(ptr))
747 			continue;
748 
749 		key = keyring_ptr_to_key(ptr);
750 
751 		if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
752 			if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
753 				ctx->result = ERR_PTR(-ELOOP);
754 				return false;
755 			}
756 			goto not_this_keyring;
757 		}
758 
759 		/* Search a nested keyring */
760 		if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
761 		    key_task_permission(make_key_ref(key, ctx->possessed),
762 					ctx->cred, KEY_NEED_SEARCH) < 0)
763 			continue;
764 
765 		/* stack the current position */
766 		stack[sp].keyring = keyring;
767 		stack[sp].node = node;
768 		stack[sp].slot = slot;
769 		sp++;
770 
771 		/* begin again with the new keyring */
772 		keyring = key;
773 		goto descend_to_keyring;
774 	}
775 
776 	/* We've dealt with all the slots in the current node, so now we need
777 	 * to ascend to the parent and continue processing there.
778 	 */
779 	ptr = READ_ONCE(node->back_pointer);
780 	slot = node->parent_slot;
781 
782 	if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
783 		shortcut = assoc_array_ptr_to_shortcut(ptr);
784 		ptr = READ_ONCE(shortcut->back_pointer);
785 		slot = shortcut->parent_slot;
786 	}
787 	if (!ptr)
788 		goto not_this_keyring;
789 	node = assoc_array_ptr_to_node(ptr);
790 	slot++;
791 
792 	/* If we've ascended to the root (zero backpointer), we must have just
793 	 * finished processing the leftmost branch rather than the root slots -
794 	 * so there can't be any more keyrings for us to find.
795 	 */
796 	if (node->back_pointer) {
797 		kdebug("ascend %d", slot);
798 		goto ascend_to_node;
799 	}
800 
801 	/* The keyring we're looking at was disqualified or didn't contain a
802 	 * matching key.
803 	 */
804 not_this_keyring:
805 	kdebug("not_this_keyring %d", sp);
806 	if (sp <= 0) {
807 		kleave(" = false");
808 		return false;
809 	}
810 
811 	/* Resume the processing of a keyring higher up in the tree */
812 	sp--;
813 	keyring = stack[sp].keyring;
814 	node = stack[sp].node;
815 	slot = stack[sp].slot + 1;
816 	kdebug("ascend to %d [%d]", keyring->serial, slot);
817 	goto ascend_to_node;
818 
819 	/* We found a viable match */
820 found:
821 	key = key_ref_to_ptr(ctx->result);
822 	key_check(key);
823 	if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
824 		key->last_used_at = ctx->now;
825 		keyring->last_used_at = ctx->now;
826 		while (sp > 0)
827 			stack[--sp].keyring->last_used_at = ctx->now;
828 	}
829 	kleave(" = true");
830 	return true;
831 }
832 
833 /**
834  * keyring_search_aux - Search a keyring tree for a key matching some criteria
835  * @keyring_ref: A pointer to the keyring with possession indicator.
836  * @ctx: The keyring search context.
837  *
838  * Search the supplied keyring tree for a key that matches the criteria given.
839  * The root keyring and any linked keyrings must grant Search permission to the
840  * caller to be searchable and keys can only be found if they too grant Search
841  * to the caller. The possession flag on the root keyring pointer controls use
842  * of the possessor bits in permissions checking of the entire tree.  In
843  * addition, the LSM gets to forbid keyring searches and key matches.
844  *
845  * The search is performed as a breadth-then-depth search up to the prescribed
846  * limit (KEYRING_SEARCH_MAX_DEPTH).
847  *
848  * Keys are matched to the type provided and are then filtered by the match
849  * function, which is given the description to use in any way it sees fit.  The
850  * match function may use any attributes of a key that it wishes to
851  * determine the match.  Normally the match function from the key type would be
852  * used.
853  *
854  * RCU can be used to prevent the keyring key lists from disappearing without
855  * the need to take lots of locks.
856  *
857  * Returns a pointer to the found key and increments the key usage count if
858  * successful; -EAGAIN if no matching keys were found, or if expired or revoked
859  * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
860  * specified keyring wasn't a keyring.
861  *
862  * In the case of a successful return, the possession attribute from
863  * @keyring_ref is propagated to the returned key reference.
864  */
865 key_ref_t keyring_search_aux(key_ref_t keyring_ref,
866 			     struct keyring_search_context *ctx)
867 {
868 	struct key *keyring;
869 	long err;
870 
871 	ctx->iterator = keyring_search_iterator;
872 	ctx->possessed = is_key_possessed(keyring_ref);
873 	ctx->result = ERR_PTR(-EAGAIN);
874 
875 	keyring = key_ref_to_ptr(keyring_ref);
876 	key_check(keyring);
877 
878 	if (keyring->type != &key_type_keyring)
879 		return ERR_PTR(-ENOTDIR);
880 
881 	if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
882 		err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH);
883 		if (err < 0)
884 			return ERR_PTR(err);
885 	}
886 
887 	rcu_read_lock();
888 	ctx->now = ktime_get_real_seconds();
889 	if (search_nested_keyrings(keyring, ctx))
890 		__key_get(key_ref_to_ptr(ctx->result));
891 	rcu_read_unlock();
892 	return ctx->result;
893 }
894 
895 /**
896  * keyring_search - Search the supplied keyring tree for a matching key
897  * @keyring: The root of the keyring tree to be searched.
898  * @type: The type of key we want to find.
899  * @description: The description of the key we want to find.
900  *
901  * As keyring_search_aux() above, but using the current task's credentials and
902  * type's default matching function and preferred search method.
903  */
904 key_ref_t keyring_search(key_ref_t keyring,
905 			 struct key_type *type,
906 			 const char *description)
907 {
908 	struct keyring_search_context ctx = {
909 		.index_key.type		= type,
910 		.index_key.description	= description,
911 		.index_key.desc_len	= strlen(description),
912 		.cred			= current_cred(),
913 		.match_data.cmp		= key_default_cmp,
914 		.match_data.raw_data	= description,
915 		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
916 		.flags			= KEYRING_SEARCH_DO_STATE_CHECK,
917 	};
918 	key_ref_t key;
919 	int ret;
920 
921 	if (type->match_preparse) {
922 		ret = type->match_preparse(&ctx.match_data);
923 		if (ret < 0)
924 			return ERR_PTR(ret);
925 	}
926 
927 	key = keyring_search_aux(keyring, &ctx);
928 
929 	if (type->match_free)
930 		type->match_free(&ctx.match_data);
931 	return key;
932 }
933 EXPORT_SYMBOL(keyring_search);
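
/*
 * A minimal search sketch, assuming a "user" key described "foo" may be
 * linked somewhere under the keyring @kr:
 *
 *	key_ref_t ref;
 *
 *	ref = keyring_search(make_key_ref(kr, true), &key_type_user, "foo");
 *	if (!IS_ERR(ref)) {
 *		struct key *key = key_ref_to_ptr(ref);
 *		...
 *		key_put(key);
 *	}
 *
 * A successful return carries an extra usage count on the key, so it must be
 * put when the caller is done with it.
 */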
934 
935 static struct key_restriction *keyring_restriction_alloc(
936 	key_restrict_link_func_t check)
937 {
938 	struct key_restriction *keyres =
939 		kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
940 
941 	if (!keyres)
942 		return ERR_PTR(-ENOMEM);
943 
944 	keyres->check = check;
945 
946 	return keyres;
947 }
948 
949 /*
950  * Semaphore to serialise restriction setup to prevent reference count
951  * cycles through restriction key pointers.
952  */
953 static DECLARE_RWSEM(keyring_serialise_restrict_sem);
954 
955 /*
956  * Check for restriction cycles that would prevent keyring garbage collection.
957  * keyring_serialise_restrict_sem must be held.
958  */
959 static bool keyring_detect_restriction_cycle(const struct key *dest_keyring,
960 					     struct key_restriction *keyres)
961 {
962 	while (keyres && keyres->key &&
963 	       keyres->key->type == &key_type_keyring) {
964 		if (keyres->key == dest_keyring)
965 			return true;
966 
967 		keyres = keyres->key->restrict_link;
968 	}
969 
970 	return false;
971 }
972 
973 /**
974  * keyring_restrict - Look up and apply a restriction to a keyring
975  * @keyring_ref: The keyring to be restricted
976  * @type: The key type that will provide the restriction checker.
977  * @restriction: The restriction options to apply to the keyring
978  *
979  * Look up a keyring and apply a restriction to it.  The restriction is managed
980  * by the specific key type, but can be configured by the options specified in
981  * the restriction string.
982  */
983 int keyring_restrict(key_ref_t keyring_ref, const char *type,
984 		     const char *restriction)
985 {
986 	struct key *keyring;
987 	struct key_type *restrict_type = NULL;
988 	struct key_restriction *restrict_link;
989 	int ret = 0;
990 
991 	keyring = key_ref_to_ptr(keyring_ref);
992 	key_check(keyring);
993 
994 	if (keyring->type != &key_type_keyring)
995 		return -ENOTDIR;
996 
997 	if (!type) {
998 		restrict_link = keyring_restriction_alloc(restrict_link_reject);
999 	} else {
1000 		restrict_type = key_type_lookup(type);
1001 
1002 		if (IS_ERR(restrict_type))
1003 			return PTR_ERR(restrict_type);
1004 
1005 		if (!restrict_type->lookup_restriction) {
1006 			ret = -ENOENT;
1007 			goto error;
1008 		}
1009 
1010 		restrict_link = restrict_type->lookup_restriction(restriction);
1011 	}
1012 
1013 	if (IS_ERR(restrict_link)) {
1014 		ret = PTR_ERR(restrict_link);
1015 		goto error;
1016 	}
1017 
1018 	down_write(&keyring->sem);
1019 	down_write(&keyring_serialise_restrict_sem);
1020 
1021 	if (keyring->restrict_link)
1022 		ret = -EEXIST;
1023 	else if (keyring_detect_restriction_cycle(keyring, restrict_link))
1024 		ret = -EDEADLK;
1025 	else
1026 		keyring->restrict_link = restrict_link;
1027 
1028 	up_write(&keyring_serialise_restrict_sem);
1029 	up_write(&keyring->sem);
1030 
1031 	if (ret < 0) {
1032 		key_put(restrict_link->key);
1033 		kfree(restrict_link);
1034 	}
1035 
1036 error:
1037 	if (restrict_type)
1038 		key_type_put(restrict_type);
1039 
1040 	return ret;
1041 }
1042 EXPORT_SYMBOL(keyring_restrict);
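
/*
 * Passing a NULL type applies restrict_link_reject() and so permanently seals
 * the keyring against new links; a sketch (@kr illustrative):
 *
 *	ret = keyring_restrict(make_key_ref(kr, true), NULL, NULL);
 *
 * With a non-NULL type, the restriction string is handed to that type's
 * ->lookup_restriction() hook to build the checker.
 */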
1043 
1044 /*
1045  * Search the given keyring for a key that might be updated.
1046  *
1047  * The caller must guarantee that the keyring is a keyring and that the
1048  * permission is granted to modify the keyring as no check is made here.  The
1049  * caller must also hold a lock on the keyring semaphore.
1050  *
1051  * Returns a pointer to the found key with usage count incremented if
1052  * successful and returns NULL if not found.  Revoked and invalidated keys are
1053  * skipped over.
1054  *
1055  * If successful, the possession indicator is propagated from the keyring ref
1056  * to the returned key reference.
1057  */
1058 key_ref_t find_key_to_update(key_ref_t keyring_ref,
1059 			     const struct keyring_index_key *index_key)
1060 {
1061 	struct key *keyring, *key;
1062 	const void *object;
1063 
1064 	keyring = key_ref_to_ptr(keyring_ref);
1065 
1066 	kenter("{%d},{%s,%s}",
1067 	       keyring->serial, index_key->type->name, index_key->description);
1068 
1069 	object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
1070 				  index_key);
1071 
1072 	if (object)
1073 		goto found;
1074 
1075 	kleave(" = NULL");
1076 	return NULL;
1077 
1078 found:
1079 	key = keyring_ptr_to_key(object);
1080 	if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
1081 			  (1 << KEY_FLAG_REVOKED))) {
1082 		kleave(" = NULL [x]");
1083 		return NULL;
1084 	}
1085 	__key_get(key);
1086 	kleave(" = {%d}", key->serial);
1087 	return make_key_ref(key, is_key_possessed(keyring_ref));
1088 }
1089 
1090 /*
1091  * Find a keyring with the specified name.
1092  *
1093  * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
1094  * user in the current user namespace are considered.  If @uid_keyring is %true,
1095  * the keyring additionally must have been allocated as a user or user session
1096  * keyring; otherwise, it must grant Search permission directly to the caller.
1097  *
1098  * Returns a pointer to the keyring with the keyring's refcount having been
1099  * incremented on success.  -ENOKEY is returned if a key could not be found.
1100  */
1101 struct key *find_keyring_by_name(const char *name, bool uid_keyring)
1102 {
1103 	struct key *keyring;
1104 	int bucket;
1105 
1106 	if (!name)
1107 		return ERR_PTR(-EINVAL);
1108 
1109 	bucket = keyring_hash(name);
1110 
1111 	read_lock(&keyring_name_lock);
1112 
1113 	if (keyring_name_hash[bucket].next) {
1114 		/* search this hash bucket for a keyring with a matching name
1115 		 * that's readable and that hasn't been revoked */
1116 		list_for_each_entry(keyring,
1117 				    &keyring_name_hash[bucket],
1118 				    name_link
1119 				    ) {
1120 			if (!kuid_has_mapping(current_user_ns(), keyring->user->uid))
1121 				continue;
1122 
1123 			if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
1124 				continue;
1125 
1126 			if (strcmp(keyring->description, name) != 0)
1127 				continue;
1128 
1129 			if (uid_keyring) {
1130 				if (!test_bit(KEY_FLAG_UID_KEYRING,
1131 					      &keyring->flags))
1132 					continue;
1133 			} else {
1134 				if (key_permission(make_key_ref(keyring, 0),
1135 						   KEY_NEED_SEARCH) < 0)
1136 					continue;
1137 			}
1138 
1139 			/* we've got a match but we might end up racing with
1140 			 * key_cleanup() if the keyring is currently 'dead'
1141 			 * (ie. it has a zero usage count) */
1142 			if (!refcount_inc_not_zero(&keyring->usage))
1143 				continue;
1144 			keyring->last_used_at = ktime_get_real_seconds();
1145 			goto out;
1146 		}
1147 	}
1148 
1149 	keyring = ERR_PTR(-ENOKEY);
1150 out:
1151 	read_unlock(&keyring_name_lock);
1152 	return keyring;
1153 }
1154 
1155 static int keyring_detect_cycle_iterator(const void *object,
1156 					 void *iterator_data)
1157 {
1158 	struct keyring_search_context *ctx = iterator_data;
1159 	const struct key *key = keyring_ptr_to_key(object);
1160 
1161 	kenter("{%d}", key->serial);
1162 
1163 	/* We might get a keyring with a matching index-key that is nonetheless a
1164 	 * different keyring. */
1165 	if (key != ctx->match_data.raw_data)
1166 		return 0;
1167 
1168 	ctx->result = ERR_PTR(-EDEADLK);
1169 	return 1;
1170 }
1171 
1172 /*
1173  * See if a cycle will be created by inserting acyclic tree B in acyclic
1174  * tree A at the topmost level (ie: as a direct child of A).
1175  *
1176  * Since we are adding B to A at the top level, checking for cycles should just
1177  * be a matter of seeing if node A is somewhere in tree B.
1178  */
1179 static int keyring_detect_cycle(struct key *A, struct key *B)
1180 {
1181 	struct keyring_search_context ctx = {
1182 		.index_key		= A->index_key,
1183 		.match_data.raw_data	= A,
1184 		.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
1185 		.iterator		= keyring_detect_cycle_iterator,
1186 		.flags			= (KEYRING_SEARCH_NO_STATE_CHECK |
1187 					   KEYRING_SEARCH_NO_UPDATE_TIME |
1188 					   KEYRING_SEARCH_NO_CHECK_PERM |
1189 					   KEYRING_SEARCH_DETECT_TOO_DEEP),
1190 	};
1191 
1192 	rcu_read_lock();
1193 	search_nested_keyrings(B, &ctx);
1194 	rcu_read_unlock();
1195 	return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
1196 }
1197 
1198 /*
1199  * Lock keyring for link.
1200  */
1201 int __key_link_lock(struct key *keyring,
1202 		    const struct keyring_index_key *index_key)
1203 	__acquires(&keyring->sem)
1204 	__acquires(&keyring_serialise_link_lock)
1205 {
1206 	if (keyring->type != &key_type_keyring)
1207 		return -ENOTDIR;
1208 
1209 	down_write(&keyring->sem);
1210 
1211 	/* Serialise link/link calls to prevent parallel calls causing a cycle
1212 	 * when linking two keyrings in opposite orders.
1213 	 */
1214 	if (index_key->type == &key_type_keyring)
1215 		mutex_lock(&keyring_serialise_link_lock);
1216 
1217 	return 0;
1218 }
1219 
1220 /*
1221  * Lock keyrings for move (link/unlink combination).
1222  */
1223 int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
1224 		    const struct keyring_index_key *index_key)
1225 	__acquires(&l_keyring->sem)
1226 	__acquires(&u_keyring->sem)
1227 	__acquires(&keyring_serialise_link_lock)
1228 {
1229 	if (l_keyring->type != &key_type_keyring ||
1230 	    u_keyring->type != &key_type_keyring)
1231 		return -ENOTDIR;
1232 
1233 	/* We have to be very careful here to take the keyring locks in the
1234 	 * right order, lest we open ourselves to deadlocking against another
1235 	 * move operation.
1236 	 */
1237 	if (l_keyring < u_keyring) {
1238 		down_write(&l_keyring->sem);
1239 		down_write_nested(&u_keyring->sem, 1);
1240 	} else {
1241 		down_write(&u_keyring->sem);
1242 		down_write_nested(&l_keyring->sem, 1);
1243 	}
1244 
1245 	/* Serialise link/link calls to prevent parallel calls causing a cycle
1246 	 * when linking two keyrings in opposite orders.
1247 	 */
1248 	if (index_key->type == &key_type_keyring)
1249 		mutex_lock(&keyring_serialise_link_lock);
1250 
1251 	return 0;
1252 }
1253 
1254 /*
1255  * Preallocate memory so that a key can be linked into a keyring.
1256  */
1257 int __key_link_begin(struct key *keyring,
1258 		     const struct keyring_index_key *index_key,
1259 		     struct assoc_array_edit **_edit)
1260 {
1261 	struct assoc_array_edit *edit;
1262 	int ret;
1263 
1264 	kenter("%d,%s,%s,",
1265 	       keyring->serial, index_key->type->name, index_key->description);
1266 
1267 	BUG_ON(index_key->desc_len == 0);
1268 	BUG_ON(*_edit != NULL);
1269 
1270 	*_edit = NULL;
1271 
1272 	ret = -EKEYREVOKED;
1273 	if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
1274 		goto error;
1275 
1276 	/* Create an edit script that will insert/replace the key in the
1277 	 * keyring tree.
1278 	 */
1279 	edit = assoc_array_insert(&keyring->keys,
1280 				  &keyring_assoc_array_ops,
1281 				  index_key,
1282 				  NULL);
1283 	if (IS_ERR(edit)) {
1284 		ret = PTR_ERR(edit);
1285 		goto error;
1286 	}
1287 
1288 	/* If we're not replacing a link in-place then we're going to need some
1289 	 * extra quota.
1290 	 */
1291 	if (!edit->dead_leaf) {
1292 		ret = key_payload_reserve(keyring,
1293 					  keyring->datalen + KEYQUOTA_LINK_BYTES);
1294 		if (ret < 0)
1295 			goto error_cancel;
1296 	}
1297 
1298 	*_edit = edit;
1299 	kleave(" = 0");
1300 	return 0;
1301 
1302 error_cancel:
1303 	assoc_array_cancel_edit(edit);
1304 error:
1305 	kleave(" = %d", ret);
1306 	return ret;
1307 }
1308 
1309 /*
1310  * Check that already instantiated keys aren't going to be a problem.
1311  *
1312  * The caller must have called __key_link_begin().  There is no need to call
1313  * this for keys that were created after __key_link_begin() was called.
1314  */
1315 int __key_link_check_live_key(struct key *keyring, struct key *key)
1316 {
1317 	if (key->type == &key_type_keyring)
1318 		/* check that we aren't going to create a cycle by linking one
1319 		 * keyring to another */
1320 		return keyring_detect_cycle(keyring, key);
1321 	return 0;
1322 }
1323 
1324 /*
1325  * Link a key into a keyring.
1326  *
1327  * Must be called with __key_link_begin() having been called.  Discards any
1328  * already extant link to a matching key if there is one, so that each keyring
1329  * holds at most one link to any given key of a particular type+description
1330  * combination.
1331  */
1332 void __key_link(struct key *key, struct assoc_array_edit **_edit)
1333 {
1334 	__key_get(key);
1335 	assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
1336 	assoc_array_apply_edit(*_edit);
1337 	*_edit = NULL;
1338 }
1339 
1340 /*
1341  * Finish linking a key into a keyring.
1342  *
1343  * Must be called with __key_link_begin() having been called.
1344  */
1345 void __key_link_end(struct key *keyring,
1346 		    const struct keyring_index_key *index_key,
1347 		    struct assoc_array_edit *edit)
1348 	__releases(&keyring->sem)
1349 	__releases(&keyring_serialise_link_lock)
1350 {
1351 	BUG_ON(index_key->type == NULL);
1352 	kenter("%d,%s,", keyring->serial, index_key->type->name);
1353 
1354 	if (edit) {
1355 		if (!edit->dead_leaf) {
1356 			key_payload_reserve(keyring,
1357 				keyring->datalen - KEYQUOTA_LINK_BYTES);
1358 		}
1359 		assoc_array_cancel_edit(edit);
1360 	}
1361 	up_write(&keyring->sem);
1362 
1363 	if (index_key->type == &key_type_keyring)
1364 		mutex_unlock(&keyring_serialise_link_lock);
1365 }
1366 
1367 /*
1368  * Check addition of keys to restricted keyrings.
1369  */
1370 static int __key_link_check_restriction(struct key *keyring, struct key *key)
1371 {
1372 	if (!keyring->restrict_link || !keyring->restrict_link->check)
1373 		return 0;
1374 	return keyring->restrict_link->check(keyring, key->type, &key->payload,
1375 					     keyring->restrict_link->key);
1376 }
1377 
1378 /**
1379  * key_link - Link a key to a keyring
1380  * @keyring: The keyring to make the link in.
1381  * @key: The key to link to.
1382  *
1383  * Make a link in a keyring to a key, such that the keyring holds a reference
1384  * on that key and the key can potentially be found by searching that keyring.
1385  *
1386  * This function will write-lock the keyring's semaphore and will consume some
1387  * of the user's key data quota to hold the link.
1388  *
1389  * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
1390  * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
1391  * full, -EDQUOT if there is insufficient key data quota remaining to add
1392  * another link or -ENOMEM if there's insufficient memory.
1393  *
1394  * It is assumed that the caller has checked that it is permitted for a link to
1395  * be made (the keyring should have Write permission and the key Link
1396  * permission).
1397  */
1398 int key_link(struct key *keyring, struct key *key)
1399 {
1400 	struct assoc_array_edit *edit = NULL;
1401 	int ret;
1402 
1403 	kenter("{%d,%d}", keyring->serial, refcount_read(&keyring->usage));
1404 
1405 	key_check(keyring);
1406 	key_check(key);
1407 
1408 	ret = __key_link_lock(keyring, &key->index_key);
1409 	if (ret < 0)
1410 		goto error;
1411 
1412 	ret = __key_link_begin(keyring, &key->index_key, &edit);
1413 	if (ret < 0)
1414 		goto error_end;
1415 
1416 	kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage));
1417 	ret = __key_link_check_restriction(keyring, key);
1418 	if (ret == 0)
1419 		ret = __key_link_check_live_key(keyring, key);
1420 	if (ret == 0)
1421 		__key_link(key, &edit);
1422 
1423 error_end:
1424 	__key_link_end(keyring, &key->index_key, edit);
1425 error:
1426 	kleave(" = %d {%d,%d}", ret, keyring->serial, refcount_read(&keyring->usage));
1427 	return ret;
1428 }
1429 EXPORT_SYMBOL(key_link);
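
/*
 * A typical pairing with key_unlink() below (illustrative; error handling
 * elided):
 *
 *	ret = key_link(kr, key);
 *	...
 *	ret = key_unlink(kr, key);
 *
 * The link makes @kr hold a reference on @key and consumes link quota; the
 * unlink drops both again.
 */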
1430 
1431 /*
1432  * Lock a keyring for unlink.
1433  */
1434 static int __key_unlink_lock(struct key *keyring)
1435 	__acquires(&keyring->sem)
1436 {
1437 	if (keyring->type != &key_type_keyring)
1438 		return -ENOTDIR;
1439 
1440 	down_write(&keyring->sem);
1441 	return 0;
1442 }
1443 
1444 /*
1445  * Begin the process of unlinking a key from a keyring.
1446  */
1447 static int __key_unlink_begin(struct key *keyring, struct key *key,
1448 			      struct assoc_array_edit **_edit)
1449 {
1450 	struct assoc_array_edit *edit;
1451 
1452 	BUG_ON(*_edit != NULL);
1453 
1454 	edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
1455 				  &key->index_key);
1456 	if (IS_ERR(edit))
1457 		return PTR_ERR(edit);
1458 
1459 	if (!edit)
1460 		return -ENOENT;
1461 
1462 	*_edit = edit;
1463 	return 0;
1464 }
1465 
1466 /*
1467  * Apply an unlink change.
1468  */
1469 static void __key_unlink(struct key *keyring, struct key *key,
1470 			 struct assoc_array_edit **_edit)
1471 {
1472 	assoc_array_apply_edit(*_edit);
1473 	*_edit = NULL;
1474 	key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
1475 }
1476 
1477 /*
1478  * Finish unlinking a key from a keyring.
1479  */
1480 static void __key_unlink_end(struct key *keyring,
1481 			     struct key *key,
1482 			     struct assoc_array_edit *edit)
1483 	__releases(&keyring->sem)
1484 {
1485 	if (edit)
1486 		assoc_array_cancel_edit(edit);
1487 	up_write(&keyring->sem);
1488 }
1489 
1490 /**
1491  * key_unlink - Unlink the first link to a key from a keyring.
1492  * @keyring: The keyring to remove the link from.
1493  * @key: The key the link is to.
1494  *
1495  * Remove a link from a keyring to a key.
1496  *
1497  * This function will write-lock the keyring's semaphore.
1498  *
1499  * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
1500  * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
1501  * memory.
1502  *
1503  * It is assumed that the caller has checked that it is permitted for a link to
1504  * be removed (the keyring should have Write permission; no permissions are
1505  * required on the key).
1506  */
1507 int key_unlink(struct key *keyring, struct key *key)
1508 {
1509 	struct assoc_array_edit *edit = NULL;
1510 	int ret;
1511 
1512 	key_check(keyring);
1513 	key_check(key);
1514 
1515 	ret = __key_unlink_lock(keyring);
1516 	if (ret < 0)
1517 		return ret;
1518 
1519 	ret = __key_unlink_begin(keyring, key, &edit);
1520 	if (ret == 0)
1521 		__key_unlink(keyring, key, &edit);
1522 	__key_unlink_end(keyring, key, edit);
1523 	return ret;
1524 }
1525 EXPORT_SYMBOL(key_unlink);
1526 
1527 /**
1528  * key_move - Move a key from one keyring to another
1529  * @key: The key to move
1530  * @from_keyring: The keyring to remove the link from.
1531  * @to_keyring: The keyring to make the link in.
1532  * @flags: Qualifying flags, such as KEYCTL_MOVE_EXCL.
1533  *
1534  * Make a link in @to_keyring to a key, such that the keyring holds a reference
1535  * on that key and the key can potentially be found by searching that keyring
1536  * whilst simultaneously removing a link to the key from @from_keyring.
1537  *
1538  * This function will write-lock both keyrings' semaphores and will consume
1539  * some of the user's key data quota to hold the link on @to_keyring.
1540  *
1541  * Returns 0 if successful, -ENOTDIR if either keyring isn't a keyring,
1542  * -EKEYREVOKED if either keyring has been revoked, -ENFILE if the second
1543  * keyring is full, -EDQUOT if there is insufficient key data quota remaining
1544  * to add another link or -ENOMEM if there's insufficient memory.  If
1545  * KEYCTL_MOVE_EXCL is set, then -EEXIST will be returned if there's already a
1546  * matching key in @to_keyring.
1547  *
1548  * It is assumed that the caller has checked that it is permitted for a link to
1549  * be made (the keyring should have Write permission and the key Link
1550  * permission).
1551  */
1552 int key_move(struct key *key,
1553 	     struct key *from_keyring,
1554 	     struct key *to_keyring,
1555 	     unsigned int flags)
1556 {
1557 	struct assoc_array_edit *from_edit = NULL, *to_edit = NULL;
1558 	int ret;
1559 
1560 	kenter("%d,%d,%d", key->serial, from_keyring->serial, to_keyring->serial);
1561 
1562 	if (from_keyring == to_keyring)
1563 		return 0;
1564 
1565 	key_check(key);
1566 	key_check(from_keyring);
1567 	key_check(to_keyring);
1568 
1569 	ret = __key_move_lock(from_keyring, to_keyring, &key->index_key);
1570 	if (ret < 0)
1571 		goto out;
1572 	ret = __key_unlink_begin(from_keyring, key, &from_edit);
1573 	if (ret < 0)
1574 		goto error;
1575 	ret = __key_link_begin(to_keyring, &key->index_key, &to_edit);
1576 	if (ret < 0)
1577 		goto error;
1578 
1579 	ret = -EEXIST;
1580 	if (to_edit->dead_leaf && (flags & KEYCTL_MOVE_EXCL))
1581 		goto error;
1582 
1583 	ret = __key_link_check_restriction(to_keyring, key);
1584 	if (ret < 0)
1585 		goto error;
1586 	ret = __key_link_check_live_key(to_keyring, key);
1587 	if (ret < 0)
1588 		goto error;
1589 
1590 	__key_unlink(from_keyring, key, &from_edit);
1591 	__key_link(key, &to_edit);
1592 error:
1593 	__key_link_end(to_keyring, &key->index_key, to_edit);
1594 	__key_unlink_end(from_keyring, key, from_edit);
1595 out:
1596 	kleave(" = %d", ret);
1597 	return ret;
1598 }
1599 EXPORT_SYMBOL(key_move);
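
/*
 * Sketch (illustrative names; permission checks are the caller's
 * responsibility, as noted above):
 *
 *	ret = key_move(key, from_kr, to_kr, KEYCTL_MOVE_EXCL);
 *
 * -EEXIST here means @to_kr already held a matching key and KEYCTL_MOVE_EXCL
 * forbade displacing it.
 */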
1600 
1601 /**
1602  * keyring_clear - Clear a keyring
1603  * @keyring: The keyring to clear.
1604  *
1605  * Clear the contents of the specified keyring.
1606  *
1607  * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
1608  */
1609 int keyring_clear(struct key *keyring)
1610 {
1611 	struct assoc_array_edit *edit;
1612 	int ret;
1613 
1614 	if (keyring->type != &key_type_keyring)
1615 		return -ENOTDIR;
1616 
1617 	down_write(&keyring->sem);
1618 
1619 	edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
1620 	if (IS_ERR(edit)) {
1621 		ret = PTR_ERR(edit);
1622 	} else {
1623 		if (edit)
1624 			assoc_array_apply_edit(edit);
1625 		key_payload_reserve(keyring, 0);
1626 		ret = 0;
1627 	}
1628 
1629 	up_write(&keyring->sem);
1630 	return ret;
1631 }
1632 EXPORT_SYMBOL(keyring_clear);
1633 
1634 /*
1635  * Dispose of the links from a revoked keyring.
1636  *
1637  * This is called with the key sem write-locked.
1638  */
1639 static void keyring_revoke(struct key *keyring)
1640 {
1641 	struct assoc_array_edit *edit;
1642 
1643 	edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
1644 	if (!IS_ERR(edit)) {
1645 		if (edit)
1646 			assoc_array_apply_edit(edit);
1647 		key_payload_reserve(keyring, 0);
1648 	}
1649 }
1650 
1651 static bool keyring_gc_select_iterator(void *object, void *iterator_data)
1652 {
1653 	struct key *key = keyring_ptr_to_key(object);
1654 	time64_t *limit = iterator_data;
1655 
1656 	if (key_is_dead(key, *limit))
1657 		return false;
1658 	key_get(key);
1659 	return true;
1660 }
1661 
1662 static int keyring_gc_check_iterator(const void *object, void *iterator_data)
1663 {
1664 	const struct key *key = keyring_ptr_to_key(object);
1665 	time64_t *limit = iterator_data;
1666 
1667 	key_check(key);
1668 	return key_is_dead(key, *limit);
1669 }
1670 
1671 /*
1672  * Garbage collect pointers from a keyring.
1673  *
1674  * Not called with any locks held.  The keyring's key struct will not be
1675  * deallocated under us as only our caller may deallocate it.
1676  */
1677 void keyring_gc(struct key *keyring, time64_t limit)
1678 {
1679 	int result;
1680 
1681 	kenter("%x{%s}", keyring->serial, keyring->description ?: "");
1682 
1683 	if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
1684 			      (1 << KEY_FLAG_REVOKED)))
1685 		goto dont_gc;
1686 
1687 	/* scan the keyring looking for dead keys */
1688 	rcu_read_lock();
1689 	result = assoc_array_iterate(&keyring->keys,
1690 				     keyring_gc_check_iterator, &limit);
1691 	rcu_read_unlock();
1692 	if (result == true)
1693 		goto do_gc;
1694 
1695 dont_gc:
1696 	kleave(" [no gc]");
1697 	return;
1698 
1699 do_gc:
1700 	down_write(&keyring->sem);
1701 	assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
1702 		       keyring_gc_select_iterator, &limit);
1703 	up_write(&keyring->sem);
1704 	kleave(" [gc]");
1705 }
1706 
1707 /*
1708  * Garbage collect restriction pointers from a keyring.
1709  *
1710  * Keyring restrictions are associated with a key type, and must be cleaned
1711  * up if the key type is unregistered. The restriction is altered to always
1712  * reject additional keys so a keyring cannot be opened up by unregistering
1713  * a key type.
1714  *
1715  * Not called with any keyring locks held. The keyring's key struct will not
1716  * be deallocated under us as only our caller may deallocate it.
1717  *
1718  * The caller is required to hold key_types_sem and dead_type->sem. This is
1719  * fulfilled by key_gc_keytype() holding the locks on behalf of
1720  * key_garbage_collector(), which it invokes on a workqueue.
1721  */
1722 void keyring_restriction_gc(struct key *keyring, struct key_type *dead_type)
1723 {
1724 	struct key_restriction *keyres;
1725 
1726 	kenter("%x{%s}", keyring->serial, keyring->description ?: "");
1727 
1728 	/*
1729 	 * keyring->restrict_link is only assigned at key allocation time
1730 	 * or with the key type locked, so the only values that could be
1731 	 * concurrently assigned to keyring->restrict_link are for key
1732 	 * types other than dead_type. Given this, it's ok to check
1733 	 * the key type before acquiring keyring->sem.
1734 	 */
1735 	if (!dead_type || !keyring->restrict_link ||
1736 	    keyring->restrict_link->keytype != dead_type) {
1737 		kleave(" [no restriction gc]");
1738 		return;
1739 	}
1740 
1741 	/* Lock the keyring to ensure that a link is not in progress */
1742 	down_write(&keyring->sem);
1743 
1744 	keyres = keyring->restrict_link;
1745 
1746 	keyres->check = restrict_link_reject;
1747 
1748 	key_put(keyres->key);
1749 	keyres->key = NULL;
1750 	keyres->keytype = NULL;
1751 
1752 	up_write(&keyring->sem);
1753 
1754 	kleave(" [restriction gc]");
1755 }
1756