xref: /linux/security/integrity/ima/ima_crypto.c (revision 7a60fe48af206d34571e446d685672f5730a6b90)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2005,2006,2007,2008 IBM Corporation
4  *
5  * Authors:
6  * Mimi Zohar <zohar@us.ibm.com>
7  * Kylene Hall <kjhall@us.ibm.com>
8  *
9  * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/file.h>
15 #include <linux/crypto.h>
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <crypto/hash.h>
19 
20 #include "ima.h"
21 
/* tfm for the configured IMA default hash algorithm (ima_hash_algo). */
static struct crypto_shash *ima_shash_tfm;

/* Index into ima_algo_array of the SHA1 slot (template list digest). */
int ima_sha1_idx __ro_after_init;
/* Index into ima_algo_array of the IMA default algorithm's slot. */
int ima_hash_algo_idx __ro_after_init;
/*
 * Additional number of slots reserved, as needed, for SHA1
 * and IMA default algo.
 */
int ima_extra_slots __ro_after_init;

/* One entry per TPM PCR bank plus ima_extra_slots; set by ima_init_crypto(). */
struct ima_algo_desc *ima_algo_array __ro_after_init;
33 
34 static int __init ima_init_ima_crypto(void)
35 {
36 	long rc;
37 
38 	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
39 	if (IS_ERR(ima_shash_tfm)) {
40 		rc = PTR_ERR(ima_shash_tfm);
41 		pr_err("Can not allocate %s (reason: %ld)\n",
42 		       hash_algo_name[ima_hash_algo], rc);
43 		return rc;
44 	}
45 	pr_info("Allocated hash algorithm: %s\n",
46 		hash_algo_name[ima_hash_algo]);
47 	return 0;
48 }
49 
50 static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
51 {
52 	struct crypto_shash *tfm = ima_shash_tfm;
53 	int rc, i;
54 
55 	if (algo < 0 || algo >= HASH_ALGO__LAST)
56 		algo = ima_hash_algo;
57 
58 	if (algo == ima_hash_algo)
59 		return tfm;
60 
61 	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
62 		if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
63 			return ima_algo_array[i].tfm;
64 
65 	tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
66 	if (IS_ERR(tfm)) {
67 		rc = PTR_ERR(tfm);
68 		pr_err("Can not allocate %s (reason: %d)\n",
69 		       hash_algo_name[algo], rc);
70 	}
71 	return tfm;
72 }
73 
74 int __init ima_init_crypto(void)
75 {
76 	unsigned int digest_size;
77 	enum hash_algo algo;
78 	long rc;
79 	int i;
80 
81 	rc = ima_init_ima_crypto();
82 	if (rc)
83 		return rc;
84 
85 	ima_sha1_idx = -1;
86 	ima_hash_algo_idx = -1;
87 
88 	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
89 		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
90 		if (algo == HASH_ALGO_SHA1)
91 			ima_sha1_idx = i;
92 
93 		if (algo == ima_hash_algo)
94 			ima_hash_algo_idx = i;
95 	}
96 
97 	if (ima_sha1_idx < 0) {
98 		ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
99 		if (ima_hash_algo == HASH_ALGO_SHA1)
100 			ima_hash_algo_idx = ima_sha1_idx;
101 	}
102 
103 	if (ima_hash_algo_idx < 0)
104 		ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
105 
106 	ima_algo_array = kzalloc_objs(*ima_algo_array,
107 				      NR_BANKS(ima_tpm_chip) + ima_extra_slots);
108 	if (!ima_algo_array) {
109 		rc = -ENOMEM;
110 		goto out;
111 	}
112 
113 	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
114 		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
115 		digest_size = ima_tpm_chip->allocated_banks[i].digest_size;
116 		ima_algo_array[i].algo = algo;
117 		ima_algo_array[i].digest_size = digest_size;
118 
119 		/* unknown TPM algorithm */
120 		if (algo == HASH_ALGO__LAST)
121 			continue;
122 
123 		if (algo == ima_hash_algo) {
124 			ima_algo_array[i].tfm = ima_shash_tfm;
125 			continue;
126 		}
127 
128 		ima_algo_array[i].tfm = ima_alloc_tfm(algo);
129 		if (IS_ERR(ima_algo_array[i].tfm)) {
130 			if (algo == HASH_ALGO_SHA1) {
131 				rc = PTR_ERR(ima_algo_array[i].tfm);
132 				ima_algo_array[i].tfm = NULL;
133 				goto out_array;
134 			}
135 
136 			ima_algo_array[i].tfm = NULL;
137 		}
138 	}
139 
140 	if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
141 		if (ima_hash_algo == HASH_ALGO_SHA1) {
142 			ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
143 		} else {
144 			ima_algo_array[ima_sha1_idx].tfm =
145 						ima_alloc_tfm(HASH_ALGO_SHA1);
146 			if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
147 				rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
148 				goto out_array;
149 			}
150 		}
151 
152 		ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
153 		ima_algo_array[ima_sha1_idx].digest_size = SHA1_DIGEST_SIZE;
154 	}
155 
156 	if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
157 	    ima_hash_algo_idx != ima_sha1_idx) {
158 		digest_size = hash_digest_size[ima_hash_algo];
159 		ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
160 		ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
161 		ima_algo_array[ima_hash_algo_idx].digest_size = digest_size;
162 	}
163 
164 	return 0;
165 out_array:
166 	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
167 		if (!ima_algo_array[i].tfm ||
168 		    ima_algo_array[i].tfm == ima_shash_tfm)
169 			continue;
170 
171 		crypto_free_shash(ima_algo_array[i].tfm);
172 	}
173 	kfree(ima_algo_array);
174 out:
175 	crypto_free_shash(ima_shash_tfm);
176 	return rc;
177 }
178 
179 static void ima_free_tfm(struct crypto_shash *tfm)
180 {
181 	int i;
182 
183 	if (tfm == ima_shash_tfm)
184 		return;
185 
186 	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
187 		if (ima_algo_array[i].tfm == tfm)
188 			return;
189 
190 	crypto_free_shash(tfm);
191 }
192 
193 static int ima_calc_file_hash_tfm(struct file *file,
194 				  struct ima_digest_data *hash,
195 				  struct crypto_shash *tfm)
196 {
197 	loff_t i_size, offset = 0;
198 	char *rbuf;
199 	int rc;
200 	SHASH_DESC_ON_STACK(shash, tfm);
201 
202 	shash->tfm = tfm;
203 
204 	hash->length = crypto_shash_digestsize(tfm);
205 
206 	rc = crypto_shash_init(shash);
207 	if (rc != 0)
208 		return rc;
209 
210 	i_size = i_size_read(file_inode(file));
211 
212 	if (i_size == 0)
213 		goto out;
214 
215 	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
216 	if (!rbuf)
217 		return -ENOMEM;
218 
219 	while (offset < i_size) {
220 		int rbuf_len;
221 
222 		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
223 		if (rbuf_len < 0) {
224 			rc = rbuf_len;
225 			break;
226 		}
227 		if (rbuf_len == 0) {	/* unexpected EOF */
228 			rc = -EINVAL;
229 			break;
230 		}
231 		offset += rbuf_len;
232 
233 		rc = crypto_shash_update(shash, rbuf, rbuf_len);
234 		if (rc)
235 			break;
236 	}
237 	kfree(rbuf);
238 out:
239 	if (!rc)
240 		rc = crypto_shash_final(shash, hash->digest);
241 	return rc;
242 }
243 
244 /*
245  * ima_calc_file_hash - calculate file hash
246  */
247 int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
248 {
249 	int rc;
250 	struct file *f = file;
251 	bool new_file_instance = false;
252 	struct crypto_shash *tfm;
253 
254 	/*
255 	 * For consistency, fail file's opened with the O_DIRECT flag on
256 	 * filesystems mounted with/without DAX option.
257 	 */
258 	if (file->f_flags & O_DIRECT) {
259 		hash->length = hash_digest_size[ima_hash_algo];
260 		hash->algo = ima_hash_algo;
261 		return -EINVAL;
262 	}
263 
264 	/* Open a new file instance in O_RDONLY if we cannot read */
265 	if (!(file->f_mode & FMODE_READ)) {
266 		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
267 				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
268 		flags |= O_RDONLY;
269 		f = dentry_open(&file->f_path, flags, file->f_cred);
270 		if (IS_ERR(f))
271 			return PTR_ERR(f);
272 
273 		new_file_instance = true;
274 	}
275 
276 	tfm = ima_alloc_tfm(hash->algo);
277 	if (IS_ERR(tfm)) {
278 		rc = PTR_ERR(tfm);
279 	} else {
280 		rc = ima_calc_file_hash_tfm(f, hash, tfm);
281 		ima_free_tfm(tfm);
282 	}
283 	if (new_file_instance)
284 		fput(f);
285 	return rc;
286 }
287 
288 /*
289  * Calculate the hash of template data
290  */
/*
 * Hash the template fields of @entry into entry->digests[tfm_idx],
 * using the tfm cached at @tfm_idx in ima_algo_array.
 *
 * For every template other than the original "ima" template, each
 * field's length is hashed ahead of its data (converted to
 * little-endian when ima_canonical_fmt is set).  For the "ima"
 * template the event name field ("n") is instead zero-padded to the
 * fixed IMA_EVENT_NAME_LEN_MAX + 1 size before hashing, preserving
 * the legacy fixed-width record format.
 *
 * Returns 0 on success or a crypto_shash_*() error code.
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_entry *entry,
					 int tfm_idx)
{
	SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
	struct ima_template_desc *td = entry->template_desc;
	int num_fields = entry->template_desc->num_fields;
	int rc, i;

	shash->tfm = ima_algo_array[tfm_idx].tfm;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		/* Canonical format hashes the length as little-endian. */
		u32 datalen_to_hash = !ima_canonical_fmt ?
				datalen : (__force u32)cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			/* Non-"ima" templates: hash the field length first. */
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			/* "ima" template: zero-pad event name to fixed size. */
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);

	return rc;
}
334 
335 int ima_calc_field_array_hash(struct ima_field_data *field_data,
336 			      struct ima_template_entry *entry)
337 {
338 	u16 alg_id;
339 	int rc, i;
340 
341 	rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
342 	if (rc)
343 		return rc;
344 
345 	entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;
346 
347 	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
348 		if (i == ima_sha1_idx)
349 			continue;
350 
351 		if (i < NR_BANKS(ima_tpm_chip)) {
352 			alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
353 			entry->digests[i].alg_id = alg_id;
354 		}
355 
356 		/* for unmapped TPM algorithms digest is still a padded SHA1 */
357 		if (!ima_algo_array[i].tfm) {
358 			memcpy(entry->digests[i].digest,
359 			       entry->digests[ima_sha1_idx].digest,
360 			       TPM_DIGEST_SIZE);
361 			continue;
362 		}
363 
364 		rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
365 		if (rc)
366 			return rc;
367 	}
368 	return rc;
369 }
370 
371 static int calc_buffer_shash_tfm(const void *buf, loff_t size,
372 				struct ima_digest_data *hash,
373 				struct crypto_shash *tfm)
374 {
375 	SHASH_DESC_ON_STACK(shash, tfm);
376 	unsigned int len;
377 	int rc;
378 
379 	shash->tfm = tfm;
380 
381 	hash->length = crypto_shash_digestsize(tfm);
382 
383 	rc = crypto_shash_init(shash);
384 	if (rc != 0)
385 		return rc;
386 
387 	while (size) {
388 		len = size < PAGE_SIZE ? size : PAGE_SIZE;
389 		rc = crypto_shash_update(shash, buf, len);
390 		if (rc)
391 			break;
392 		buf += len;
393 		size -= len;
394 	}
395 
396 	if (!rc)
397 		rc = crypto_shash_final(shash, hash->digest);
398 	return rc;
399 }
400 
401 int ima_calc_buffer_hash(const void *buf, loff_t len,
402 			 struct ima_digest_data *hash)
403 {
404 	struct crypto_shash *tfm;
405 	int rc;
406 
407 	tfm = ima_alloc_tfm(hash->algo);
408 	if (IS_ERR(tfm))
409 		return PTR_ERR(tfm);
410 
411 	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);
412 
413 	ima_free_tfm(tfm);
414 	return rc;
415 }
416 
417 static void ima_pcrread(u32 idx, struct tpm_digest *d)
418 {
419 	if (!ima_tpm_chip)
420 		return;
421 
422 	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
423 		pr_err("Error Communicating to TPM chip\n");
424 }
425 
426 /*
427  * The boot_aggregate is a cumulative hash over TPM registers 0 - 7.  With
428  * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
429  * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
430  * allowing firmware to configure and enable different banks.
431  *
432  * Knowing which TPM bank is read to calculate the boot_aggregate digest
433  * needs to be conveyed to a verifier.  For this reason, use the same
434  * hash algorithm for reading the TPM PCRs as for calculating the boot
435  * aggregate digest as stored in the measurement list.
436  */
437 static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
438 				       struct crypto_shash *tfm)
439 {
440 	struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
441 	int rc;
442 	u32 i;
443 	SHASH_DESC_ON_STACK(shash, tfm);
444 
445 	shash->tfm = tfm;
446 
447 	pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
448 		 d.alg_id);
449 
450 	rc = crypto_shash_init(shash);
451 	if (rc != 0)
452 		return rc;
453 
454 	/* cumulative digest over TPM registers 0-7 */
455 	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
456 		ima_pcrread(i, &d);
457 		/* now accumulate with current aggregate */
458 		rc = crypto_shash_update(shash, d.digest,
459 					 crypto_shash_digestsize(tfm));
460 		if (rc != 0)
461 			return rc;
462 	}
463 	/*
464 	 * Extend cumulative digest over TPM registers 8-9, which contain
465 	 * measurement for the kernel command line (reg. 8) and image (reg. 9)
466 	 * in a typical PCR allocation. Registers 8-9 are only included in
467 	 * non-SHA1 boot_aggregate digests to avoid ambiguity.
468 	 */
469 	if (alg_id != TPM_ALG_SHA1) {
470 		for (i = TPM_PCR8; i < TPM_PCR10; i++) {
471 			ima_pcrread(i, &d);
472 			rc = crypto_shash_update(shash, d.digest,
473 						crypto_shash_digestsize(tfm));
474 		}
475 	}
476 	if (!rc)
477 		rc = crypto_shash_final(shash, digest);
478 	return rc;
479 }
480 
481 int ima_calc_boot_aggregate(struct ima_digest_data *hash)
482 {
483 	struct crypto_shash *tfm;
484 	u16 crypto_id, alg_id;
485 	int rc, i, bank_idx = -1;
486 
487 	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
488 		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
489 		if (crypto_id == hash->algo) {
490 			bank_idx = i;
491 			break;
492 		}
493 
494 		if (crypto_id == HASH_ALGO_SHA256)
495 			bank_idx = i;
496 
497 		if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
498 			bank_idx = i;
499 	}
500 
501 	if (bank_idx == -1) {
502 		pr_err("No suitable TPM algorithm for boot aggregate\n");
503 		return 0;
504 	}
505 
506 	hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;
507 
508 	tfm = ima_alloc_tfm(hash->algo);
509 	if (IS_ERR(tfm))
510 		return PTR_ERR(tfm);
511 
512 	hash->length = crypto_shash_digestsize(tfm);
513 	alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
514 	rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);
515 
516 	ima_free_tfm(tfm);
517 
518 	return rc;
519 }
520