/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"

/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/* default is 0 - 1 page. */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}

static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
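
/*
 * Usage example (illustrative, not part of the original source): both knobs
 * above are ordinary module parameters, so with IMA built in they can be set
 * on the kernel command line or inspected under /sys/module/ima/parameters/,
 * e.g.:
 *
 *	ima.ahash_minsize=8192 ima.ahash_bufsize=64K
 *
 * param_set_bufsize() parses the value with memparse(), so the usual K/M/G
 * suffixes are accepted; the size is rounded up to a power-of-two number of
 * pages and rejected if the resulting order reaches MAX_ORDER.
 */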

static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

int __init ima_init_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	pr_info("Allocated hash algorithm: %s\n",
		hash_algo_name[ima_hash_algo]);
	return 0;
}

static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo) {
		tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
		if (IS_ERR(tfm)) {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_tfm(struct crypto_shash *tfm)
{
	if (tfm != ima_shash_tfm)
		crypto_free_shash(tfm);
}

/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size: Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn: Should the final, single-page allocation warn or not.
 *
 * Tries to allocate memory opportunistically: first attempt max_size bytes,
 * then keep halving the request until a zero-order (single page) allocation
 * is reached. Allocations are attempted without generating allocation
 * warnings unless last_warn is set; last_warn only affects the final,
 * zero-order allocation.
 *
 * By default, ima_maxorder is 0 and this is equivalent to kmalloc(GFP_KERNEL).
 *
 * Return pointer to allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */

	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}

/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr: Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}

static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}

static inline int ahash_wait(int err, struct crypto_wait *wait)
{

	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}
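
/*
 * Calculate the file hash using the asynchronous hash (ahash) API.
 *
 * The file is read in chunks into one or two buffers obtained from
 * ima_alloc_pages().  With two buffers, the next chunk is read while the
 * previous ahash_update() request may still be in flight, so a crypto
 * offload engine can hash one buffer while the other is being filled.
 * With a single buffer, the previous request is waited for before the
 * buffer is reused.
 */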
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate a secondary buffer. If that fails, fall
		 * back to using a single buffer. Use the previous allocation
		 * size as a baseline for the possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len)
			goto out3;

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
out3:
	if (read)
		file->f_mode &= ~FMODE_READ;
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}
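
/*
 * Calculate the file hash using the synchronous hash (shash) API.
 *
 * This is the fallback path: the file is read sequentially into a single
 * PAGE_SIZE buffer and fed to crypto_shash_update() one chunk at a time.
 */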
static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc, read = 0;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0)
			break;
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	if (read)
		file->f_mode &= ~FMODE_READ;
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with/without the DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	i_size = i_size_read(file_inode(file));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(file, hash);
		if (!rc)
			return 0;
	}

	return ima_calc_file_shash(file, hash);
}

/*
 * Calculate the hash of template data
 */
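/*
 * For the original "ima" template, field data is hashed without a length
 * prefix and the event name field ("n") is zero-padded to
 * IMA_EVENT_NAME_LEN_MAX + 1 bytes.  For all other templates, each field's
 * u32 length is hashed before its data; when ima_canonical_fmt is set, the
 * length is converted to little-endian so the binary measurement list does
 * not depend on host endianness.
 */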
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc, i;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
			!ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);

	return rc;
}

int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_desc *desc, int num_fields,
			      struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
					   hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}

static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				 struct ima_digest_data *hash,
				 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);
	return rc;
}

int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}

static void __init ima_pcrread(int idx, u8 *pcr)
{
	if (!ima_used_chip)
		return;

	if (tpm_pcr_read(NULL, idx, pcr) != 0)
		pr_err("Error Communicating to TPM chip\n");
}

/*
 * Calculate the boot aggregate hash
 */
static int __init ima_calc_boot_aggregate_tfm(char *digest,
					      struct crypto_shash *tfm)
{
	u8 pcr_i[TPM_DIGEST_SIZE];
	int rc, i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative sha1 over tpm registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, pcr_i);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
	}
	if (!rc)
		crypto_shash_final(shash, digest);
	return rc;
}

int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);

	ima_free_tfm(tfm);

	return rc;
}