// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * This file contains TPM2 protocol implementations of the commands
 * used by the kernel internally.
 */

#include <linux/gfp.h>
#include <asm/unaligned.h>
#include "tpm.h"

enum tpm2_handle_types {
        TPM2_HT_HMAC_SESSION   = 0x02000000,
        TPM2_HT_POLICY_SESSION = 0x03000000,
        TPM2_HT_TRANSIENT      = 0x80000000,
};

struct tpm2_context {
        __be64 sequence;
        __be32 saved_handle;
        __be32 hierarchy;
        __be16 blob_size;
} __packed;

static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
                if (space->session_tbl[i])
                        tpm2_flush_context(chip, space->session_tbl[i]);
        }
}

int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
{
        space->context_buf = kzalloc(buf_size, GFP_KERNEL);
        if (!space->context_buf)
                return -ENOMEM;

        space->session_buf = kzalloc(buf_size, GFP_KERNEL);
        if (!space->session_buf) {
                kfree(space->context_buf);
                /* Prevent caller getting a dangling pointer. */
                space->context_buf = NULL;
                return -ENOMEM;
        }

        space->buf_size = buf_size;
        return 0;
}

void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
{
        if (tpm_try_get_ops(chip) == 0) {
                tpm2_flush_sessions(chip, space);
                tpm_put_ops(chip);
        }

        kfree(space->context_buf);
        kfree(space->session_buf);
}
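
/*
 * tpm2_load_context() - load the context blob at @offset back into the TPM
 *
 * Sends TPM2_CC_CONTEXT_LOAD for the blob stored at @offset in @buf and, on
 * success, stores the handle assigned by the TPM in @handle and advances
 * @offset past the blob. Returns -ENOENT if the TPM no longer accepts the
 * context, -EINVAL on an integrity failure, and -EFAULT on other errors.
 */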
int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
                      unsigned int *offset, u32 *handle)
{
        struct tpm_buf tbuf;
        struct tpm2_context *ctx;
        unsigned int body_size;
        int rc;

        rc = tpm_buf_init(&tbuf, TPM2_ST_NO_SESSIONS, TPM2_CC_CONTEXT_LOAD);
        if (rc)
                return rc;

        ctx = (struct tpm2_context *)&buf[*offset];
        body_size = sizeof(*ctx) + be16_to_cpu(ctx->blob_size);
        tpm_buf_append(&tbuf, &buf[*offset], body_size);

        rc = tpm_transmit_cmd(chip, &tbuf, 4, NULL);
        if (rc < 0) {
                dev_warn(&chip->dev, "%s: failed with a system error %d\n",
                         __func__, rc);
                tpm_buf_destroy(&tbuf);
                return -EFAULT;
        } else if (tpm2_rc_value(rc) == TPM2_RC_HANDLE ||
                   rc == TPM2_RC_REFERENCE_H0) {
                /*
                 * TPM_RC_HANDLE means that the session context can't
                 * be loaded because of an internal counter mismatch
                 * that makes the TPM think there might have been a
                 * replay. This might happen if the context was saved
                 * and loaded outside the space.
                 *
                 * TPM_RC_REFERENCE_H0 means the session has been
                 * flushed outside the space.
                 */
                *handle = 0;
                tpm_buf_destroy(&tbuf);
                return -ENOENT;
        } else if (tpm2_rc_value(rc) == TPM2_RC_INTEGRITY) {
                tpm_buf_destroy(&tbuf);
                return -EINVAL;
        } else if (rc > 0) {
                dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
                         __func__, rc);
                tpm_buf_destroy(&tbuf);
                return -EFAULT;
        }

        *handle = be32_to_cpup((__be32 *)&tbuf.data[TPM_HEADER_SIZE]);
        *offset += body_size;

        tpm_buf_destroy(&tbuf);
        return 0;
}

int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
                      unsigned int buf_size, unsigned int *offset)
{
        struct tpm_buf tbuf;
        unsigned int body_size;
        int rc;

        rc = tpm_buf_init(&tbuf, TPM2_ST_NO_SESSIONS, TPM2_CC_CONTEXT_SAVE);
        if (rc)
                return rc;

        tpm_buf_append_u32(&tbuf, handle);

        rc = tpm_transmit_cmd(chip, &tbuf, 0, NULL);
        if (rc < 0) {
                dev_warn(&chip->dev, "%s: failed with a system error %d\n",
                         __func__, rc);
                tpm_buf_destroy(&tbuf);
                return -EFAULT;
        } else if (tpm2_rc_value(rc) == TPM2_RC_REFERENCE_H0) {
                tpm_buf_destroy(&tbuf);
                return -ENOENT;
        } else if (rc) {
                dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
                         __func__, rc);
                tpm_buf_destroy(&tbuf);
                return -EFAULT;
        }

        body_size = tpm_buf_length(&tbuf) - TPM_HEADER_SIZE;
        if ((*offset + body_size) > buf_size) {
                dev_warn(&chip->dev, "%s: out of backing storage\n", __func__);
                tpm_buf_destroy(&tbuf);
                return -ENOMEM;
        }

        memcpy(&buf[*offset], &tbuf.data[TPM_HEADER_SIZE], body_size);
        *offset += body_size;
        tpm_buf_destroy(&tbuf);
        return 0;
}

void tpm2_flush_space(struct tpm_chip *chip)
{
        struct tpm_space *space = &chip->work_space;
        int i;

        for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
                if (space->context_tbl[i] && ~space->context_tbl[i])
                        tpm2_flush_context(chip, space->context_tbl[i]);

        tpm2_flush_sessions(chip, space);
}

static int tpm2_load_space(struct tpm_chip *chip)
{
        struct tpm_space *space = &chip->work_space;
        unsigned int offset;
        int i;
        int rc;

        for (i = 0, offset = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
                if (!space->context_tbl[i])
                        continue;

                /* sanity check, should never happen */
                if (~space->context_tbl[i]) {
                        dev_err(&chip->dev, "context table is inconsistent\n");
                        return -EFAULT;
                }

                rc = tpm2_load_context(chip, space->context_buf, &offset,
                                       &space->context_tbl[i]);
                if (rc)
                        return rc;
        }

        for (i = 0, offset = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
                u32 handle;

                if (!space->session_tbl[i])
                        continue;

                rc = tpm2_load_context(chip, space->session_buf,
                                       &offset, &handle);
                if (rc == -ENOENT) {
                        /* load failed, just forget session */
                        space->session_tbl[i] = 0;
                } else if (rc) {
                        tpm2_flush_space(chip);
                        return rc;
                }
                if (handle != space->session_tbl[i]) {
                        dev_warn(&chip->dev, "session restored to wrong handle\n");
                        tpm2_flush_space(chip);
                        return -EFAULT;
                }
        }

        return 0;
}
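
/*
 * Transient object handles are virtualized per space: a virtual handle has
 * the form TPM2_HT_TRANSIENT | (0xFFFFFF - i), where i is the slot in
 * context_tbl that holds the physical handle. tpm2_map_to_phandle() rewrites
 * a virtual handle in a command buffer back to its physical counterpart.
 */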
static bool tpm2_map_to_phandle(struct tpm_space *space, void *handle)
{
        u32 vhandle = be32_to_cpup((__be32 *)handle);
        u32 phandle;
        int i;

        i = 0xFFFFFF - (vhandle & 0xFFFFFF);
        if (i >= ARRAY_SIZE(space->context_tbl) || !space->context_tbl[i])
                return false;

        phandle = space->context_tbl[i];
        *((__be32 *)handle) = cpu_to_be32(phandle);
        return true;
}

static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
{
        struct tpm_space *space = &chip->work_space;
        unsigned int nr_handles;
        u32 attrs;
        __be32 *handle;
        int i;

        i = tpm2_find_cc(chip, cc);
        if (i < 0)
                return -EINVAL;

        attrs = chip->cc_attrs_tbl[i];
        nr_handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);

        handle = (__be32 *)&cmd[TPM_HEADER_SIZE];
        for (i = 0; i < nr_handles; i++, handle++) {
                if ((be32_to_cpu(*handle) & 0xFF000000) == TPM2_HT_TRANSIENT) {
                        if (!tpm2_map_to_phandle(space, handle))
                                return -EINVAL;
                }
        }

        return 0;
}

static int tpm_find_and_validate_cc(struct tpm_chip *chip,
                                    struct tpm_space *space,
                                    const void *cmd, size_t len)
{
        const struct tpm_header *header = (const void *)cmd;
        int i;
        u32 cc;
        u32 attrs;
        unsigned int nr_handles;

        if (len < TPM_HEADER_SIZE || !chip->nr_commands)
                return -EINVAL;

        cc = be32_to_cpu(header->ordinal);

        i = tpm2_find_cc(chip, cc);
        if (i < 0) {
                dev_dbg(&chip->dev, "0x%04X is an invalid command\n", cc);
                return -EOPNOTSUPP;
        }

        attrs = chip->cc_attrs_tbl[i];
        nr_handles = 4 * ((attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0));
        if (len < TPM_HEADER_SIZE + 4 * nr_handles)
                goto err_len;

        return cc;
err_len:
        dev_dbg(&chip->dev, "%s: insufficient command length %zu\n", __func__,
                len);
        return -EINVAL;
}

int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
                       size_t cmdsiz)
{
        int rc;
        int cc;

        if (!space)
                return 0;

        cc = tpm_find_and_validate_cc(chip, space, cmd, cmdsiz);
        if (cc < 0)
                return cc;

        memcpy(&chip->work_space.context_tbl, &space->context_tbl,
               sizeof(space->context_tbl));
        memcpy(&chip->work_space.session_tbl, &space->session_tbl,
               sizeof(space->session_tbl));
        memcpy(chip->work_space.context_buf, space->context_buf,
               space->buf_size);
        memcpy(chip->work_space.session_buf, space->session_buf,
               space->buf_size);

        rc = tpm2_load_space(chip);
        if (rc) {
                tpm2_flush_space(chip);
                return rc;
        }

        rc = tpm2_map_command(chip, cc, cmd);
        if (rc) {
                tpm2_flush_space(chip);
                return rc;
        }

        chip->last_cc = cc;
        return 0;
}

static bool tpm2_add_session(struct tpm_chip *chip, u32 handle)
{
        struct tpm_space *space = &chip->work_space;
        int i;

        for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++)
                if (space->session_tbl[i] == 0)
                        break;

        if (i == ARRAY_SIZE(space->session_tbl))
                return false;

        space->session_tbl[i] = handle;
        return true;
}

static u32 tpm2_map_to_vhandle(struct tpm_space *space, u32 phandle, bool alloc)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
                if (alloc) {
                        if (!space->context_tbl[i]) {
                                space->context_tbl[i] = phandle;
                                break;
                        }
                } else if (space->context_tbl[i] == phandle)
                        break;
        }

        if (i == ARRAY_SIZE(space->context_tbl))
                return 0;

        return TPM2_HT_TRANSIENT | (0xFFFFFF - i);
}
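
/*
 * Virtualize the handle area of a successful response: a newly created
 * transient object gets a virtual handle written back into the response,
 * and a newly started session is recorded in session_tbl. If no table slot
 * is free, the object or session is flushed and -ENOMEM is returned.
 */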
static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
                                    size_t len)
{
        struct tpm_space *space = &chip->work_space;
        struct tpm_header *header = (struct tpm_header *)rsp;
        u32 phandle;
        u32 phandle_type;
        u32 vhandle;
        u32 attrs;
        int i;

        if (be32_to_cpu(header->return_code) != TPM2_RC_SUCCESS)
                return 0;

        i = tpm2_find_cc(chip, cc);
        /* sanity check, should never happen */
        if (i < 0)
                return -EFAULT;

        attrs = chip->cc_attrs_tbl[i];
        if (!((attrs >> TPM2_CC_ATTR_RHANDLE) & 1))
                return 0;

        phandle = be32_to_cpup((__be32 *)&rsp[TPM_HEADER_SIZE]);
        phandle_type = phandle & 0xFF000000;

        switch (phandle_type) {
        case TPM2_HT_TRANSIENT:
                vhandle = tpm2_map_to_vhandle(space, phandle, true);
                if (!vhandle)
                        goto out_no_slots;

                *(__be32 *)&rsp[TPM_HEADER_SIZE] = cpu_to_be32(vhandle);
                break;
        case TPM2_HT_HMAC_SESSION:
        case TPM2_HT_POLICY_SESSION:
                if (!tpm2_add_session(chip, phandle))
                        goto out_no_slots;
                break;
        default:
                dev_err(&chip->dev, "%s: unknown handle 0x%08X\n",
                        __func__, phandle);
                break;
        }

        return 0;
out_no_slots:
        tpm2_flush_context(chip, phandle);
        dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
                 phandle);
        return -ENOMEM;
}

struct tpm2_cap_handles {
        u8 more_data;
        __be32 capability;
        __be32 count;
        __be32 handles[];
} __packed;

static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp,
                                  size_t len)
{
        struct tpm_space *space = &chip->work_space;
        struct tpm_header *header = (struct tpm_header *)rsp;
        struct tpm2_cap_handles *data;
        u32 phandle;
        u32 phandle_type;
        u32 vhandle;
        int i;
        int j;

        if (cc != TPM2_CC_GET_CAPABILITY ||
            be32_to_cpu(header->return_code) != TPM2_RC_SUCCESS) {
                return 0;
        }

        if (len < TPM_HEADER_SIZE + 9)
                return -EFAULT;

        data = (void *)&rsp[TPM_HEADER_SIZE];
        if (be32_to_cpu(data->capability) != TPM2_CAP_HANDLES)
                return 0;

        if (be32_to_cpu(data->count) > (UINT_MAX - TPM_HEADER_SIZE - 9) / 4)
                return -EFAULT;

        if (len != TPM_HEADER_SIZE + 9 + 4 * be32_to_cpu(data->count))
                return -EFAULT;

        for (i = 0, j = 0; i < be32_to_cpu(data->count); i++) {
                phandle = be32_to_cpup((__be32 *)&data->handles[i]);
                phandle_type = phandle & 0xFF000000;

                switch (phandle_type) {
                case TPM2_HT_TRANSIENT:
                        vhandle = tpm2_map_to_vhandle(space, phandle, false);
                        if (!vhandle)
                                break;

                        data->handles[j] = cpu_to_be32(vhandle);
                        j++;
                        break;

                default:
                        data->handles[j] = cpu_to_be32(phandle);
                        j++;
                        break;
                }
        }

        header->length = cpu_to_be32(TPM_HEADER_SIZE + 9 + 4 * j);
        data->count = cpu_to_be32(j);
        return 0;
}
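
/*
 * Swap the space out of the TPM: context-save every loaded transient object
 * and session into the space's backing buffers. Saved transient objects are
 * flushed from the TPM and their slots set to ~0, which tpm2_flush_space()
 * and tpm2_load_space() treat as "saved, not resident".
 */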
static int tpm2_save_space(struct tpm_chip *chip)
{
        struct tpm_space *space = &chip->work_space;
        unsigned int offset;
        int i;
        int rc;

        for (i = 0, offset = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
                if (!(space->context_tbl[i] && ~space->context_tbl[i]))
                        continue;

                rc = tpm2_save_context(chip, space->context_tbl[i],
                                       space->context_buf, space->buf_size,
                                       &offset);
                if (rc == -ENOENT) {
                        space->context_tbl[i] = 0;
                        continue;
                } else if (rc)
                        return rc;

                tpm2_flush_context(chip, space->context_tbl[i]);
                space->context_tbl[i] = ~0;
        }

        for (i = 0, offset = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
                if (!space->session_tbl[i])
                        continue;

                rc = tpm2_save_context(chip, space->session_tbl[i],
                                       space->session_buf, space->buf_size,
                                       &offset);
                if (rc == -ENOENT) {
                        /* failed to save the session, just forget it */
                        space->session_tbl[i] = 0;
                } else if (rc < 0) {
                        tpm2_flush_space(chip);
                        return rc;
                }
        }

        return 0;
}

int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
                      void *buf, size_t *bufsiz)
{
        struct tpm_header *header = buf;
        int rc;

        if (!space)
                return 0;

        rc = tpm2_map_response_header(chip, chip->last_cc, buf, *bufsiz);
        if (rc) {
                tpm2_flush_space(chip);
                goto out;
        }

        rc = tpm2_map_response_body(chip, chip->last_cc, buf, *bufsiz);
        if (rc) {
                tpm2_flush_space(chip);
                goto out;
        }

        rc = tpm2_save_space(chip);
        if (rc) {
                tpm2_flush_space(chip);
                goto out;
        }

        *bufsiz = be32_to_cpu(header->length);

        memcpy(&space->context_tbl, &chip->work_space.context_tbl,
               sizeof(space->context_tbl));
        memcpy(&space->session_tbl, &chip->work_space.session_tbl,
               sizeof(space->session_tbl));
        memcpy(space->context_buf, chip->work_space.context_buf,
               space->buf_size);
        memcpy(space->session_buf, chip->work_space.session_buf,
               space->buf_size);

        return 0;
out:
        dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
        return rc;
}

/*
 * Put the reference to the main device.
 */
static void tpm_devs_release(struct device *dev)
{
        struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);

        /* release the master device reference */
        put_device(&chip->dev);
}

/*
 * Remove the device file for exposed TPM spaces and release the device
 * reference. This may also release the reference to the master device.
 */
void tpm_devs_remove(struct tpm_chip *chip)
{
        cdev_device_del(&chip->cdevs, &chip->devs);
        put_device(&chip->devs);
}

/*
 * Add a device file to expose TPM spaces. Also take a reference to the
 * main device.
 */
int tpm_devs_add(struct tpm_chip *chip)
{
        int rc;

        device_initialize(&chip->devs);
        chip->devs.parent = chip->dev.parent;
        chip->devs.class = &tpmrm_class;

        /*
         * Get extra reference on main device to hold on behalf of devs.
         * This holds the chip structure while cdevs is in use. The
         * corresponding put is in the tpm_devs_release.
         */
        get_device(&chip->dev);
        chip->devs.release = tpm_devs_release;
        chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
        cdev_init(&chip->cdevs, &tpmrm_fops);
        chip->cdevs.owner = THIS_MODULE;

        rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
        if (rc)
                goto err_put_devs;

        rc = cdev_device_add(&chip->cdevs, &chip->devs);
        if (rc) {
                dev_err(&chip->devs,
                        "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
                        dev_name(&chip->devs), MAJOR(chip->devs.devt),
                        MINOR(chip->devs.devt), rc);
                goto err_put_devs;
        }

        return 0;

err_put_devs:
        put_device(&chip->devs);

        return rc;
}