/*
 * Copyright (C) 2005, 2006 IBM Corporation
 * Copyright (C) 2014, 2015 Intel Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"

enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};

struct tpm_info {
	struct resource res;
	/* irq > 0 means: use irq $irq;
	 * irq = 0 means: autoprobe for an irq;
	 * irq = -1 means: no irq support
	 */
	int irq;
};

/* Some timeout values are needed before it is known whether the chip is
 * TPM 1.0 or TPM 2.0.
 */
#define TIS_TIMEOUT_A_MAX	max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A)
#define TIS_TIMEOUT_B_MAX	max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B)
#define TIS_TIMEOUT_C_MAX	max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C)
#define TIS_TIMEOUT_D_MAX	max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D)

#define TPM_ACCESS(l)		(0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)	(0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)	(0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)	(0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)	(0x0014 | ((l) << 12))
#define TPM_STS(l)		(0x0018 | ((l) << 12))
#define TPM_STS3(l)		(0x001b | ((l) << 12))
#define TPM_DATA_FIFO(l)	(0x0024 | ((l) << 12))

#define TPM_DID_VID(l)		(0x0F00 | ((l) << 12))
#define TPM_RID(l)		(0x0F04 | ((l) << 12))

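/* Note on the register map above: each locality occupies its own 4 KiB page
 * inside the TIS MMIO window (hence the (l) << 12 term, and TIS_MEM_LEN of
 * 0x5000 covering localities 0-4).  For example, TPM_ACCESS(0) resolves to
 * offset 0x0000, TPM_ACCESS(2) to 0x2000 and TPM_STS(1) to 0x1018.
 */
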
struct priv_data {
	bool irq_tested;
};

#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int has_hid(struct acpi_device *dev, const char *hid)
{
	struct acpi_hardware_id *id;

	list_for_each_entry(id, &dev->pnp.ids, list)
		if (!strcmp(hid, id->id))
			return 1;

	return 0;
}

static inline int is_itpm(struct acpi_device *dev)
{
	return has_hid(dev, "INTC0102");
}
#else
static inline int is_itpm(struct acpi_device *dev)
{
	return 0;
}
#endif

/* Before we attempt to access the TPM we must see that the valid bit is set.
 * The specification says that this bit is 0 at reset and remains 0 until the
 * 'TPM has gone through its self test and initialization and has established
 * correct values in the other bits.'
 */
static int wait_startup(struct tpm_chip *chip, int l)
{
	unsigned long stop = jiffies + chip->vendor.timeout_a;

	do {
		if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		    TPM_ACCESS_VALID)
			return 0;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -1;
}

static int check_locality(struct tpm_chip *chip, int l)
{
	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
		return chip->vendor.locality = l;

	return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
			 chip->vendor.iobase + TPM_ACCESS(l));
}

static int request_locality(struct tpm_chip *chip, int l)
{
	unsigned long stop, timeout;
	long rc;

	if (check_locality(chip, l) >= 0)
		return l;

	iowrite8(TPM_ACCESS_REQUEST_USE,
		 chip->vendor.iobase + TPM_ACCESS(l));

	stop = jiffies + chip->vendor.timeout_a;

	if (chip->vendor.irq) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -1;
		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
						      (check_locality
						       (chip, l) >= 0),
						      timeout);
		if (rc > 0)
			return l;
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		/* poll until the locality is granted */
		do {
			if (check_locality(chip, l) >= 0)
				return l;
			msleep(TPM_TIMEOUT);
		} while (time_before(jiffies, stop));
	}
	return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
	return ioread8(chip->vendor.iobase +
		       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}

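/* The burst count lives in bits 8..23 of the 32-bit TPM_STS register.  The
 * helper below assembles it from two consecutive byte reads (TPM_STS + 1 is
 * the low byte, TPM_STS + 2 the high byte); it tells us how many bytes the
 * FIFO can accept or deliver without further handshaking.  For example,
 * reading 0x10 and 0x02 yields a burst count of 0x0210 (528) bytes.
 */
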
static int get_burstcount(struct tpm_chip *chip)
{
	unsigned long stop;
	int burstcnt;

	/* wait for burstcount */
	/* which timeout value, spec has 2 answers (c & d) */
	stop = jiffies + chip->vendor.timeout_d;
	do {
		burstcnt = ioread8(chip->vendor.iobase +
				   TPM_STS(chip->vendor.locality) + 1);
		burstcnt += ioread8(chip->vendor.iobase +
				    TPM_STS(chip->vendor.locality) + 2) << 8;
		if (burstcnt)
			return burstcnt;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -EBUSY;
}

static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0, burstcnt;

	while (size < count &&
	       wait_for_tpm_stat(chip,
				 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
				 chip->vendor.timeout_c,
				 &chip->vendor.read_queue, true) == 0) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && size < count; burstcnt--)
			buf[size++] = ioread8(chip->vendor.iobase +
					      TPM_DATA_FIFO(chip->vendor.locality));
	}
	return size;
}

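/* Every TPM response starts with a 10-byte header (TPM_HEADER_SIZE): a
 * 2-byte tag, a 4-byte big-endian total length at offset 2, and a 4-byte
 * return code.  tpm_tis_recv() below first pulls in that header, takes the
 * expected transfer size from the length field, and then reads the rest of
 * the response from the FIFO.
 */
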
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0;
	int expected, status;

	if (count < TPM_HEADER_SIZE) {
		size = -EIO;
		goto out;
	}

	/* read first 10 bytes, including tag, paramsize, and result */
	if ((size =
	     recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
		dev_err(chip->pdev, "Unable to read header\n");
		goto out;
	}

	expected = be32_to_cpu(*(__be32 *) (buf + 2));
	if (expected > count) {
		size = -EIO;
		goto out;
	}

	if ((size +=
	     recv_data(chip, &buf[TPM_HEADER_SIZE],
		       expected - TPM_HEADER_SIZE)) < expected) {
		dev_err(chip->pdev, "Unable to read remainder of result\n");
		size = -ETIME;
		goto out;
	}

	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			  &chip->vendor.int_queue, false);
	status = tpm_tis_status(chip);
	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
		dev_err(chip->pdev, "Error left over data\n");
		size = -EIO;
		goto out;
	}

out:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return size;
}

static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc, status, burstcnt;
	size_t count = 0;

	if (request_locality(chip, 0) < 0)
		return -EBUSY;

	status = tpm_tis_status(chip);
	if ((status & TPM_STS_COMMAND_READY) == 0) {
		tpm_tis_ready(chip);
		if (wait_for_tpm_stat
		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
		     &chip->vendor.int_queue, false) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}

	while (count < len - 1) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
			iowrite8(buf[count], chip->vendor.iobase +
				 TPM_DATA_FIFO(chip->vendor.locality));
			count++;
		}

		wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
				  &chip->vendor.int_queue, false);
		status = tpm_tis_status(chip);
		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
			rc = -EIO;
			goto out_err;
		}
	}

	/* write last byte */
	iowrite8(buf[count],
		 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			  &chip->vendor.int_queue, false);
	status = tpm_tis_status(chip);
	if ((status & TPM_STS_DATA_EXPECT) != 0) {
		rc = -EIO;
		goto out_err;
	}

	return 0;

out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

static void disable_interrupts(struct tpm_chip *chip)
{
	u32 intmask;

	intmask = ioread32(chip->vendor.iobase +
			   TPM_INT_ENABLE(chip->vendor.locality));
	intmask &= ~TPM_GLOBAL_INT_ENABLE;
	iowrite32(intmask,
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	devm_free_irq(chip->pdev, chip->vendor.irq, chip);
	chip->vendor.irq = 0;
}

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc;
	u32 ordinal;
	unsigned long dur;

	rc = tpm_tis_send_data(chip, buf, len);
	if (rc < 0)
		return rc;

	/* go and do it */
	iowrite8(TPM_STS_GO,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

	if (chip->vendor.irq) {
		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));

		if (chip->flags & TPM_CHIP_FLAG_TPM2)
			dur = tpm2_calc_ordinal_duration(chip, ordinal);
		else
			dur = tpm_calc_ordinal_duration(chip, ordinal);

		if (wait_for_tpm_stat
		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
		     &chip->vendor.read_queue, false) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}
	return len;
out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

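/* Before trusting the interrupt path, tpm_tis_send() runs one command with
 * chip->vendor.irq temporarily cleared so that tpm_tis_send_main() polls.
 * If the interrupt handler fires during that command it sets irq_tested and
 * the IRQ is kept; otherwise, after a short grace period, interrupts are
 * disabled again and the driver stays in polling mode.
 */
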
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc, irq;
	struct priv_data *priv = chip->vendor.priv;

	if (!chip->vendor.irq || priv->irq_tested)
		return tpm_tis_send_main(chip, buf, len);

	/* Verify receipt of the expected IRQ */
	irq = chip->vendor.irq;
	chip->vendor.irq = 0;
	rc = tpm_tis_send_main(chip, buf, len);
	chip->vendor.irq = irq;
	if (!priv->irq_tested)
		msleep(1);
	if (!priv->irq_tested)
		disable_interrupts(chip);
	priv->irq_tested = true;
	return rc;
}

struct tis_vendor_timeout_override {
	u32 did_vid;
	unsigned long timeout_us[4];
};

static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
	/* Atmel 3204 */
	{ 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
			(TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
};

static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
				    unsigned long *timeout_cap)
{
	int i;
	u32 did_vid;

	did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

	for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
		if (vendor_timeout_overrides[i].did_vid != did_vid)
			continue;
		memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
		       sizeof(vendor_timeout_overrides[i].timeout_us));
		return true;
	}

	return false;
}

/*
 * Early probing for an iTPM with the STS_DATA_EXPECT flaw.
 * Try sending a command without the itpm flag set and, if that
 * fails, repeat with the flag set.
 */
static int probe_itpm(struct tpm_chip *chip)
{
	int rc = 0;
	u8 cmd_getticks[] = {
		0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
		0x00, 0x00, 0x00, 0xf1
	};
	size_t len = sizeof(cmd_getticks);
	bool rem_itpm = itpm;
	u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));

	/* probe only iTPMs */
	if (vendor != TPM_VID_INTEL)
		return 0;

	itpm = false;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0)
		goto out;

	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);

	itpm = true;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0) {
		dev_info(chip->pdev, "Detected an iTPM.\n");
		rc = 1;
	} else
		rc = -EFAULT;

out:
	itpm = rem_itpm;
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);

	return rc;
}

static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
{
	switch (chip->vendor.manufacturer_id) {
	case TPM_VID_WINBOND:
		return ((status == TPM_STS_VALID) ||
			(status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
	case TPM_VID_STM:
		return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
	default:
		return (status == TPM_STS_COMMAND_READY);
	}
}

static const struct tpm_class_ops tpm_tis = {
	.status = tpm_tis_status,
	.recv = tpm_tis_recv,
	.send = tpm_tis_send,
	.cancel = tpm_tis_ready,
	.update_timeouts = tpm_tis_update_timeouts,
	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_canceled = tpm_tis_req_canceled,
};

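/* Interrupt handler.  A data-available cause wakes readers sleeping on
 * read_queue; locality-change, stsValid and commandReady causes wake
 * waiters on int_queue.  The handler also records that the IRQ really
 * fired by setting irq_tested.  All raised causes are acknowledged by
 * writing them back to TPM_INT_STATUS; the trailing read is presumably
 * there to flush the posted write.
 */
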
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;
	int i;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	((struct priv_data *)chip->vendor.priv)->irq_tested = true;
	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
		wake_up_interruptible(&chip->vendor.read_queue);
	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
		for (i = 0; i < 5; i++)
			if (check_locality(chip, i) >= 0)
				break;
	if (interrupt &
	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
	     TPM_INTF_CMD_READY_INT))
		wake_up_interruptible(&chip->vendor.int_queue);

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

/* Register the IRQ and issue a command that will cause an interrupt. If an
 * irq is seen then leave the chip setup for IRQ operation, otherwise reverse
 * everything and leave in polling mode. Returns 0 on success.
 */
static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
				    int flags, int irq)
{
	struct priv_data *priv = chip->vendor.priv;
	u8 original_int_vec;

	if (devm_request_irq(chip->pdev, irq, tis_int_handler, flags,
			     chip->devname, chip) != 0) {
		dev_info(chip->pdev, "Unable to request irq: %d for probe\n",
			 irq);
		return -1;
	}
	chip->vendor.irq = irq;

	original_int_vec = ioread8(chip->vendor.iobase +
				   TPM_INT_VECTOR(chip->vendor.locality));
	iowrite8(irq,
		 chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality));

	/* Clear all existing */
	iowrite32(ioread32(chip->vendor.iobase +
			   TPM_INT_STATUS(chip->vendor.locality)),
		  chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));

	/* Turn on */
	iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
		  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));

	priv->irq_tested = false;

	/* Generate an interrupt by having the core call through to
	 * tpm_tis_send
	 */
	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		tpm2_gen_interrupt(chip);
	else
		tpm_gen_interrupt(chip);

	/* tpm_tis_send will either confirm the interrupt is working or it
	 * will call disable_interrupts() which undoes all of the above.
	 */
	if (!chip->vendor.irq) {
		iowrite8(original_int_vec,
			 chip->vendor.iobase +
			 TPM_INT_VECTOR(chip->vendor.locality));
		return 1;
	}

	return 0;
}

/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
 * do not have ACPI/etc. We typically expect the interrupt to be declared if
 * present.
 */
static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask)
{
	u8 original_int_vec;
	int i;

	original_int_vec = ioread8(chip->vendor.iobase +
				   TPM_INT_VECTOR(chip->vendor.locality));

	if (!original_int_vec) {
		if (IS_ENABLED(CONFIG_X86))
			for (i = 3; i <= 15; i++)
				if (!tpm_tis_probe_irq_single(chip, intmask, 0,
							      i))
					return;
	} else if (!tpm_tis_probe_irq_single(chip, intmask, 0,
					     original_int_vec))
		return;
}

static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

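/* Teardown shared by all bus bindings and the probe error path: for TPM 2.0
 * parts issue a TPM2_Shutdown(CLEAR) first, then mask the global interrupt
 * enable and force-release the active locality.
 */
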
static void tpm_tis_remove(struct tpm_chip *chip)
{
	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		tpm2_shutdown(chip, TPM2_SU_CLEAR);

	iowrite32(~TPM_GLOBAL_INT_ENABLE &
		  ioread32(chip->vendor.iobase +
			   TPM_INT_ENABLE(chip->vendor.locality)),
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	release_locality(chip, chip->vendor.locality, 1);
}

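/* Common probe path for the PNP, ACPI and platform bindings.  Roughly: map
 * the register window, assume worst-case timeouts, wait for the access valid
 * bit, mask interrupts, claim locality 0, distinguish TPM 1.2 from TPM 2.0,
 * apply the iTPM quirk if needed, read back the interface capabilities,
 * fetch the real timeouts, optionally set up the IRQ, run the self test and
 * finally register the chip with the TPM core.
 */
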
static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
			acpi_handle acpi_dev_handle)
{
	u32 vendor, intfcaps, intmask;
	int rc, probe;
	struct tpm_chip *chip;
	struct priv_data *priv;

	priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	chip = tpmm_chip_alloc(dev, &tpm_tis);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	chip->vendor.priv = priv;
#ifdef CONFIG_ACPI
	chip->acpi_dev_handle = acpi_dev_handle;
#endif

	chip->vendor.iobase = devm_ioremap_resource(dev, &tpm_info->res);
	if (IS_ERR(chip->vendor.iobase))
		return PTR_ERR(chip->vendor.iobase);

	/* Maximum timeouts */
	chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX;
	chip->vendor.timeout_b = TIS_TIMEOUT_B_MAX;
	chip->vendor.timeout_c = TIS_TIMEOUT_C_MAX;
	chip->vendor.timeout_d = TIS_TIMEOUT_D_MAX;

	if (wait_startup(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	/* Take control of the TPM's interrupt hardware and shut it off */
	intmask = ioread32(chip->vendor.iobase +
			   TPM_INT_ENABLE(chip->vendor.locality));
	intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
		   TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
	intmask &= ~TPM_GLOBAL_INT_ENABLE;
	iowrite32(intmask,
		  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));

	if (request_locality(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	rc = tpm2_probe(chip);
	if (rc)
		goto out_err;

	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
	chip->vendor.manufacturer_id = vendor;

	dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
		 (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

	if (!itpm) {
		probe = probe_itpm(chip);
		if (probe < 0) {
			rc = -ENODEV;
			goto out_err;
		}
		itpm = !!probe;
	}

	if (itpm)
		dev_info(dev, "Intel iTPM workaround enabled\n");

	/* Figure out the capabilities */
	intfcaps = ioread32(chip->vendor.iobase +
			    TPM_INTF_CAPS(chip->vendor.locality));
	dev_dbg(dev, "TPM interface capabilities (0x%x):\n", intfcaps);
	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
		dev_dbg(dev, "\tBurst Count Static\n");
	if (intfcaps & TPM_INTF_CMD_READY_INT)
		dev_dbg(dev, "\tCommand Ready Int Support\n");
	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
		dev_dbg(dev, "\tInterrupt Edge Falling\n");
	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
		dev_dbg(dev, "\tInterrupt Edge Rising\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
		dev_dbg(dev, "\tInterrupt Level Low\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
		dev_dbg(dev, "\tInterrupt Level High\n");
	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
		dev_dbg(dev, "\tLocality Change Int Support\n");
	if (intfcaps & TPM_INTF_STS_VALID_INT)
		dev_dbg(dev, "\tSts Valid Int Support\n");
	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
		dev_dbg(dev, "\tData Avail Int Support\n");

	/* Very early on issue a command to the TPM in polling mode to make
	 * sure it works. May as well use that command to set the proper
	 * timeouts for the driver.
	 */
	if (tpm_get_timeouts(chip)) {
		dev_err(dev, "Could not get TPM timeouts and durations\n");
		rc = -ENODEV;
		goto out_err;
	}

	/* INTERRUPT Setup */
	init_waitqueue_head(&chip->vendor.read_queue);
	init_waitqueue_head(&chip->vendor.int_queue);
	if (interrupts && tpm_info->irq != -1) {
		if (tpm_info->irq) {
			tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
						 tpm_info->irq);
			if (!chip->vendor.irq)
				dev_err(chip->pdev, FW_BUG
					"TPM interrupt not working, polling instead\n");
		} else
			tpm_tis_probe_irq(chip, intmask);
	}

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		rc = tpm2_do_selftest(chip);
		if (rc == TPM2_RC_INITIALIZE) {
			dev_warn(dev, "Firmware has not started TPM\n");
			rc = tpm2_startup(chip, TPM2_SU_CLEAR);
			if (!rc)
				rc = tpm2_do_selftest(chip);
		}

		if (rc) {
			dev_err(dev, "TPM self test failed\n");
			if (rc > 0)
				rc = -ENODEV;
			goto out_err;
		}
	} else {
		if (tpm_do_selftest(chip)) {
			dev_err(dev, "TPM self test failed\n");
			rc = -ENODEV;
			goto out_err;
		}
	}

	return tpm_chip_register(chip);
out_err:
	tpm_tis_remove(chip);
	return rc;
}

#ifdef CONFIG_PM_SLEEP
static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
	u32 intmask;

	/* reenable interrupts that the device may have lost or that
	 * BIOS/firmware may have disabled
	 */
	iowrite8(chip->vendor.irq, chip->vendor.iobase +
		 TPM_INT_VECTOR(chip->vendor.locality));

	intmask = ioread32(chip->vendor.iobase +
			   TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

	iowrite32(intmask,
		  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}

static int tpm_tis_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	int ret;

	if (chip->vendor.irq)
		tpm_tis_reenable_interrupts(chip);

	ret = tpm_pm_resume(dev);
	if (ret)
		return ret;

	/* TPM 1.2 requires a self-test on resume. This function actually
	 * returns an error code, but for unknown reasons it is not handled.
	 */
	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
		tpm_do_selftest(chip);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);

static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
			    const struct pnp_device_id *pnp_id)
{
	struct tpm_info tpm_info = {};
	acpi_handle acpi_dev_handle = NULL;
	struct resource *res;

	res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	tpm_info.res = *res;

	if (pnp_irq_valid(pnp_dev, 0))
		tpm_info.irq = pnp_irq(pnp_dev, 0);
	else
		tpm_info.irq = -1;

	if (pnp_acpi_device(pnp_dev)) {
		if (is_itpm(pnp_acpi_device(pnp_dev)))
			itpm = true;

		acpi_dev_handle = ACPI_HANDLE(&pnp_dev->dev);
	}

	return tpm_tis_init(&pnp_dev->dev, &tpm_info, acpi_dev_handle);
}

static struct pnp_device_id tpm_pnp_tbl[] = {
	{"PNP0C31", 0},		/* TPM */
	{"ATM1200", 0},		/* Atmel */
	{"IFX0102", 0},		/* Infineon */
	{"BCM0101", 0},		/* Broadcom */
	{"BCM0102", 0},		/* Broadcom */
	{"NSC1200", 0},		/* National */
	{"ICO0102", 0},		/* Intel */
	/* Add new here */
	{"", 0},		/* User Specified */
	{"", 0}			/* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	tpm_chip_unregister(chip);
	tpm_tis_remove(chip);
}

static struct pnp_driver tis_pnp_driver = {
	.name = "tpm_tis",
	.id_table = tpm_pnp_tbl,
	.probe = tpm_tis_pnp_init,
	.remove = tpm_tis_pnp_remove,
	.driver	= {
		.pm = &tpm_tis_pm,
	},
};

#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");

#ifdef CONFIG_ACPI
static int tpm_check_resource(struct acpi_resource *ares, void *data)
{
	struct tpm_info *tpm_info = (struct tpm_info *) data;
	struct resource res;

	if (acpi_dev_resource_interrupt(ares, 0, &res))
		tpm_info->irq = res.start;
	else if (acpi_dev_resource_memory(ares, &res)) {
		tpm_info->res = res;
		tpm_info->res.name = NULL;
	}

	return 1;
}

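/* The ACPI binding relies on the static ACPI TPM2 table: if the table is
 * missing, truncated, or advertises a start method other than plain
 * memory-mapped I/O, the probe below bails out with an error.
 */
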
static int tpm_tis_acpi_init(struct acpi_device *acpi_dev)
{
	struct acpi_table_tpm2 *tbl;
	acpi_status st;
	struct list_head resources;
	struct tpm_info tpm_info = {};
	int ret;

	st = acpi_get_table(ACPI_SIG_TPM2, 1,
			    (struct acpi_table_header **) &tbl);
	if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
		dev_err(&acpi_dev->dev,
			FW_BUG "failed to get TPM2 ACPI table\n");
		return -EINVAL;
	}

	if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
		return -ENODEV;

	INIT_LIST_HEAD(&resources);
	tpm_info.irq = -1;
	ret = acpi_dev_get_resources(acpi_dev, &resources, tpm_check_resource,
				     &tpm_info);
	if (ret < 0)
		return ret;

	acpi_dev_free_resource_list(&resources);

	if (resource_type(&tpm_info.res) != IORESOURCE_MEM) {
		dev_err(&acpi_dev->dev,
			FW_BUG "TPM2 ACPI table does not define a memory resource\n");
		return -EINVAL;
	}

	if (is_itpm(acpi_dev))
		itpm = true;

	return tpm_tis_init(&acpi_dev->dev, &tpm_info, acpi_dev->handle);
}

static int tpm_tis_acpi_remove(struct acpi_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);

	tpm_chip_unregister(chip);
	tpm_tis_remove(chip);

	return 0;
}

static struct acpi_device_id tpm_acpi_tbl[] = {
	{"MSFT0101", 0},	/* TPM 2.0 */
	/* Add new here */
	{"", 0},		/* User Specified */
	{"", 0}			/* Terminator */
};
MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl);

static struct acpi_driver tis_acpi_driver = {
	.name = "tpm_tis",
	.ids = tpm_acpi_tbl,
	.ops = {
		.add = tpm_tis_acpi_init,
		.remove = tpm_tis_acpi_remove,
	},
	.drv = {
		.pm = &tpm_tis_pm,
	},
};
#endif

static struct platform_device *force_pdev;

static int tpm_tis_plat_probe(struct platform_device *pdev)
{
	struct tpm_info tpm_info = {};
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "no memory resource defined\n");
		return -ENODEV;
	}
	tpm_info.res = *res;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res) {
		tpm_info.irq = res->start;
	} else {
		if (pdev != force_pdev)
			tpm_info.irq = -1;
		else
			/* When forcing, auto probe the IRQ */
			tpm_info.irq = 0;
	}

	return tpm_tis_init(&pdev->dev, &tpm_info, NULL);
}

static int tpm_tis_plat_remove(struct platform_device *pdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);

	tpm_chip_unregister(chip);
	tpm_tis_remove(chip);

	return 0;
}

static struct platform_driver tis_drv = {
	.probe = tpm_tis_plat_probe,
	.remove = tpm_tis_plat_remove,
	.driver = {
		.name = "tpm_tis",
		.pm = &tpm_tis_pm,
	},
};

static bool force;
#ifdef CONFIG_X86
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
#endif

static int tpm_tis_force_device(void)
{
	struct platform_device *pdev;
	static const struct resource x86_resources[] = {
		{
			.start = 0xFED40000,
			.end = 0xFED40000 + TIS_MEM_LEN - 1,
			.flags = IORESOURCE_MEM,
		},
	};

	if (!force)
		return 0;

	/* The driver core will match the name tpm_tis of the device to
	 * the tpm_tis platform driver and complete the setup via
	 * tpm_tis_plat_probe
	 */
	pdev = platform_device_register_simple("tpm_tis", -1, x86_resources,
					       ARRAY_SIZE(x86_resources));
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);
	force_pdev = pdev;

	return 0;
}

static int __init init_tis(void)
{
	int rc;

	rc = tpm_tis_force_device();
	if (rc)
		goto err_force;

	rc = platform_driver_register(&tis_drv);
	if (rc)
		goto err_platform;

#ifdef CONFIG_ACPI
	rc = acpi_bus_register_driver(&tis_acpi_driver);
	if (rc)
		goto err_acpi;
#endif

	if (IS_ENABLED(CONFIG_PNP)) {
		rc = pnp_register_driver(&tis_pnp_driver);
		if (rc)
			goto err_pnp;
	}

	return 0;

err_pnp:
#ifdef CONFIG_ACPI
	acpi_bus_unregister_driver(&tis_acpi_driver);
err_acpi:
#endif
	platform_driver_unregister(&tis_drv);
err_platform:
	if (force_pdev)
		platform_device_unregister(force_pdev);
err_force:
	return rc;
}

static void __exit cleanup_tis(void)
{
	pnp_unregister_driver(&tis_pnp_driver);
#ifdef CONFIG_ACPI
	acpi_bus_unregister_driver(&tis_acpi_driver);
#endif
	platform_driver_unregister(&tis_drv);

	if (force_pdev)
		platform_device_unregister(force_pdev);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");