1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2015, 2016 IBM Corporation 4 * Copyright (C) 2016 Intel Corporation 5 * 6 * Author: Stefan Berger <stefanb@us.ibm.com> 7 * 8 * Maintained by: <tpmdd-devel@lists.sourceforge.net> 9 * 10 * Device driver for vTPM (vTPM proxy driver) 11 */ 12 13 #include <linux/types.h> 14 #include <linux/spinlock.h> 15 #include <linux/uaccess.h> 16 #include <linux/wait.h> 17 #include <linux/miscdevice.h> 18 #include <linux/vtpm_proxy.h> 19 #include <linux/file.h> 20 #include <linux/anon_inodes.h> 21 #include <linux/poll.h> 22 #include <linux/compat.h> 23 24 #include "tpm.h" 25 26 #define VTPM_PROXY_REQ_COMPLETE_FLAG BIT(0) 27 28 struct proxy_dev { 29 struct tpm_chip *chip; 30 31 u32 flags; /* public API flags */ 32 33 wait_queue_head_t wq; 34 35 struct mutex buf_lock; /* protect buffer and flags */ 36 37 long state; /* internal state */ 38 #define STATE_OPENED_FLAG BIT(0) 39 #define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */ 40 #define STATE_REGISTERED_FLAG BIT(2) 41 #define STATE_DRIVER_COMMAND BIT(3) /* sending a driver specific command */ 42 43 size_t req_len; /* length of queued TPM request */ 44 size_t resp_len; /* length of queued TPM response */ 45 u8 buffer[TPM_BUFSIZE]; /* request/response buffer */ 46 47 struct work_struct work; /* task that retrieves TPM timeouts */ 48 }; 49 50 /* all supported flags */ 51 #define VTPM_PROXY_FLAGS_ALL (VTPM_PROXY_FLAG_TPM2) 52 53 static struct workqueue_struct *workqueue; 54 55 static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev); 56 57 /* 58 * Functions related to 'server side' 59 */ 60 61 /** 62 * vtpm_proxy_fops_read - Read TPM commands on 'server side' 63 * 64 * @filp: file pointer 65 * @buf: read buffer 66 * @count: number of bytes to read 67 * @off: offset 68 * 69 * Return: 70 * Number of bytes read or negative error code 71 */ 72 static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf, 73 size_t count, loff_t *off) 74 { 75 struct proxy_dev *proxy_dev = filp->private_data; 76 size_t len; 77 int sig, rc; 78 79 sig = wait_event_interruptible(proxy_dev->wq, 80 proxy_dev->req_len != 0 || 81 !(proxy_dev->state & STATE_OPENED_FLAG)); 82 if (sig) 83 return -EINTR; 84 85 mutex_lock(&proxy_dev->buf_lock); 86 87 if (!(proxy_dev->state & STATE_OPENED_FLAG)) { 88 mutex_unlock(&proxy_dev->buf_lock); 89 return -EPIPE; 90 } 91 92 len = proxy_dev->req_len; 93 94 if (count < len || len > sizeof(proxy_dev->buffer)) { 95 mutex_unlock(&proxy_dev->buf_lock); 96 pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n", 97 count, len); 98 return -EIO; 99 } 100 101 rc = copy_to_user(buf, proxy_dev->buffer, len); 102 memset(proxy_dev->buffer, 0, len); 103 proxy_dev->req_len = 0; 104 105 if (!rc) 106 proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG; 107 108 mutex_unlock(&proxy_dev->buf_lock); 109 110 if (rc) 111 return -EFAULT; 112 113 return len; 114 } 115 116 /** 117 * vtpm_proxy_fops_write - Write TPM responses on 'server side' 118 * 119 * @filp: file pointer 120 * @buf: write buffer 121 * @count: number of bytes to write 122 * @off: offset 123 * 124 * Return: 125 * Number of bytes read or negative error value 126 */ 127 static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf, 128 size_t count, loff_t *off) 129 { 130 struct proxy_dev *proxy_dev = filp->private_data; 131 132 mutex_lock(&proxy_dev->buf_lock); 133 134 if (!(proxy_dev->state & STATE_OPENED_FLAG)) { 135 mutex_unlock(&proxy_dev->buf_lock); 136 return -EPIPE; 137 } 138 139 if (count > sizeof(proxy_dev->buffer) || 140 !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) { 141 mutex_unlock(&proxy_dev->buf_lock); 142 return -EIO; 143 } 144 145 proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG; 146 147 proxy_dev->req_len = 0; 148 149 if (copy_from_user(proxy_dev->buffer, buf, count)) { 150 mutex_unlock(&proxy_dev->buf_lock); 151 return -EFAULT; 152 } 153 154 proxy_dev->resp_len = count; 155 156 mutex_unlock(&proxy_dev->buf_lock); 157 158 wake_up_interruptible(&proxy_dev->wq); 159 160 return count; 161 } 162 163 /* 164 * vtpm_proxy_fops_poll - Poll status on 'server side' 165 * 166 * @filp: file pointer 167 * @wait: poll table 168 * 169 * Return: Poll flags 170 */ 171 static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait) 172 { 173 struct proxy_dev *proxy_dev = filp->private_data; 174 __poll_t ret; 175 176 poll_wait(filp, &proxy_dev->wq, wait); 177 178 ret = EPOLLOUT; 179 180 mutex_lock(&proxy_dev->buf_lock); 181 182 if (proxy_dev->req_len) 183 ret |= EPOLLIN | EPOLLRDNORM; 184 185 if (!(proxy_dev->state & STATE_OPENED_FLAG)) 186 ret |= EPOLLHUP; 187 188 mutex_unlock(&proxy_dev->buf_lock); 189 190 return ret; 191 } 192 193 /* 194 * vtpm_proxy_fops_open - Open vTPM device on 'server side' 195 * 196 * @filp: file pointer 197 * 198 * Called when setting up the anonymous file descriptor 199 */ 200 static void vtpm_proxy_fops_open(struct file *filp) 201 { 202 struct proxy_dev *proxy_dev = filp->private_data; 203 204 proxy_dev->state |= STATE_OPENED_FLAG; 205 } 206 207 /** 208 * vtpm_proxy_fops_undo_open - counter-part to vtpm_fops_open 209 * Call to undo vtpm_proxy_fops_open 210 * 211 *@proxy_dev: tpm proxy device 212 */ 213 static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev) 214 { 215 mutex_lock(&proxy_dev->buf_lock); 216 217 proxy_dev->state &= ~STATE_OPENED_FLAG; 218 219 mutex_unlock(&proxy_dev->buf_lock); 220 221 /* no more TPM responses -- wake up anyone waiting for them */ 222 wake_up_interruptible(&proxy_dev->wq); 223 } 224 225 /* 226 * vtpm_proxy_fops_release - Close 'server side' 227 * 228 * @inode: inode 229 * @filp: file pointer 230 * Return: 231 * Always returns 0. 232 */ 233 static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp) 234 { 235 struct proxy_dev *proxy_dev = filp->private_data; 236 237 filp->private_data = NULL; 238 239 vtpm_proxy_delete_device(proxy_dev); 240 241 return 0; 242 } 243 244 static const struct file_operations vtpm_proxy_fops = { 245 .owner = THIS_MODULE, 246 .read = vtpm_proxy_fops_read, 247 .write = vtpm_proxy_fops_write, 248 .poll = vtpm_proxy_fops_poll, 249 .release = vtpm_proxy_fops_release, 250 }; 251 252 /* 253 * Functions invoked by the core TPM driver to send TPM commands to 254 * 'server side' and receive responses from there. 255 */ 256 257 /* 258 * Called when core TPM driver reads TPM responses from 'server side' 259 * 260 * @chip: tpm chip to use 261 * @buf: receive buffer 262 * @count: bytes to read 263 * Return: 264 * Number of TPM response bytes read, negative error value otherwise 265 */ 266 static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) 267 { 268 struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); 269 size_t len; 270 271 /* process gone ? */ 272 mutex_lock(&proxy_dev->buf_lock); 273 274 if (!(proxy_dev->state & STATE_OPENED_FLAG)) { 275 mutex_unlock(&proxy_dev->buf_lock); 276 return -EPIPE; 277 } 278 279 len = proxy_dev->resp_len; 280 if (count < len) { 281 dev_err(&chip->dev, 282 "Invalid size in recv: count=%zd, resp_len=%zd\n", 283 count, len); 284 len = -EIO; 285 goto out; 286 } 287 288 memcpy(buf, proxy_dev->buffer, len); 289 proxy_dev->resp_len = 0; 290 291 out: 292 mutex_unlock(&proxy_dev->buf_lock); 293 294 return len; 295 } 296 297 static int vtpm_proxy_is_driver_command(struct tpm_chip *chip, 298 u8 *buf, size_t count) 299 { 300 struct tpm_header *hdr = (struct tpm_header *)buf; 301 302 if (count < sizeof(struct tpm_header)) 303 return 0; 304 305 if (chip->flags & TPM_CHIP_FLAG_TPM2) { 306 switch (be32_to_cpu(hdr->ordinal)) { 307 case TPM2_CC_SET_LOCALITY: 308 return 1; 309 } 310 } else { 311 switch (be32_to_cpu(hdr->ordinal)) { 312 case TPM_ORD_SET_LOCALITY: 313 return 1; 314 } 315 } 316 return 0; 317 } 318 319 /* 320 * Called when core TPM driver forwards TPM requests to 'server side'. 321 * 322 * @chip: tpm chip to use 323 * @buf: send buffer 324 * @count: bytes to send 325 * 326 * Return: 327 * 0 in case of success, negative error value otherwise. 328 */ 329 static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) 330 { 331 struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); 332 333 if (count > sizeof(proxy_dev->buffer)) { 334 dev_err(&chip->dev, 335 "Invalid size in send: count=%zd, buffer size=%zd\n", 336 count, sizeof(proxy_dev->buffer)); 337 return -EIO; 338 } 339 340 if (!(proxy_dev->state & STATE_DRIVER_COMMAND) && 341 vtpm_proxy_is_driver_command(chip, buf, count)) 342 return -EFAULT; 343 344 mutex_lock(&proxy_dev->buf_lock); 345 346 if (!(proxy_dev->state & STATE_OPENED_FLAG)) { 347 mutex_unlock(&proxy_dev->buf_lock); 348 return -EPIPE; 349 } 350 351 proxy_dev->resp_len = 0; 352 353 proxy_dev->req_len = count; 354 memcpy(proxy_dev->buffer, buf, count); 355 356 proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG; 357 358 mutex_unlock(&proxy_dev->buf_lock); 359 360 wake_up_interruptible(&proxy_dev->wq); 361 362 return 0; 363 } 364 365 static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip) 366 { 367 /* not supported */ 368 } 369 370 static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip) 371 { 372 struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); 373 374 if (proxy_dev->resp_len) 375 return VTPM_PROXY_REQ_COMPLETE_FLAG; 376 377 return 0; 378 } 379 380 static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status) 381 { 382 struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); 383 bool ret; 384 385 mutex_lock(&proxy_dev->buf_lock); 386 387 ret = !(proxy_dev->state & STATE_OPENED_FLAG); 388 389 mutex_unlock(&proxy_dev->buf_lock); 390 391 return ret; 392 } 393 394 static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality) 395 { 396 struct tpm_buf buf; 397 int rc; 398 const struct tpm_header *header; 399 struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); 400 401 if (chip->flags & TPM_CHIP_FLAG_TPM2) 402 rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, 403 TPM2_CC_SET_LOCALITY); 404 else 405 rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, 406 TPM_ORD_SET_LOCALITY); 407 if (rc) 408 return rc; 409 tpm_buf_append_u8(&buf, locality); 410 411 proxy_dev->state |= STATE_DRIVER_COMMAND; 412 413 rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to set locality"); 414 415 proxy_dev->state &= ~STATE_DRIVER_COMMAND; 416 417 if (rc < 0) { 418 locality = rc; 419 goto out; 420 } 421 422 header = (const struct tpm_header *)buf.data; 423 rc = be32_to_cpu(header->return_code); 424 if (rc) 425 locality = -1; 426 427 out: 428 tpm_buf_destroy(&buf); 429 430 return locality; 431 } 432 433 static const struct tpm_class_ops vtpm_proxy_tpm_ops = { 434 .flags = TPM_OPS_AUTO_STARTUP, 435 .recv = vtpm_proxy_tpm_op_recv, 436 .send = vtpm_proxy_tpm_op_send, 437 .cancel = vtpm_proxy_tpm_op_cancel, 438 .status = vtpm_proxy_tpm_op_status, 439 .req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG, 440 .req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG, 441 .req_canceled = vtpm_proxy_tpm_req_canceled, 442 .request_locality = vtpm_proxy_request_locality, 443 }; 444 445 /* 446 * Code related to the startup of the TPM 2 and startup of TPM 1.2 + 447 * retrieval of timeouts and durations. 448 */ 449 450 static void vtpm_proxy_work(struct work_struct *work) 451 { 452 struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev, 453 work); 454 int rc; 455 456 rc = tpm_chip_register(proxy_dev->chip); 457 if (rc) 458 vtpm_proxy_fops_undo_open(proxy_dev); 459 else 460 proxy_dev->state |= STATE_REGISTERED_FLAG; 461 } 462 463 /* 464 * vtpm_proxy_work_stop: make sure the work has finished 465 * 466 * This function is useful when user space closed the fd 467 * while the driver still determines timeouts. 468 */ 469 static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev) 470 { 471 vtpm_proxy_fops_undo_open(proxy_dev); 472 flush_work(&proxy_dev->work); 473 } 474 475 /* 476 * vtpm_proxy_work_start: Schedule the work for TPM 1.2 & 2 initialization 477 */ 478 static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev) 479 { 480 queue_work(workqueue, &proxy_dev->work); 481 } 482 483 /* 484 * Code related to creation and deletion of device pairs 485 */ 486 static struct proxy_dev *vtpm_proxy_create_proxy_dev(void) 487 { 488 struct proxy_dev *proxy_dev; 489 struct tpm_chip *chip; 490 int err; 491 492 proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL); 493 if (proxy_dev == NULL) 494 return ERR_PTR(-ENOMEM); 495 496 init_waitqueue_head(&proxy_dev->wq); 497 mutex_init(&proxy_dev->buf_lock); 498 INIT_WORK(&proxy_dev->work, vtpm_proxy_work); 499 500 chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops); 501 if (IS_ERR(chip)) { 502 err = PTR_ERR(chip); 503 goto err_proxy_dev_free; 504 } 505 dev_set_drvdata(&chip->dev, proxy_dev); 506 507 proxy_dev->chip = chip; 508 509 return proxy_dev; 510 511 err_proxy_dev_free: 512 kfree(proxy_dev); 513 514 return ERR_PTR(err); 515 } 516 517 /* 518 * Undo what has been done in vtpm_create_proxy_dev 519 */ 520 static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev) 521 { 522 put_device(&proxy_dev->chip->dev); /* frees chip */ 523 kfree(proxy_dev); 524 } 525 526 /* 527 * Create a /dev/tpm%d and 'server side' file descriptor pair 528 * 529 * Return: 530 * Returns file pointer on success, an error value otherwise 531 */ 532 static struct file *vtpm_proxy_create_device( 533 struct vtpm_proxy_new_dev *vtpm_new_dev) 534 { 535 struct proxy_dev *proxy_dev; 536 int rc, fd; 537 struct file *file; 538 539 if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL) 540 return ERR_PTR(-EOPNOTSUPP); 541 542 proxy_dev = vtpm_proxy_create_proxy_dev(); 543 if (IS_ERR(proxy_dev)) 544 return ERR_CAST(proxy_dev); 545 546 proxy_dev->flags = vtpm_new_dev->flags; 547 548 /* setup an anonymous file for the server-side */ 549 fd = get_unused_fd_flags(O_RDWR); 550 if (fd < 0) { 551 rc = fd; 552 goto err_delete_proxy_dev; 553 } 554 555 file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev, 556 O_RDWR); 557 if (IS_ERR(file)) { 558 rc = PTR_ERR(file); 559 goto err_put_unused_fd; 560 } 561 562 /* from now on we can unwind with put_unused_fd() + fput() */ 563 /* simulate an open() on the server side */ 564 vtpm_proxy_fops_open(file); 565 566 if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2) 567 proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2; 568 569 vtpm_proxy_work_start(proxy_dev); 570 571 vtpm_new_dev->fd = fd; 572 vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt); 573 vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt); 574 vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num; 575 576 return file; 577 578 err_put_unused_fd: 579 put_unused_fd(fd); 580 581 err_delete_proxy_dev: 582 vtpm_proxy_delete_proxy_dev(proxy_dev); 583 584 return ERR_PTR(rc); 585 } 586 587 /* 588 * Counter part to vtpm_create_device. 589 */ 590 static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev) 591 { 592 vtpm_proxy_work_stop(proxy_dev); 593 594 /* 595 * A client may hold the 'ops' lock, so let it know that the server 596 * side shuts down before we try to grab the 'ops' lock when 597 * unregistering the chip. 598 */ 599 vtpm_proxy_fops_undo_open(proxy_dev); 600 601 if (proxy_dev->state & STATE_REGISTERED_FLAG) 602 tpm_chip_unregister(proxy_dev->chip); 603 604 vtpm_proxy_delete_proxy_dev(proxy_dev); 605 } 606 607 /* 608 * Code related to the control device /dev/vtpmx 609 */ 610 611 /** 612 * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl 613 * @file: /dev/vtpmx 614 * @ioctl: the ioctl number 615 * @arg: pointer to the struct vtpmx_proxy_new_dev 616 * 617 * Creates an anonymous file that is used by the process acting as a TPM to 618 * communicate with the client processes. The function will also add a new TPM 619 * device through which data is proxied to this TPM acting process. The caller 620 * will be provided with a file descriptor to communicate with the clients and 621 * major and minor numbers for the TPM device. 622 */ 623 static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl, 624 unsigned long arg) 625 { 626 void __user *argp = (void __user *)arg; 627 struct vtpm_proxy_new_dev __user *vtpm_new_dev_p; 628 struct vtpm_proxy_new_dev vtpm_new_dev; 629 struct file *vtpm_file; 630 631 if (!capable(CAP_SYS_ADMIN)) 632 return -EPERM; 633 634 vtpm_new_dev_p = argp; 635 636 if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p, 637 sizeof(vtpm_new_dev))) 638 return -EFAULT; 639 640 vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev); 641 if (IS_ERR(vtpm_file)) 642 return PTR_ERR(vtpm_file); 643 644 if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev, 645 sizeof(vtpm_new_dev))) { 646 put_unused_fd(vtpm_new_dev.fd); 647 fput(vtpm_file); 648 return -EFAULT; 649 } 650 651 fd_install(vtpm_new_dev.fd, vtpm_file); 652 return 0; 653 } 654 655 /* 656 * vtpmx_fops_ioctl: ioctl on /dev/vtpmx 657 * 658 * Return: 659 * Returns 0 on success, a negative error code otherwise. 660 */ 661 static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl, 662 unsigned long arg) 663 { 664 switch (ioctl) { 665 case VTPM_PROXY_IOC_NEW_DEV: 666 return vtpmx_ioc_new_dev(f, ioctl, arg); 667 default: 668 return -ENOIOCTLCMD; 669 } 670 } 671 672 static const struct file_operations vtpmx_fops = { 673 .owner = THIS_MODULE, 674 .unlocked_ioctl = vtpmx_fops_ioctl, 675 .compat_ioctl = compat_ptr_ioctl, 676 .llseek = noop_llseek, 677 }; 678 679 static struct miscdevice vtpmx_miscdev = { 680 .minor = MISC_DYNAMIC_MINOR, 681 .name = "vtpmx", 682 .fops = &vtpmx_fops, 683 }; 684 685 static int __init vtpm_module_init(void) 686 { 687 int rc; 688 689 workqueue = create_workqueue("tpm-vtpm"); 690 if (!workqueue) { 691 pr_err("couldn't create workqueue\n"); 692 return -ENOMEM; 693 } 694 695 rc = misc_register(&vtpmx_miscdev); 696 if (rc) { 697 pr_err("couldn't create vtpmx device\n"); 698 destroy_workqueue(workqueue); 699 } 700 701 return rc; 702 } 703 704 static void __exit vtpm_module_exit(void) 705 { 706 destroy_workqueue(workqueue); 707 misc_deregister(&vtpmx_miscdev); 708 } 709 710 module_init(vtpm_module_init); 711 module_exit(vtpm_module_exit); 712 713 MODULE_AUTHOR("Stefan Berger <stefanb@us.ibm.com>"); 714 MODULE_DESCRIPTION("vTPM Driver"); 715 MODULE_VERSION("0.1"); 716 MODULE_LICENSE("GPL"); 717