/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG
 * Author: Corvin Köhne <c.koehne@beckhoff.com>
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/linker_set.h>

#include <machine/vmm.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <pthread.h>
#include <pthread_np.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vmmapi.h>

#include "acpi_device.h"
#include "basl.h"
#include "config.h"
#include "mem.h"
#include "qemu_fwcfg.h"
#include "tpm_device.h"
#include "tpm_intf.h"

#define TPM_CRB_ADDRESS 0xFED40000
#define TPM_CRB_REGS_SIZE 0x1000

#define TPM_CRB_CONTROL_AREA_ADDRESS \
	(TPM_CRB_ADDRESS + offsetof(struct tpm_crb_regs, ctrl_req))
#define TPM_CRB_CONTROL_AREA_SIZE TPM_CRB_REGS_SIZE

#define TPM_CRB_DATA_BUFFER_ADDRESS \
	(TPM_CRB_ADDRESS + offsetof(struct tpm_crb_regs, data_buffer))
#define TPM_CRB_DATA_BUFFER_SIZE 0xF80

#define TPM_CRB_LOCALITIES_MAX 5

#define TPM_CRB_LOG_AREA_MINIMUM_SIZE (64 * 1024)

#define TPM_CRB_LOG_AREA_FWCFG_NAME "etc/tpm/log"

#define TPM_CRB_INTF_NAME "crb"

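/*
 * Register file of the CRB interface for locality 0, laid out as defined by
 * the TCG PC Client Platform TPM Profile (PTP) specification.  The offsets
 * noted in the comments are relative to TPM_CRB_ADDRESS.
 */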
struct tpm_crb_regs {
	union tpm_crb_reg_loc_state {
		struct {
			uint32_t tpm_established : 1;
			uint32_t loc_assigned : 1;
			uint32_t active_locality : 3;
			uint32_t _reserved : 2;
			uint32_t tpm_req_valid_sts : 1;
		};
		uint32_t val;
	} loc_state; /* 0h */
	uint8_t _reserved1[4]; /* 4h */
	union tpm_crb_reg_loc_ctrl {
		struct {
			uint32_t request_access : 1;
			uint32_t relinquish : 1;
			uint32_t seize : 1;
			uint32_t reset_establishment_bit : 1;
		};
		uint32_t val;
	} loc_ctrl; /* 8h */
	union tpm_crb_reg_loc_sts {
		struct {
			uint32_t granted : 1;
			uint32_t been_seized : 1;
		};
		uint32_t val;
	} loc_sts; /* Ch */
	uint8_t _reserved2[0x20]; /* 10h */
	union tpm_crb_reg_intf_id {
		struct {
			uint64_t interface_type : 4;
			uint64_t interface_version : 4;
			uint64_t cap_locality : 1;
			uint64_t cap_crb_idle_bypass : 1;
			uint64_t _reserved1 : 1;
			uint64_t cap_data_xfer_size_support : 2;
			uint64_t cap_fifo : 1;
			uint64_t cap_crb : 1;
			uint64_t _reserved2 : 2;
			uint64_t interface_selector : 2;
			uint64_t intf_sel_lock : 1;
			uint64_t _reserved3 : 4;
			uint64_t rid : 8;
			uint64_t vid : 16;
			uint64_t did : 16;
		};
		uint64_t val;
	} intf_id; /* 30h */
	union tpm_crb_reg_ctrl_ext {
		struct {
			uint32_t clear;
			uint32_t remaining_bytes;
		};
		uint64_t val;
	} ctrl_ext; /* 38h */
	union tpm_crb_reg_ctrl_req {
		struct {
			uint32_t cmd_ready : 1;
			uint32_t go_idle : 1;
		};
		uint32_t val;
	} ctrl_req; /* 40h */
	union tpm_crb_reg_ctrl_sts {
		struct {
			uint32_t tpm_sts : 1;
			uint32_t tpm_idle : 1;
		};
		uint32_t val;
	} ctrl_sts; /* 44h */
	union tpm_crb_reg_ctrl_cancel {
		struct {
			uint32_t cancel : 1;
		};
		uint32_t val;
	} ctrl_cancel; /* 48h */
	union tpm_crb_reg_ctrl_start {
		struct {
			uint32_t start : 1;
		};
		uint32_t val;
	} ctrl_start; /* 4Ch */
	uint32_t int_enable; /* 50h */
	uint32_t int_sts; /* 54h */
	uint32_t cmd_size; /* 58h */
	uint32_t cmd_addr_lo; /* 5Ch */
	uint32_t cmd_addr_hi; /* 60h */
	uint32_t rsp_size; /* 64h */
	uint64_t rsp_addr; /* 68h */
	uint8_t _reserved3[0x10]; /* 70h */
	uint8_t data_buffer[TPM_CRB_DATA_BUFFER_SIZE]; /* 80h */
} __packed;
static_assert(sizeof(struct tpm_crb_regs) == TPM_CRB_REGS_SIZE,
    "Invalid size of tpm_crb");

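/*
 * Accessors for the command and response address/size registers.  The command
 * address is split across the cmd_addr_lo and cmd_addr_hi registers and is
 * recombined into a single 64 bit value here.
 */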
#define CRB_CMD_SIZE_READ(regs) (regs.cmd_size)
#define CRB_CMD_SIZE_WRITE(regs, val)        \
	do {                                 \
		regs.cmd_size = val;         \
	} while (0)
#define CRB_CMD_ADDR_READ(regs) \
	(((uint64_t)regs.cmd_addr_hi << 32) | regs.cmd_addr_lo)
#define CRB_CMD_ADDR_WRITE(regs, val)                \
	do {                                         \
		regs.cmd_addr_lo = val & 0xFFFFFFFF; \
		regs.cmd_addr_hi = val >> 32;        \
	} while (0)
#define CRB_RSP_SIZE_READ(regs) (regs.rsp_size)
#define CRB_RSP_SIZE_WRITE(regs, val)        \
	do {                                 \
		regs.rsp_size = val;         \
	} while (0)
#define CRB_RSP_ADDR_READ(regs) (regs.rsp_addr)
#define CRB_RSP_ADDR_WRITE(regs, val)        \
	do {                                 \
		regs.rsp_addr = val;         \
	} while (0)

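/*
 * Common header of TPM 2.0 command and response buffers: a tag, the total
 * buffer length and either the command ordinal (request) or the response
 * code (response).  All fields are big endian on the wire.
 */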
struct tpm_cmd_hdr {
	uint16_t tag;
	uint32_t len;
	union {
		uint32_t ordinal;
		uint32_t errcode;
	};
} __packed;

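/*
 * Software state of the CRB interface: the TPM emulation backend and its
 * context, the event log memory exposed to the guest via fwcfg, the shadowed
 * register file and the worker thread (guarded by mutex/cond) which executes
 * TPM commands asynchronously to MMIO handling.
 */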
struct tpm_crb {
	struct tpm_emul *emul;
	void *emul_sc;
	uint8_t tpm_log_area[TPM_CRB_LOG_AREA_MINIMUM_SIZE];
	struct tpm_crb_regs regs;
	pthread_t thread;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	bool closing;
};

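/*
 * Worker thread executing TPM commands.  It sleeps on the condition variable
 * until a guest write to the start register wakes it up, validates that the
 * command and response buffers lie within the CRB data buffer, hands the
 * request to the TPM emulation backend and publishes the response before
 * clearing the start bit again.
 */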
static void *
tpm_crb_thread(void *const arg)
{
	struct tpm_crb *const crb = arg;

	pthread_mutex_lock(&crb->mutex);
	for (;;) {
		/*
		 * We're releasing the lock after wake up. Therefore, we have to
		 * check the closing condition before and after going to sleep.
		 */
		if (crb->closing)
			break;

		pthread_cond_wait(&crb->cond, &crb->mutex);

		if (crb->closing)
			break;

		const uint64_t cmd_addr = CRB_CMD_ADDR_READ(crb->regs);
		const uint64_t rsp_addr = CRB_RSP_ADDR_READ(crb->regs);
		const uint32_t cmd_size = CRB_CMD_SIZE_READ(crb->regs);
		const uint32_t rsp_size = CRB_RSP_SIZE_READ(crb->regs);

		if ((cmd_addr < TPM_CRB_DATA_BUFFER_ADDRESS) ||
		    (cmd_size < sizeof (struct tpm_cmd_hdr)) ||
		    (cmd_size > TPM_CRB_DATA_BUFFER_SIZE) ||
		    (cmd_addr + cmd_size >
			TPM_CRB_DATA_BUFFER_ADDRESS + TPM_CRB_DATA_BUFFER_SIZE)) {
			warnx("%s: invalid cmd [%16lx/%8x] outside of TPM "
			    "buffer", __func__, cmd_addr, cmd_size);
			break;
		}

		if ((rsp_addr < TPM_CRB_DATA_BUFFER_ADDRESS) ||
		    (rsp_size < sizeof (struct tpm_cmd_hdr)) ||
		    (rsp_size > TPM_CRB_DATA_BUFFER_SIZE) ||
		    (rsp_addr + rsp_size >
			TPM_CRB_DATA_BUFFER_ADDRESS + TPM_CRB_DATA_BUFFER_SIZE)) {
			warnx("%s: invalid rsp [%16lx/%8x] outside of TPM "
			    "buffer", __func__, rsp_addr, rsp_size);
			break;
		}

		const uint64_t cmd_off = cmd_addr - TPM_CRB_DATA_BUFFER_ADDRESS;
		const uint64_t rsp_off = rsp_addr - TPM_CRB_DATA_BUFFER_ADDRESS;

		if (cmd_off > TPM_CRB_DATA_BUFFER_SIZE ||
		    cmd_off + cmd_size > TPM_CRB_DATA_BUFFER_SIZE ||
		    rsp_off > TPM_CRB_DATA_BUFFER_SIZE ||
		    rsp_off + rsp_size > TPM_CRB_DATA_BUFFER_SIZE) {
			warnx(
			    "%s: invalid cmd [%16lx, %16lx] --> [%16lx, %16lx]\n\r",
			    __func__, cmd_addr, cmd_addr + cmd_size, rsp_addr,
			    rsp_addr + rsp_size);
			break;
		}

		uint8_t cmd[TPM_CRB_DATA_BUFFER_SIZE];
		memcpy(cmd, crb->regs.data_buffer, TPM_CRB_DATA_BUFFER_SIZE);

		/*
		 * Do a basic sanity check of the TPM request header. We'll need
		 * the TPM request length for execute_cmd() below.
		 */
		struct tpm_cmd_hdr *req = (struct tpm_cmd_hdr *)&cmd[cmd_off];
		if (be32toh(req->len) < sizeof (struct tpm_cmd_hdr) ||
		    be32toh(req->len) > cmd_size) {
			warnx("%s: invalid TPM request header", __func__);
			break;
		}

		/*
		 * A TPM command can take multiple seconds to execute. As we've
		 * copied all required values and buffers at this point, we can
		 * release the mutex.
		 */
		pthread_mutex_unlock(&crb->mutex);

		/*
		 * The command response buffer interface uses a single buffer
		 * for sending a command to and receiving a response from the
		 * tpm. To avoid reading old data from the command buffer which
		 * might be a security issue, we zero out the command buffer
		 * before writing the response into it. The rsp_size parameter
		 * is controlled by the guest and it's not guaranteed that the
		 * response has a size of rsp_size (e.g. if the tpm returned an
		 * error, the response would have a different size than
		 * expected). For that reason, use a second buffer for the
		 * response.
		 */
		uint8_t rsp[TPM_CRB_DATA_BUFFER_SIZE] = { 0 };
		(void) crb->emul->execute_cmd(crb->emul_sc, req,
		    be32toh(req->len), &rsp[rsp_off], rsp_size);

		pthread_mutex_lock(&crb->mutex);
		memset(crb->regs.data_buffer, 0, TPM_CRB_DATA_BUFFER_SIZE);
		memcpy(&crb->regs.data_buffer[rsp_off], &rsp[rsp_off], rsp_size);

		crb->regs.ctrl_start.start = false;
	}
	pthread_mutex_unlock(&crb->mutex);

	return (NULL);
}

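/*
 * Copy helper for MMIO accesses to the register file.  Only naturally sized
 * accesses of 1, 2, 4 or 8 bytes are allowed.
 */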
static int
tpm_crb_mmiocpy(void *const dst, void *const src, const int size)
{
	if (!(size == 1 || size == 2 || size == 4 || size == 8))
		return (EINVAL);
	memcpy(dst, src, size);

	return (0);
}

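/*
 * MMIO handler for the CRB register range.  Reads are served from the
 * shadowed register file.  Writes are decoded by register offset; only the
 * locality control, control request, cancel, start, interrupt enable and
 * command/response/data buffer registers accept writes, all other registers
 * are treated as read only.
 */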
static int
tpm_crb_mem_handler(struct vcpu *vcpu __unused, const int dir,
    const uint64_t addr, const int size, uint64_t *const val, void *const arg1,
    const long arg2 __unused)
{
	struct tpm_crb *crb;
	uint8_t *ptr;
	uint64_t off, shift;
	int error = 0;

	if ((addr & (size - 1)) != 0) {
		warnx("%s: unaligned %s access @ %16lx [size = %x]", __func__,
		    (dir == MEM_F_READ) ? "read" : "write", addr, size);
		return (EINVAL);
	}

	crb = arg1;

	off = addr - TPM_CRB_ADDRESS;
	if (off > TPM_CRB_REGS_SIZE || off + size >= TPM_CRB_REGS_SIZE) {
		return (EINVAL);
	}

	shift = 8 * (off & 3);
	ptr = (uint8_t *)&crb->regs + off;

	if (dir == MEM_F_READ) {
		error = tpm_crb_mmiocpy(val, ptr, size);
		if (error)
			goto err_out;
	} else {
		switch (off & ~0x3) {
		case offsetof(struct tpm_crb_regs, loc_ctrl): {
			union tpm_crb_reg_loc_ctrl loc_ctrl;

			if ((size_t)size > sizeof(loc_ctrl))
				goto err_out;

			*val = *val << shift;
			tpm_crb_mmiocpy(&loc_ctrl, val, size);

			if (loc_ctrl.relinquish) {
				crb->regs.loc_sts.granted = false;
				crb->regs.loc_state.loc_assigned = false;
			} else if (loc_ctrl.request_access) {
				crb->regs.loc_sts.granted = true;
				crb->regs.loc_state.loc_assigned = true;
			}

			break;
		}
		case offsetof(struct tpm_crb_regs, ctrl_req): {
			union tpm_crb_reg_ctrl_req req;

			if ((size_t)size > sizeof(req))
				goto err_out;

			*val = *val << shift;
			tpm_crb_mmiocpy(&req, val, size);

			if (req.cmd_ready && !req.go_idle) {
				crb->regs.ctrl_sts.tpm_idle = false;
			} else if (!req.cmd_ready && req.go_idle) {
				crb->regs.ctrl_sts.tpm_idle = true;
			}

			break;
		}
		case offsetof(struct tpm_crb_regs, ctrl_cancel): {
			/* TODO: cancel the tpm command */
			warnx(
			    "%s: cancelling a TPM command is not implemented yet",
			    __func__);

			break;
		}
		case offsetof(struct tpm_crb_regs, int_enable):
			/* No interrupt support. Ignore writes to int_enable. */
			break;

		case offsetof(struct tpm_crb_regs, ctrl_start): {
			union tpm_crb_reg_ctrl_start start;

			if ((size_t)size > sizeof(start))
				goto err_out;

			*val = *val << shift;

			pthread_mutex_lock(&crb->mutex);
			tpm_crb_mmiocpy(&start, val, size);

			if (!start.start || crb->regs.ctrl_start.start) {
				pthread_mutex_unlock(&crb->mutex);
				break;
			}

			crb->regs.ctrl_start.start = true;

			pthread_cond_signal(&crb->cond);
			pthread_mutex_unlock(&crb->mutex);

			break;
		}
		case offsetof(struct tpm_crb_regs, cmd_size):
		case offsetof(struct tpm_crb_regs, cmd_addr_lo):
		case offsetof(struct tpm_crb_regs, cmd_addr_hi):
		case offsetof(struct tpm_crb_regs, rsp_size):
		case offsetof(struct tpm_crb_regs,
		    rsp_addr) ... offsetof(struct tpm_crb_regs, rsp_addr) +
		    4:
		case offsetof(struct tpm_crb_regs,
		    data_buffer) ... offsetof(struct tpm_crb_regs, data_buffer) +
		    TPM_CRB_DATA_BUFFER_SIZE / 4:
			/*
			 * Those fields are used to execute a TPM command. The
			 * crb_thread will access them. For that reason, we have
			 * to acquire the crb mutex in order to write them.
			 */
			pthread_mutex_lock(&crb->mutex);
			error = tpm_crb_mmiocpy(ptr, val, size);
			pthread_mutex_unlock(&crb->mutex);
			if (error)
				goto err_out;
			break;
		default:
			/*
			 * The other fields are either readonly or we do not
			 * support writing them.
			 */
			error = EINVAL;
			goto err_out;
		}
	}

	return (0);

err_out:
	warnx("%s: invalid %s @ %16lx [size = %d]", __func__,
	    dir == MEM_F_READ ? "read" : "write", addr, size);

	return (error);
}

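/*
 * Register or unregister the CRB MMIO range with bhyve's memory emulation.
 * The range covers TPM_CRB_LOCALITIES_MAX control areas starting at
 * TPM_CRB_ADDRESS.
 */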
static int
tpm_crb_modify_mmio_registration(const bool registration, void *const arg1)
{
	struct mem_range crb_mmio = {
		.name = "crb-mmio",
		.base = TPM_CRB_ADDRESS,
		.size = TPM_CRB_LOCALITIES_MAX * TPM_CRB_CONTROL_AREA_SIZE,
		.flags = MEM_F_RW,
		.arg1 = arg1,
		.handler = tpm_crb_mem_handler,
	};

	if (registration)
		return (register_mem(&crb_mmio));
	else
		return (unregister_mem(&crb_mmio));
}

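/*
 * Set up the CRB interface: initialize the register file with its reset
 * values, publish the event log via fwcfg, reserve the control area as a
 * fixed memory ACPI resource, register the MMIO handler and start the worker
 * thread.
 */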
static int
tpm_crb_init(void **sc, struct tpm_emul *emul, void *emul_sc,
    struct acpi_device *acpi_dev)
{
	struct tpm_crb *crb = NULL;
	int error;

	assert(sc != NULL);
	assert(emul != NULL);

	crb = calloc(1, sizeof(struct tpm_crb));
	if (crb == NULL) {
		warnx("%s: failed to allocate tpm crb", __func__);
		error = ENOMEM;
		goto err_out;
	}

	memset(crb, 0, sizeof(*crb));

	crb->emul = emul;
	crb->emul_sc = emul_sc;

	crb->regs.loc_state.tpm_req_valid_sts = true;
	crb->regs.loc_state.tpm_established = true;

	crb->regs.intf_id.interface_type = TPM_INTF_TYPE_CRB;
	crb->regs.intf_id.interface_version = TPM_INTF_VERSION_CRB;
	crb->regs.intf_id.cap_locality = false;
	crb->regs.intf_id.cap_crb_idle_bypass = false;
	crb->regs.intf_id.cap_data_xfer_size_support =
	    TPM_INTF_CAP_CRB_DATA_XFER_SIZE_64;
	crb->regs.intf_id.cap_fifo = false;
	crb->regs.intf_id.cap_crb = true;
	crb->regs.intf_id.interface_selector = TPM_INTF_SELECTOR_CRB;
	crb->regs.intf_id.intf_sel_lock = false;
	crb->regs.intf_id.rid = 0;
	crb->regs.intf_id.vid = 0x1014; /* IBM */
	crb->regs.intf_id.did = 0x1014; /* IBM */

	crb->regs.ctrl_sts.tpm_idle = true;

	CRB_CMD_SIZE_WRITE(crb->regs, TPM_CRB_DATA_BUFFER_SIZE);
	CRB_CMD_ADDR_WRITE(crb->regs, TPM_CRB_DATA_BUFFER_ADDRESS);
	CRB_RSP_SIZE_WRITE(crb->regs, TPM_CRB_DATA_BUFFER_SIZE);
	CRB_RSP_ADDR_WRITE(crb->regs, TPM_CRB_DATA_BUFFER_ADDRESS);

	error = qemu_fwcfg_add_file(TPM_CRB_LOG_AREA_FWCFG_NAME,
	    TPM_CRB_LOG_AREA_MINIMUM_SIZE, crb->tpm_log_area);
	if (error) {
		warnx("%s: failed to add fwcfg file", __func__);
		goto err_out;
	}

	error = acpi_device_add_res_fixed_memory32(acpi_dev, false,
	    TPM_CRB_ADDRESS, TPM_CRB_CONTROL_AREA_SIZE);
	if (error) {
		warnx("%s: failed to add acpi resources", __func__);
		goto err_out;
	}

	error = tpm_crb_modify_mmio_registration(true, crb);
	if (error) {
		warnx("%s: failed to register crb mmio", __func__);
		goto err_out;
	}

	error = pthread_mutex_init(&crb->mutex, NULL);
	if (error) {
		warnc(error, "%s: failed to init mutex", __func__);
		goto err_out;
	}

	error = pthread_cond_init(&crb->cond, NULL);
	if (error) {
		warnc(error, "%s: failed to init cond", __func__);
		goto err_out;
	}

	error = pthread_create(&crb->thread, NULL, tpm_crb_thread, crb);
	if (error) {
		warnx("%s: failed to create thread", __func__);
		goto err_out;
	}

	pthread_set_name_np(crb->thread, "tpm_intf_crb");

	*sc = crb;

	return (0);

err_out:
	free(crb);

	return (error);
}

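/*
 * Tear down the CRB interface: stop the worker thread, destroy the
 * synchronization primitives, unregister the MMIO range and free the
 * software state.
 */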
static void
tpm_crb_deinit(void *sc)
{
	struct tpm_crb *crb;
	int error;

	if (sc == NULL) {
		return;
	}

	crb = sc;

	crb->closing = true;
	pthread_cond_signal(&crb->cond);
	pthread_join(crb->thread, NULL);

	pthread_cond_destroy(&crb->cond);
	pthread_mutex_destroy(&crb->mutex);

	error = tpm_crb_modify_mmio_registration(false, NULL);
	assert(error == 0);

	free(crb);
}

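/*
 * Build the ACPI TPM2 table.  It advertises start method 7 (Command Response
 * Buffer), points the OS at the control area and describes the minimum event
 * log size and the log start address, which is resolved at boot time from
 * the fwcfg log file entry.
 */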
static int
tpm_crb_build_acpi_table(void *sc __unused, struct vmctx *vm_ctx)
{
	struct basl_table *table;

	BASL_EXEC(basl_table_create(&table, vm_ctx, ACPI_SIG_TPM2,
	    BASL_TABLE_ALIGNMENT));

	/* Header */
	BASL_EXEC(basl_table_append_header(table, ACPI_SIG_TPM2, 4, 1));
	/* Platform Class */
	BASL_EXEC(basl_table_append_int(table, 0, 2));
	/* Reserved */
	BASL_EXEC(basl_table_append_int(table, 0, 2));
	/* Control Address */
	BASL_EXEC(
	    basl_table_append_int(table, TPM_CRB_CONTROL_AREA_ADDRESS, 8));
	/* Start Method == (7) Command Response Buffer */
	BASL_EXEC(basl_table_append_int(table, 7, 4));
	/* Start Method Specific Parameters */
	uint8_t parameters[12] = { 0 };
	BASL_EXEC(basl_table_append_bytes(table, parameters, 12));
	/* Log Area Minimum Length */
	BASL_EXEC(
	    basl_table_append_int(table, TPM_CRB_LOG_AREA_MINIMUM_SIZE, 4));
	/* Log Area Start Address */
	BASL_EXEC(
	    basl_table_append_fwcfg(table, TPM_CRB_LOG_AREA_FWCFG_NAME, 1, 8));

	BASL_EXEC(basl_table_register_to_rsdt(table));

	return (0);
}

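/*
 * Register the CRB interface in the tpm_intf linker set so the TPM device
 * emulation can select it by its name "crb".
 */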
static struct tpm_intf tpm_intf_crb = {
	.name = TPM_CRB_INTF_NAME,
	.init = tpm_crb_init,
	.deinit = tpm_crb_deinit,
	.build_acpi_table = tpm_crb_build_acpi_table,
};
TPM_INTF_SET(tpm_intf_crb);