/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG
 * Author: Corvin Köhne <c.koehne@beckhoff.com>
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/linker_set.h>

#include <machine/vmm.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <pthread.h>
#include <pthread_np.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vmmapi.h>
#ifndef __FreeBSD__
#include <endian.h>
#endif

#include "basl.h"
#include "config.h"
#include "mem.h"
#include "qemu_fwcfg.h"
#include "tpm_device.h"
#include "tpm_intf.h"

#define TPM_CRB_ADDRESS 0xFED40000
#define TPM_CRB_REGS_SIZE 0x1000

#define TPM_CRB_CONTROL_AREA_ADDRESS \
	(TPM_CRB_ADDRESS + offsetof(struct tpm_crb_regs, ctrl_req))
#define TPM_CRB_CONTROL_AREA_SIZE TPM_CRB_REGS_SIZE

#define TPM_CRB_DATA_BUFFER_ADDRESS \
	(TPM_CRB_ADDRESS + offsetof(struct tpm_crb_regs, data_buffer))
#define TPM_CRB_DATA_BUFFER_SIZE 0xF80

#define TPM_CRB_LOCALITIES_MAX 5

#define TPM_CRB_LOG_AREA_MINIMUM_SIZE (64 * 1024)

#define TPM_CRB_LOG_AREA_FWCFG_NAME "etc/tpm/log"

#define TPM_CRB_INTF_NAME "crb"

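/*
 * Register layout of the TPM CRB interface for a single locality. The
 * offsets noted in the trailing comments are relative to the locality's
 * base address (TPM_CRB_ADDRESS for locality 0).
 */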
struct tpm_crb_regs {
	union tpm_crb_reg_loc_state {
		struct {
			uint32_t tpm_established : 1;
			uint32_t loc_assigned : 1;
			uint32_t active_locality : 3;
			uint32_t _reserved : 2;
			uint32_t tpm_req_valid_sts : 1;
		};
		uint32_t val;
	} loc_state; /* 0h */
	uint8_t _reserved1[4]; /* 4h */
	union tpm_crb_reg_loc_ctrl {
		struct {
			uint32_t request_access : 1;
			uint32_t relinquish : 1;
			uint32_t seize : 1;
			uint32_t reset_establishment_bit : 1;
		};
		uint32_t val;
	} loc_ctrl; /* 8h */
	union tpm_crb_reg_loc_sts {
		struct {
			uint32_t granted : 1;
			uint32_t been_seized : 1;
		};
		uint32_t val;
	} loc_sts; /* Ch */
	uint8_t _reserved2[0x20]; /* 10h */
	union tpm_crb_reg_intf_id {
		struct {
			uint64_t interface_type : 4;
			uint64_t interface_version : 4;
			uint64_t cap_locality : 1;
			uint64_t cap_crb_idle_bypass : 1;
			uint64_t _reserved1 : 1;
			uint64_t cap_data_xfer_size_support : 2;
			uint64_t cap_fifo : 1;
			uint64_t cap_crb : 1;
			uint64_t _reserved2 : 2;
			uint64_t interface_selector : 2;
			uint64_t intf_sel_lock : 1;
			uint64_t _reserved3 : 4;
			uint64_t rid : 8;
			uint64_t vid : 16;
			uint64_t did : 16;
		};
		uint64_t val;
	} intf_id; /* 30h */
	union tpm_crb_reg_ctrl_ext {
		struct {
			uint32_t clear;
			uint32_t remaining_bytes;
		};
		uint64_t val;
	} ctrl_ext; /* 38h */
	union tpm_crb_reg_ctrl_req {
		struct {
			uint32_t cmd_ready : 1;
			uint32_t go_idle : 1;
		};
		uint32_t val;
	} ctrl_req; /* 40h */
	union tpm_crb_reg_ctrl_sts {
		struct {
			uint32_t tpm_sts : 1;
			uint32_t tpm_idle : 1;
		};
		uint32_t val;
	} ctrl_sts; /* 44h */
	union tpm_crb_reg_ctrl_cancel {
		struct {
			uint32_t cancel : 1;
		};
		uint32_t val;
	} ctrl_cancel; /* 48h */
	union tpm_crb_reg_ctrl_start {
		struct {
			uint32_t start : 1;
		};
		uint32_t val;
	} ctrl_start; /* 4Ch */
	uint32_t int_enable; /* 50h */
	uint32_t int_sts; /* 54h */
	uint32_t cmd_size; /* 58h */
	uint32_t cmd_addr_lo; /* 5Ch */
	uint32_t cmd_addr_hi; /* 60h */
	uint32_t rsp_size; /* 64h */
	uint64_t rsp_addr; /* 68h */
	uint8_t _reserved3[0x10]; /* 70h */
	uint8_t data_buffer[TPM_CRB_DATA_BUFFER_SIZE]; /* 80h */
} __packed;
static_assert(sizeof(struct tpm_crb_regs) == TPM_CRB_REGS_SIZE,
    "Invalid size of tpm_crb");

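/*
 * Accessors for the command and response buffer descriptors. The command
 * address is split across two 32-bit registers (cmd_addr_lo/cmd_addr_hi)
 * while the response address is a single 64-bit register; these macros
 * hide that asymmetry.
 */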
#define CRB_CMD_SIZE_READ(regs) (regs.cmd_size)
#define CRB_CMD_SIZE_WRITE(regs, val) \
	do { \
		regs.cmd_size = val; \
	} while (0)
#define CRB_CMD_ADDR_READ(regs) \
	(((uint64_t)regs.cmd_addr_hi << 32) | regs.cmd_addr_lo)
#define CRB_CMD_ADDR_WRITE(regs, val) \
	do { \
		regs.cmd_addr_lo = val & 0xFFFFFFFF; \
		regs.cmd_addr_hi = val >> 32; \
	} while (0)
#define CRB_RSP_SIZE_READ(regs) (regs.rsp_size)
#define CRB_RSP_SIZE_WRITE(regs, val) \
	do { \
		regs.rsp_size = val; \
	} while (0)
#define CRB_RSP_ADDR_READ(regs) (regs.rsp_addr)
#define CRB_RSP_ADDR_WRITE(regs, val) \
	do { \
		regs.rsp_addr = val; \
	} while (0)

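/*
 * Common header shared by TPM commands and responses. The fields are
 * transmitted big endian, hence the be32toh() conversions below.
 */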
struct tpm_cmd_hdr {
	uint16_t tag;
	uint32_t len;
	union {
		uint32_t ordinal;
		uint32_t errcode;
	};
} __packed;

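/*
 * Software state of a CRB interface instance: the emulated register file
 * shared with the guest, the event log area exposed via fwcfg and the
 * worker thread which executes TPM commands. The mutex protects the
 * register file; the condition variable wakes the worker thread when the
 * guest starts a command.
 */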
struct tpm_crb {
	struct tpm_emul *emul;
	void *emul_sc;
	uint8_t tpm_log_area[TPM_CRB_LOG_AREA_MINIMUM_SIZE];
	struct tpm_crb_regs regs;
	pthread_t thread;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	bool closing;
};

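/*
 * Worker thread which executes TPM commands on behalf of the guest. It
 * sleeps on the condition variable until tpm_crb_mem_handler() signals a
 * write to ctrl_start, validates the command and response descriptors
 * against the data buffer, copies the command out of the register file and
 * hands it to the TPM emulation. The mutex is dropped while the
 * (potentially slow) command executes and reacquired to publish the
 * response and clear ctrl_start.start.
 */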
static void *
tpm_crb_thread(void *const arg)
{
	struct tpm_crb *const crb = arg;

	pthread_mutex_lock(&crb->mutex);
	for (;;) {
		/*
		 * We're releasing the lock after wake up. Therefore, we have to
		 * check the closing condition before and after going to sleep.
		 */
		if (crb->closing)
			break;

		pthread_cond_wait(&crb->cond, &crb->mutex);

		if (crb->closing)
			break;

		const uint64_t cmd_addr = CRB_CMD_ADDR_READ(crb->regs);
		const uint64_t rsp_addr = CRB_RSP_ADDR_READ(crb->regs);
		const uint32_t cmd_size = CRB_CMD_SIZE_READ(crb->regs);
		const uint32_t rsp_size = CRB_RSP_SIZE_READ(crb->regs);

		if ((cmd_addr < TPM_CRB_DATA_BUFFER_ADDRESS) ||
		    (cmd_size < sizeof (struct tpm_cmd_hdr)) ||
		    (cmd_size > TPM_CRB_DATA_BUFFER_SIZE) ||
		    (cmd_addr + cmd_size >
		    TPM_CRB_DATA_BUFFER_ADDRESS + TPM_CRB_DATA_BUFFER_SIZE)) {
			warnx("%s: invalid cmd [%16lx/%8x] outside of TPM "
			    "buffer", __func__, cmd_addr, cmd_size);
			break;
		}

		if ((rsp_addr < TPM_CRB_DATA_BUFFER_ADDRESS) ||
		    (rsp_size < sizeof (struct tpm_cmd_hdr)) ||
		    (rsp_size > TPM_CRB_DATA_BUFFER_SIZE) ||
		    (rsp_addr + rsp_size >
		    TPM_CRB_DATA_BUFFER_ADDRESS + TPM_CRB_DATA_BUFFER_SIZE)) {
			warnx("%s: invalid rsp [%16lx/%8x] outside of TPM "
			    "buffer", __func__, rsp_addr, rsp_size);
			break;
		}

		const uint64_t cmd_off = cmd_addr - TPM_CRB_DATA_BUFFER_ADDRESS;
		const uint64_t rsp_off = rsp_addr - TPM_CRB_DATA_BUFFER_ADDRESS;

		if (cmd_off > TPM_CRB_DATA_BUFFER_SIZE ||
		    cmd_off + cmd_size > TPM_CRB_DATA_BUFFER_SIZE ||
		    rsp_off > TPM_CRB_DATA_BUFFER_SIZE ||
		    rsp_off + rsp_size > TPM_CRB_DATA_BUFFER_SIZE) {
			warnx(
			    "%s: invalid cmd [%16lx, %16lx] --> [%16lx, %16lx]",
			    __func__, cmd_addr, cmd_addr + cmd_size, rsp_addr,
			    rsp_addr + rsp_size);
			break;
		}

		uint8_t cmd[TPM_CRB_DATA_BUFFER_SIZE];
		memcpy(cmd, crb->regs.data_buffer, TPM_CRB_DATA_BUFFER_SIZE);

		/*
		 * Do a basic sanity check of the TPM request header. We'll need
		 * the TPM request length for execute_cmd() below.
		 */
		struct tpm_cmd_hdr *req = (struct tpm_cmd_hdr *)&cmd[cmd_off];
		if (be32toh(req->len) < sizeof (struct tpm_cmd_hdr) ||
		    be32toh(req->len) > cmd_size) {
			warnx("%s: invalid TPM request header", __func__);
			break;
		}

		/*
		 * A TPM command can take multiple seconds to execute. As we've
		 * copied all required values and buffers at this point, we can
		 * release the mutex.
		 */
		pthread_mutex_unlock(&crb->mutex);

		/*
		 * The command response buffer interface uses a single buffer
		 * for sending a command to and receiving a response from the
		 * tpm. To avoid reading old data from the command buffer which
		 * might be a security issue, we zero out the command buffer
		 * before writing the response into it. The rsp_size parameter
		 * is controlled by the guest and it's not guaranteed that the
		 * response has a size of rsp_size (e.g. if the tpm returned an
		 * error, the response would have a different size than
		 * expected). For that reason, use a second buffer for the
		 * response.
		 */
		uint8_t rsp[TPM_CRB_DATA_BUFFER_SIZE] = { 0 };
		(void) crb->emul->execute_cmd(crb->emul_sc, req,
		    be32toh(req->len), &rsp[rsp_off], rsp_size);

		pthread_mutex_lock(&crb->mutex);
		memset(crb->regs.data_buffer, 0, TPM_CRB_DATA_BUFFER_SIZE);
		memcpy(&crb->regs.data_buffer[rsp_off], &rsp[rsp_off],
		    rsp_size);

		crb->regs.ctrl_start.start = false;
	}
	pthread_mutex_unlock(&crb->mutex);

	return (NULL);
}

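/*
 * Helper for copying MMIO data in and out of the register file. Only
 * naturally sized accesses (1, 2, 4 or 8 bytes) are accepted.
 */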
static int
tpm_crb_mmiocpy(void *const dst, void *const src, const int size)
{
	if (!(size == 1 || size == 2 || size == 4 || size == 8))
		return (EINVAL);
	memcpy(dst, src, size);

	return (0);
}

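/*
 * MMIO handler for the CRB register range. Reads return the current
 * register contents. Writes are dispatched on the dword-aligned register
 * offset: locality and control requests update the status bits directly,
 * a write to ctrl_start wakes the worker thread, and writes to the
 * command, response and data buffer registers are copied in under the
 * mutex. Unaligned accesses and writes to any other register are rejected.
 */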
static int
tpm_crb_mem_handler(struct vcpu *vcpu __unused, const int dir,
    const uint64_t addr, const int size, uint64_t *const val, void *const arg1,
    const long arg2 __unused)
{
	struct tpm_crb *crb;
	uint8_t *ptr;
	uint64_t off, shift;
	int error = 0;

	if ((addr & (size - 1)) != 0) {
		warnx("%s: unaligned %s access @ %16lx [size = %x]", __func__,
		    (dir == MEM_F_READ) ? "read" : "write", addr, size);
		return (EINVAL);
	}

	crb = arg1;

	off = addr - TPM_CRB_ADDRESS;
	if (off > TPM_CRB_REGS_SIZE || off + size >= TPM_CRB_REGS_SIZE) {
		return (EINVAL);
	}

	shift = 8 * (off & 3);
	ptr = (uint8_t *)&crb->regs + off;

	if (dir == MEM_F_READ) {
		error = tpm_crb_mmiocpy(val, ptr, size);
		if (error)
			goto err_out;
	} else {
		switch (off & ~0x3) {
		case offsetof(struct tpm_crb_regs, loc_ctrl): {
			union tpm_crb_reg_loc_ctrl loc_ctrl;

			if ((size_t)size > sizeof(loc_ctrl))
				goto err_out;

			*val = *val << shift;
			tpm_crb_mmiocpy(&loc_ctrl, val, size);

			if (loc_ctrl.relinquish) {
				crb->regs.loc_sts.granted = false;
				crb->regs.loc_state.loc_assigned = false;
			} else if (loc_ctrl.request_access) {
				crb->regs.loc_sts.granted = true;
				crb->regs.loc_state.loc_assigned = true;
			}

			break;
		}
		case offsetof(struct tpm_crb_regs, ctrl_req): {
			union tpm_crb_reg_ctrl_req req;

			if ((size_t)size > sizeof(req))
				goto err_out;

			*val = *val << shift;
			tpm_crb_mmiocpy(&req, val, size);

			if (req.cmd_ready && !req.go_idle) {
				crb->regs.ctrl_sts.tpm_idle = false;
			} else if (!req.cmd_ready && req.go_idle) {
				crb->regs.ctrl_sts.tpm_idle = true;
			}

			break;
		}
		case offsetof(struct tpm_crb_regs, ctrl_cancel): {
			/* TODO: cancel the tpm command */
			warnx(
			    "%s: cancelling a TPM command is not implemented yet",
			    __func__);

			break;
		}
		case offsetof(struct tpm_crb_regs, int_enable):
			/* No interrupt support. Ignore writes to int_enable. */
			break;

		case offsetof(struct tpm_crb_regs, ctrl_start): {
			union tpm_crb_reg_ctrl_start start;

			if ((size_t)size > sizeof(start))
				goto err_out;

			*val = *val << shift;

			pthread_mutex_lock(&crb->mutex);
			tpm_crb_mmiocpy(&start, val, size);

			if (!start.start || crb->regs.ctrl_start.start) {
				pthread_mutex_unlock(&crb->mutex);
				break;
			}

			crb->regs.ctrl_start.start = true;

			pthread_cond_signal(&crb->cond);
			pthread_mutex_unlock(&crb->mutex);

			break;
		}
		case offsetof(struct tpm_crb_regs, cmd_size):
		case offsetof(struct tpm_crb_regs, cmd_addr_lo):
		case offsetof(struct tpm_crb_regs, cmd_addr_hi):
		case offsetof(struct tpm_crb_regs, rsp_size):
		case offsetof(struct tpm_crb_regs, rsp_addr) ...
		    offsetof(struct tpm_crb_regs, rsp_addr) + 4:
		case offsetof(struct tpm_crb_regs, data_buffer) ...
		    offsetof(struct tpm_crb_regs, data_buffer) +
		    TPM_CRB_DATA_BUFFER_SIZE / 4:
			/*
			 * Those fields are used to execute a TPM command. The
			 * crb_thread will access them. For that reason, we have
			 * to acquire the crb mutex in order to write them.
			 */
			pthread_mutex_lock(&crb->mutex);
			error = tpm_crb_mmiocpy(ptr, val, size);
			pthread_mutex_unlock(&crb->mutex);
			if (error)
				goto err_out;
			break;
		default:
			/*
			 * The other fields are either readonly or we do not
			 * support writing them.
			 */
			error = EINVAL;
			goto err_out;
		}
	}

	return (0);

err_out:
	warnx("%s: invalid %s @ %16lx [size = %d]", __func__,
	    dir == MEM_F_READ ? "read" : "write", addr, size);

	return (error);
}

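/*
 * Register or unregister the MMIO range backing the CRB registers. The
 * range is sized for the maximum number of localities even though only a
 * single locality is emulated.
 */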
static int
tpm_crb_modify_mmio_registration(const bool registration, void *const arg1)
{
	struct mem_range crb_mmio = {
		.name = "crb-mmio",
		.base = TPM_CRB_ADDRESS,
		.size = TPM_CRB_LOCALITIES_MAX * TPM_CRB_CONTROL_AREA_SIZE,
		.flags = MEM_F_RW,
		.arg1 = arg1,
		.handler = tpm_crb_mem_handler,
	};

	if (registration)
		return (register_mem(&crb_mmio));
	else
		return (unregister_mem(&crb_mmio));
}

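/*
 * Create a CRB interface instance: initialize the register file with the
 * interface identification and the default command/response descriptors,
 * expose the event log through fwcfg, reserve the register block as a
 * fixed memory resource on the ACPI TPM device, register the MMIO handler
 * and start the worker thread.
 */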
static int
tpm_crb_init(void **sc, struct tpm_emul *emul, void *emul_sc,
    struct acpi_device *acpi_dev)
{
	struct tpm_crb *crb = NULL;
	int error;

	assert(sc != NULL);
	assert(emul != NULL);

	crb = calloc(1, sizeof(struct tpm_crb));
	if (crb == NULL) {
		warnx("%s: failed to allocate tpm crb", __func__);
		error = ENOMEM;
		goto err_out;
	}

	memset(crb, 0, sizeof(*crb));

	crb->emul = emul;
	crb->emul_sc = emul_sc;

	crb->regs.loc_state.tpm_req_valid_sts = true;
	crb->regs.loc_state.tpm_established = true;

	crb->regs.intf_id.interface_type = TPM_INTF_TYPE_CRB;
	crb->regs.intf_id.interface_version = TPM_INTF_VERSION_CRB;
	crb->regs.intf_id.cap_locality = false;
	crb->regs.intf_id.cap_crb_idle_bypass = false;
	crb->regs.intf_id.cap_data_xfer_size_support =
	    TPM_INTF_CAP_CRB_DATA_XFER_SIZE_64;
	crb->regs.intf_id.cap_fifo = false;
	crb->regs.intf_id.cap_crb = true;
	crb->regs.intf_id.interface_selector = TPM_INTF_SELECTOR_CRB;
	crb->regs.intf_id.intf_sel_lock = false;
	crb->regs.intf_id.rid = 0;
	crb->regs.intf_id.vid = 0x1014; /* IBM */
	crb->regs.intf_id.did = 0x1014; /* IBM */

	crb->regs.ctrl_sts.tpm_idle = true;

	CRB_CMD_SIZE_WRITE(crb->regs, TPM_CRB_DATA_BUFFER_SIZE);
	CRB_CMD_ADDR_WRITE(crb->regs, TPM_CRB_DATA_BUFFER_ADDRESS);
	CRB_RSP_SIZE_WRITE(crb->regs, TPM_CRB_DATA_BUFFER_SIZE);
	CRB_RSP_ADDR_WRITE(crb->regs, TPM_CRB_DATA_BUFFER_ADDRESS);

	error = qemu_fwcfg_add_file(TPM_CRB_LOG_AREA_FWCFG_NAME,
	    TPM_CRB_LOG_AREA_MINIMUM_SIZE, crb->tpm_log_area);
	if (error) {
		warnx("%s: failed to add fwcfg file", __func__);
		goto err_out;
	}

	error = acpi_device_add_res_fixed_memory32(acpi_dev, false,
	    TPM_CRB_ADDRESS, TPM_CRB_CONTROL_AREA_SIZE);
	if (error) {
		warnx("%s: failed to add acpi resources", __func__);
		goto err_out;
	}

	error = tpm_crb_modify_mmio_registration(true, crb);
	if (error) {
		warnx("%s: failed to register crb mmio", __func__);
		goto err_out;
	}

	error = pthread_mutex_init(&crb->mutex, NULL);
	if (error) {
		warnc(error, "%s: failed to init mutex", __func__);
		goto err_out;
	}

	error = pthread_cond_init(&crb->cond, NULL);
	if (error) {
		warnc(error, "%s: failed to init cond", __func__);
		goto err_out;
	}

	error = pthread_create(&crb->thread, NULL, tpm_crb_thread, crb);
	if (error) {
		warnx("%s: failed to create thread", __func__);
		goto err_out;
	}

	pthread_set_name_np(crb->thread, "tpm_intf_crb");

	*sc = crb;

	return (0);

err_out:
	free(crb);

	return (error);
}

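/*
 * Tear down a CRB interface instance: stop the worker thread, release the
 * synchronization primitives and unregister the MMIO handler.
 */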
static void
tpm_crb_deinit(void *sc)
{
	struct tpm_crb *crb;
	int error;

	if (sc == NULL) {
		return;
	}

	crb = sc;

	crb->closing = true;
	pthread_cond_signal(&crb->cond);
	pthread_join(crb->thread, NULL);

	pthread_cond_destroy(&crb->cond);
	pthread_mutex_destroy(&crb->mutex);

	error = tpm_crb_modify_mmio_registration(false, NULL);
	assert(error == 0);

	free(crb);
}

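/*
 * Build the ACPI TPM2 table. The start method is 7 (Command Response
 * Buffer) and the Log Area Start Address is backed by the fwcfg file
 * registered in tpm_crb_init().
 */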
static int
tpm_crb_build_acpi_table(void *sc __unused, struct vmctx *vm_ctx)
{
	struct basl_table *table;

	BASL_EXEC(basl_table_create(&table, vm_ctx, ACPI_SIG_TPM2,
	    BASL_TABLE_ALIGNMENT));

	/* Header */
	BASL_EXEC(basl_table_append_header(table, ACPI_SIG_TPM2, 4, 1));
	/* Platform Class */
	BASL_EXEC(basl_table_append_int(table, 0, 2));
	/* Reserved */
	BASL_EXEC(basl_table_append_int(table, 0, 2));
	/* Control Address */
	BASL_EXEC(
	    basl_table_append_int(table, TPM_CRB_CONTROL_AREA_ADDRESS, 8));
	/* Start Method == (7) Command Response Buffer */
	BASL_EXEC(basl_table_append_int(table, 7, 4));
	/* Start Method Specific Parameters */
	uint8_t parameters[12] = { 0 };
	BASL_EXEC(basl_table_append_bytes(table, parameters, 12));
	/* Log Area Minimum Length */
	BASL_EXEC(
	    basl_table_append_int(table, TPM_CRB_LOG_AREA_MINIMUM_SIZE, 4));
	/* Log Area Start Address */
#ifdef __FreeBSD__
	BASL_EXEC(
	    basl_table_append_fwcfg(table, TPM_CRB_LOG_AREA_FWCFG_NAME, 1, 8));
#else
	BASL_EXEC(
	    basl_table_append_fwcfg(table,
	    (const uint8_t *)TPM_CRB_LOG_AREA_FWCFG_NAME, 1, 8));
#endif

	BASL_EXEC(basl_table_register_to_rsdt(table));

	return (0);
}

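/*
 * CRB interface ops, registered in the tpm_intf linker set under the name
 * "crb" so the TPM device code can look the interface up by name.
 */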
static struct tpm_intf tpm_intf_crb = {
	.name = TPM_CRB_INTF_NAME,
	.init = tpm_crb_init,
	.deinit = tpm_crb_deinit,
	.build_acpi_table = tpm_crb_build_acpi_table,
};
TPM_INTF_SET(tpm_intf_crb);