xref: /freebsd/sys/dev/tpm/tpm20.c (revision 6cf4e30252fe48b230b9d76cac20576d5b3d2ffa)
1 /*-
2  * Copyright (c) 2018 Stormshield.
3  * Copyright (c) 2018 Semihalf.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/random.h>
29 #include <dev/random/randomdev.h>
30 
31 #include "tpm20.h"
32 
33 #define TPM_HARVEST_SIZE     16
34 /*
35  * Perform a harvest every 10 seconds.
36  * Since discrete TPMs are painfully slow
37  * we don't want to execute this too often
38  * as the chip is likely to be used by others too.
39  */
40 #define TPM_HARVEST_INTERVAL 10
41 
42 MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver");
43 
44 static void tpm20_discard_buffer(void *arg);
45 #if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
46 static void tpm20_harvest(void *arg, int unused);
47 #endif
48 static int  tpm20_restart(device_t dev, bool clear);
49 static int  tpm20_save_state(device_t dev, bool suspend);
50 
static d_open_t		tpm20_open;
static d_close_t	tpm20_close;
static d_read_t		tpm20_read;
static d_write_t	tpm20_write;
static d_ioctl_t	tpm20_ioctl;

/* Character-device entry points backing the TPM 2.0 device node. */
static struct cdevsw tpm20_cdevsw = {
	.d_version = D_VERSION,
	.d_open = tpm20_open,
	.d_close = tpm20_close,
	.d_read = tpm20_read,
	.d_write = tpm20_write,
	.d_ioctl = tpm20_ioctl,
	.d_name = "tpm20",
};
66 
/*
 * Return the pending TPM response to userland.
 *
 * Only the thread that submitted the command (sc->owner_tid, recorded
 * in tpm20_write()) may read the response.  Partial reads are
 * supported: unread bytes stay pending until fully consumed or until
 * the discard callout fires.  Returns ETIMEDOUT when nothing is
 * pending (response already discarded, or no command was written).
 */
int
tpm20_read(struct cdev *dev, struct uio *uio, int flags)
{
	struct tpm_sc *sc;
	size_t bytes_to_transfer;
	size_t offset;
	int result = 0;

	sc = (struct tpm_sc *)dev->si_drv1;

	/* A read is in progress; stop the response-discard timeout. */
	callout_stop(&sc->discard_buffer_callout);
	sx_xlock(&sc->dev_lock);
	if (sc->owner_tid != uio->uio_td->td_tid) {
		sx_xunlock(&sc->dev_lock);
		return (EPERM);
	}

	bytes_to_transfer = MIN(sc->pending_data_length, uio->uio_resid);
	/* Resume where a previous partial read left off. */
	offset = sc->total_length - sc->pending_data_length;
	if (bytes_to_transfer > 0) {
		result = uiomove((caddr_t) sc->buf + offset, bytes_to_transfer, uio);
		sc->pending_data_length -= bytes_to_transfer;
		/* Wake writers waiting for the buffer to drain. */
		cv_signal(&sc->buf_cv);
	} else {
		result = ETIMEDOUT;
	}

	sx_xunlock(&sc->dev_lock);

	return (result);
}
98 
/*
 * Validate and submit a TPM command from userland.
 *
 * The command must be at least one header (TPM_HEADER_SIZE) and at
 * most TPM_BUFSIZE bytes.  Blocks until any previous response has
 * been fully read or discarded, transmits the command, then arms the
 * discard callout and records the caller's thread id as the only
 * reader allowed to collect the response.
 */
int
tpm20_write(struct cdev *dev, struct uio *uio, int flags)
{
	struct tpm_sc *sc;
	size_t byte_count;
	int result = 0;

	sc = (struct tpm_sc *)dev->si_drv1;

	byte_count = uio->uio_resid;
	if (byte_count < TPM_HEADER_SIZE) {
		device_printf(sc->dev,
		    "Requested transfer is too small\n");
		return (EINVAL);
	}

	if (byte_count > TPM_BUFSIZE) {
		device_printf(sc->dev,
		    "Requested transfer is too large\n");
		return (E2BIG);
	}

	sx_xlock(&sc->dev_lock);

	/* Wait until the previous response has been consumed. */
	while (sc->pending_data_length != 0)
		cv_wait(&sc->buf_cv, &sc->dev_lock);

	result = uiomove(sc->buf, byte_count, uio);
	if (result != 0) {
		sx_xunlock(&sc->dev_lock);
		return (result);
	}

	result = TPM_TRANSMIT(sc->dev, byte_count);

	if (result == 0) {
		/*
		 * Give userland TPM_READ_TIMEOUT to collect the response
		 * before tpm20_discard_buffer() throws it away.
		 */
		callout_reset(&sc->discard_buffer_callout,
		    TPM_READ_TIMEOUT / tick, tpm20_discard_buffer, sc);
		sc->owner_tid = uio->uio_td->td_tid;
	}

	sx_xunlock(&sc->dev_lock);
	return (result);
}
143 
/*
 * Callout handler: drop a response that userland failed to read within
 * TPM_READ_TIMEOUT so the buffer can be reused for the next command.
 */
static void
tpm20_discard_buffer(void *arg)
{
	struct tpm_sc *sc;

	sc = (struct tpm_sc *)arg;
	/* The callout was re-armed meanwhile; let the new timeout handle it. */
	if (callout_pending(&sc->discard_buffer_callout))
		return;

	sx_xlock(&sc->dev_lock);

	/* Wipe the stale response, which may contain sensitive data. */
	memset(sc->buf, 0, TPM_BUFSIZE);
	sc->pending_data_length = 0;
	sc->total_length = 0;

	/* Wake writers blocked waiting for the buffer to drain. */
	cv_signal(&sc->buf_cv);
	sx_xunlock(&sc->dev_lock);

	device_printf(sc->dev,
	    "User failed to read buffer in time\n");
}
165 
/* Open is a no-op: per-command state lives in the softc, not per-open. */
int
tpm20_open(struct cdev *dev, int flag, int mode, struct thread *td)
{
	return (0);
}
172 
/* Close is a no-op; any unread response is reaped by the discard callout. */
int
tpm20_close(struct cdev *dev, int flag, int mode, struct thread *td)
{
	return (0);
}
179 
180 int
181 tpm20_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
182     int flags, struct thread *td)
183 {
184 
185 	return (ENOTTY);
186 }
187 
#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
/* Entropy source descriptor registered with random(4). */
static const struct random_source random_tpm = {
	.rs_ident = "TPM",
	.rs_source = RANDOM_PURE_TPM,
};
#endif
194 
195 int
196 tpm20_init(struct tpm_sc *sc)
197 {
198 	struct make_dev_args args;
199 	int result;
200 
201 	cv_init(&sc->buf_cv, "TPM buffer cv");
202 	callout_init(&sc->discard_buffer_callout, 1);
203 	sc->pending_data_length = 0;
204 	sc->total_length = 0;
205 
206 	make_dev_args_init(&args);
207 	args.mda_devsw = &tpm20_cdevsw;
208 	args.mda_uid = UID_ROOT;
209 	args.mda_gid = GID_WHEEL;
210 	args.mda_mode = TPM_CDEV_PERM_FLAG;
211 	args.mda_si_drv1 = sc;
212 	result = make_dev_s(&args, &sc->sc_cdev, TPM_CDEV_NAME);
213 	if (result != 0)
214 		tpm20_release(sc);
215 
216 #if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
217 	random_source_register(&random_tpm);
218 	TIMEOUT_TASK_INIT(taskqueue_thread, &sc->harvest_task, 0,
219 	    tpm20_harvest, sc);
220 	taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task, 0);
221 #endif
222 
223 	return (result);
224 
225 }
226 
227 void
228 tpm20_release(struct tpm_sc *sc)
229 {
230 
231 #if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
232 	if (device_is_attached(sc->dev))
233 		taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task);
234 	random_source_deregister(&random_tpm);
235 #endif
236 
237 	if (sc->buf != NULL)
238 		free(sc->buf, M_TPM20);
239 
240 	sx_destroy(&sc->dev_lock);
241 	cv_destroy(&sc->buf_cv);
242 	if (sc->sc_cdev != NULL)
243 		destroy_dev(sc->sc_cdev);
244 }
245 
/*
 * Resume method: re-issue TPM2_Startup with TPM_SU_STATE so the TPM
 * restores the state saved at suspend time, then restart periodic
 * entropy harvesting.
 */
int
tpm20_resume(device_t dev)
{

	tpm20_restart(dev, false);

#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
	struct tpm_sc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task,
	    hz * TPM_HARVEST_INTERVAL);
#endif
	return (0);
}
261 
/*
 * Suspend method: stop entropy harvesting, then ask the TPM to save
 * its volatile state (TPM2_Shutdown with TPM_SU_STATE).
 */
int
tpm20_suspend(device_t dev)
{
#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
	struct tpm_sc *sc;

	sc = device_get_softc(dev);
	/* Make sure no harvest runs or is queued while suspended. */
	taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task);
#endif
	return (tpm20_save_state(dev, true));
}
273 
/* Shutdown method: TPM2_Shutdown with TPM_SU_CLEAR before poweroff/reboot. */
int
tpm20_shutdown(device_t dev)
{
	return (tpm20_save_state(dev, false));
}
279 
#if defined TPM_HARVEST || defined RANDOM_ENABLE_TPM
/*
 * Get TPM_HARVEST_SIZE random bytes via TPM2_GetRandom and add them
 * into the system entropy pool.  Re-arms itself to run again in
 * TPM_HARVEST_INTERVAL seconds.
 */
static void
tpm20_harvest(void *arg, int unused)
{
	struct tpm_sc *sc;
	unsigned char entropy[TPM_HARVEST_SIZE];
	uint16_t entropy_size;
	int result;
	uint8_t cmd[] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS tag*/
		0x00, 0x00, 0x00, 0x0c,	/* cmd length */
		0x00, 0x00, 0x01, 0x7b,	/* cmd TPM_CC_GetRandom */
		0x00, TPM_HARVEST_SIZE 	/* number of bytes requested */
	};

	sc = arg;
	sx_xlock(&sc->dev_lock);
	/* Wait until any userland response has been consumed. */
	while (sc->pending_data_length != 0)
		cv_wait(&sc->buf_cv, &sc->dev_lock);

	memcpy(sc->buf, cmd, sizeof(cmd));
	result = TPM_TRANSMIT(sc->dev, sizeof(cmd));
	if (result != 0) {
		sx_xunlock(&sc->dev_lock);
		return;
	}

	/* We consume the response here; nothing is left for userland. */
	sc->pending_data_length = 0;
	sc->total_length = 0;

	/*
	 * The response carries a TPM2B right after the header: a 16-bit
	 * big-endian byte count followed by the random bytes.  Decode
	 * both size bytes (the original looked only at the low byte) and
	 * clamp to what the stack buffer can hold.
	 */
	entropy_size = (uint16_t)(((uint16_t)sc->buf[TPM_HEADER_SIZE] << 8) |
	    sc->buf[TPM_HEADER_SIZE + 1]);
	if (entropy_size > 0) {
		entropy_size = MIN(entropy_size, TPM_HARVEST_SIZE);
		memcpy(entropy,
			sc->buf + TPM_HEADER_SIZE + sizeof(uint16_t),
			entropy_size);
	}

	sx_xunlock(&sc->dev_lock);
	if (entropy_size > 0)
		random_harvest_queue(entropy, entropy_size, RANDOM_PURE_TPM);

	/* Schedule the next harvest. */
	taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task,
	    hz * TPM_HARVEST_INTERVAL);
}
#endif	/* TPM_HARVEST || RANDOM_ENABLE_TPM */
332 
/*
 * Send TPM2_Startup to the chip.  "clear" selects TPM_SU_CLEAR (full
 * reset) instead of TPM_SU_STATE (restore state saved at suspend).
 * Always returns 0; see the XXX below.
 */
static int
tpm20_restart(device_t dev, bool clear)
{
	struct tpm_sc *sc;
	uint8_t startup_cmd[] = {
		0x80, 0x01,             /* TPM_ST_NO_SESSIONS tag*/
		0x00, 0x00, 0x00, 0x0C, /* cmd length */
		0x00, 0x00, 0x01, 0x44, /* cmd TPM_CC_Startup */
		0x00, 0x01              /* TPM_SU_STATE */
	};

	sc = device_get_softc(dev);

	/*
	 * Inform the TPM whether we are resetting or resuming.
	 */
	if (clear)
		startup_cmd[11] = 0; /* TPM_SU_CLEAR */

	/* Nothing to do if the driver never finished attaching. */
	if (sc == NULL || sc->buf == NULL)
		return (0);

	sx_xlock(&sc->dev_lock);

	MPASS(sc->pending_data_length == 0);
	memcpy(sc->buf, startup_cmd, sizeof(startup_cmd));

	/* XXX Ignoring both TPM_TRANSMIT return and tpm's response */
	TPM_TRANSMIT(sc->dev, sizeof(startup_cmd));
	/* Drop the response so the buffer is free for the next command. */
	sc->pending_data_length = 0;
	sc->total_length = 0;

	sx_xunlock(&sc->dev_lock);

	return (0);
}
369 
/*
 * Send TPM2_Shutdown to the chip.  "suspend" selects TPM_SU_STATE
 * (save volatile state for a later resume) instead of TPM_SU_CLEAR
 * (orderly shutdown before reboot/poweroff).  Always returns 0; see
 * the XXX below.
 */
static int
tpm20_save_state(device_t dev, bool suspend)
{
	struct tpm_sc *sc;
	uint8_t save_cmd[] = {
		0x80, 0x01,             /* TPM_ST_NO_SESSIONS tag*/
		0x00, 0x00, 0x00, 0x0C, /* cmd length */
		0x00, 0x00, 0x01, 0x45, /* cmd TPM_CC_Shutdown */
		0x00, 0x00              /* TPM_SU_STATE */
	};

	sc = device_get_softc(dev);

	/*
	 * Inform the TPM whether we are going to suspend or reboot/shutdown.
	 */
	if (suspend)
		save_cmd[11] = 1; /* TPM_SU_STATE */

	/* Nothing to do if the driver never finished attaching. */
	if (sc == NULL || sc->buf == NULL)
		return (0);

	sx_xlock(&sc->dev_lock);

	MPASS(sc->pending_data_length == 0);
	memcpy(sc->buf, save_cmd, sizeof(save_cmd));

	/* XXX Ignoring both TPM_TRANSMIT return and tpm's response */
	TPM_TRANSMIT(sc->dev, sizeof(save_cmd));
	/* Drop the response so the buffer is free for the next command. */
	sc->pending_data_length = 0;
	sc->total_length = 0;

	sx_xunlock(&sc->dev_lock);

	return (0);
}
406 
407 int32_t
408 tpm20_get_timeout(uint32_t command)
409 {
410 	int32_t timeout;
411 
412 	switch (command) {
413 		case TPM_CC_CreatePrimary:
414 		case TPM_CC_Create:
415 		case TPM_CC_CreateLoaded:
416 			timeout = TPM_TIMEOUT_LONG;
417 			break;
418 		case TPM_CC_SequenceComplete:
419 		case TPM_CC_Startup:
420 		case TPM_CC_SequenceUpdate:
421 		case TPM_CC_GetCapability:
422 		case TPM_CC_PCR_Extend:
423 		case TPM_CC_EventSequenceComplete:
424 		case TPM_CC_HashSequenceStart:
425 			timeout = TPM_TIMEOUT_C;
426 			break;
427 		default:
428 			timeout = TPM_TIMEOUT_B;
429 			break;
430 	}
431 	return timeout;
432 }
433