1 /*-
2 * Copyright (c) 2018 Stormshield.
3 * Copyright (c) 2018 Semihalf.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/random.h>
29 #include <dev/random/randomdev.h>
30
31 #include "tpm20.h"
32
/* Number of random bytes requested from the TPM on each harvest pass. */
#define TPM_HARVEST_SIZE 16
/*
 * Perform a harvest every 10 seconds.
 * Since discrete TPMs are painfully slow
 * we don't want to execute this too often
 * as the chip is likely to be used by others too.
 */
#define TPM_HARVEST_INTERVAL 10

/* Malloc type for the shared command/response buffer (sc->buf). */
MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver");

/* Forward declarations for internal helpers. */
static void tpm20_discard_buffer(void *arg);
#ifdef TPM_HARVEST
static void tpm20_harvest(void *arg, int unused);
#endif
static int tpm20_save_state(device_t dev, bool suspend);
49
static d_open_t tpm20_open;
static d_close_t tpm20_close;
static d_read_t tpm20_read;
static d_write_t tpm20_write;
static d_ioctl_t tpm20_ioctl;

/* Character device switch backing the /dev node created in tpm20_init(). */
static struct cdevsw tpm20_cdevsw = {
	.d_version = D_VERSION,
	.d_open = tpm20_open,
	.d_close = tpm20_close,
	.d_read = tpm20_read,
	.d_write = tpm20_write,
	.d_ioctl = tpm20_ioctl,
	.d_name = "tpm20",
};
65
/*
 * Copy the pending TPM response out to userspace.  Partial reads are
 * supported: the unread remainder stays buffered and a later read resumes
 * at the right offset.  Returns EPERM if the caller is not the thread
 * that transmitted the command, ETIMEDOUT if there is no pending data
 * (e.g. the watchdog already discarded the response).
 */
int
tpm20_read(struct cdev *dev, struct uio *uio, int flags)
{
	struct tpm_sc *sc;
	size_t bytes_to_transfer;
	size_t offset;
	int result = 0;

	sc = (struct tpm_sc *)dev->si_drv1;

	/*
	 * Cancel the discard watchdog armed by tpm20_write().
	 * NOTE(review): this happens before the owner check below, so a
	 * read by a non-owner thread also cancels the watchdog — confirm
	 * this is intended.
	 */
	callout_stop(&sc->discard_buffer_callout);
	sx_xlock(&sc->dev_lock);
	/* Only the thread that sent the command may read the response. */
	if (sc->owner_tid != uio->uio_td->td_tid) {
		sx_xunlock(&sc->dev_lock);
		return (EPERM);
	}

	bytes_to_transfer = MIN(sc->pending_data_length, uio->uio_resid);
	/* Resume where a previous partial read left off. */
	offset = sc->total_length - sc->pending_data_length;
	if (bytes_to_transfer > 0) {
		result = uiomove((caddr_t) sc->buf + offset, bytes_to_transfer, uio);
		sc->pending_data_length -= bytes_to_transfer;
		/* Wake writers/harvester waiting for the buffer to drain. */
		cv_signal(&sc->buf_cv);
	} else {
		result = ETIMEDOUT;
	}

	sx_xunlock(&sc->dev_lock);

	return (result);
}
97
/*
 * Accept one complete TPM 2.0 command (header included) from userspace,
 * hand it to the bus-specific transmit method and, on success, arm a
 * watchdog that discards the response if it is not read in time.
 * Returns EINVAL/E2BIG for malformed sizes, otherwise the uiomove or
 * transmit status.
 */
int
tpm20_write(struct cdev *dev, struct uio *uio, int flags)
{
	struct tpm_sc *sc;
	size_t byte_count;
	int result = 0;

	sc = (struct tpm_sc *)dev->si_drv1;

	byte_count = uio->uio_resid;
	/* Every TPM 2.0 command starts with a fixed-size header. */
	if (byte_count < TPM_HEADER_SIZE) {
		device_printf(sc->dev,
		    "Requested transfer is too small\n");
		return (EINVAL);
	}

	if (byte_count > TPM_BUFSIZE) {
		device_printf(sc->dev,
		    "Requested transfer is too large\n");
		return (E2BIG);
	}

	sx_xlock(&sc->dev_lock);

	/* Wait until the previous response is fully read or discarded. */
	while (sc->pending_data_length != 0)
		cv_wait(&sc->buf_cv, &sc->dev_lock);

	result = uiomove(sc->buf, byte_count, uio);
	if (result != 0) {
		sx_xunlock(&sc->dev_lock);
		return (result);
	}

	result = TPM_TRANSMIT(sc->dev, byte_count);

	if (result == 0) {
		/*
		 * Give the caller TPM_READ_TIMEOUT to collect the response,
		 * then tpm20_discard_buffer() wipes it.  Remember the sending
		 * thread so only it may read the reply.
		 */
		callout_reset(&sc->discard_buffer_callout,
		    TPM_READ_TIMEOUT / tick, tpm20_discard_buffer, sc);
		sc->owner_tid = uio->uio_td->td_tid;
	}

	sx_xunlock(&sc->dev_lock);
	return (result);
}
142
/*
 * Callout handler: wipe a response the user failed to read within
 * TPM_READ_TIMEOUT, freeing the buffer for the next command.
 */
static void
tpm20_discard_buffer(void *arg)
{
	struct tpm_sc *sc;

	sc = (struct tpm_sc *)arg;
	/* If the callout was re-armed in the meantime, do nothing yet. */
	if (callout_pending(&sc->discard_buffer_callout))
		return;

	sx_xlock(&sc->dev_lock);

	memset(sc->buf, 0, TPM_BUFSIZE);
	sc->pending_data_length = 0;
	sc->total_length = 0;

	/* Unblock writers waiting for the buffer in tpm20_write(). */
	cv_signal(&sc->buf_cv);
	sx_xunlock(&sc->dev_lock);

	device_printf(sc->dev,
	    "User failed to read buffer in time\n");
}
164
/* Open handler: no per-open state; access control is the cdev mode. */
int
tpm20_open(struct cdev *dev, int flag, int mode, struct thread *td)
{

	return (0);
}
171
/* Close handler: nothing to tear down; buffered state is callout-managed. */
int
tpm20_close(struct cdev *dev, int flag, int mode, struct thread *td)
{

	return (0);
}
178
/* No ioctls are implemented; all traffic goes through read/write. */
int
tpm20_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{

	return (ENOTTY);
}
186
#ifdef TPM_HARVEST
/* Entropy source descriptor registered with random(4). */
static const struct random_source random_tpm = {
	.rs_ident = "TPM",
	.rs_source = RANDOM_PURE_TPM,
};
#endif
193
194 int
tpm20_init(struct tpm_sc * sc)195 tpm20_init(struct tpm_sc *sc)
196 {
197 struct make_dev_args args;
198 int result;
199
200 cv_init(&sc->buf_cv, "TPM buffer cv");
201 callout_init(&sc->discard_buffer_callout, 1);
202 sc->pending_data_length = 0;
203 sc->total_length = 0;
204
205 make_dev_args_init(&args);
206 args.mda_devsw = &tpm20_cdevsw;
207 args.mda_uid = UID_ROOT;
208 args.mda_gid = GID_WHEEL;
209 args.mda_mode = TPM_CDEV_PERM_FLAG;
210 args.mda_si_drv1 = sc;
211 result = make_dev_s(&args, &sc->sc_cdev, TPM_CDEV_NAME);
212 if (result != 0)
213 tpm20_release(sc);
214
215 #ifdef TPM_HARVEST
216 random_source_register(&random_tpm);
217 TIMEOUT_TASK_INIT(taskqueue_thread, &sc->harvest_task, 0,
218 tpm20_harvest, sc);
219 taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task, 0);
220 #endif
221
222 return (result);
223
224 }
225
/*
 * Undo tpm20_init(): stop entropy harvesting, free the command buffer,
 * destroy the synchronization primitives and the character device.
 * Also serves as the error path of tpm20_init().
 */
void
tpm20_release(struct tpm_sc *sc)
{

#ifdef TPM_HARVEST
	/* Only drain the task if attach got far enough to schedule it. */
	if (device_is_attached(sc->dev))
		taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task);
	random_source_deregister(&random_tpm);
#endif

	if (sc->buf != NULL)
		free(sc->buf, M_TPM20);

	sx_destroy(&sc->dev_lock);
	cv_destroy(&sc->buf_cv);
	if (sc->sc_cdev != NULL)
		destroy_dev(sc->sc_cdev);
}
244
/* Suspend method: tell the TPM to save state for later restore. */
int
tpm20_suspend(device_t dev)
{
	return (tpm20_save_state(dev, true));
}
250
/* Shutdown method: orderly TPM shutdown without state preservation. */
int
tpm20_shutdown(device_t dev)
{
	return (tpm20_save_state(dev, false));
}
256
#ifdef TPM_HARVEST
/*
 * Get TPM_HARVEST_SIZE random bytes and add them
 * into system entropy pool.
 *
 * Always reschedules itself for the next TPM_HARVEST_INTERVAL, even
 * when the transmit fails: a transient TPM error must not permanently
 * stop entropy harvesting.
 */
static void
tpm20_harvest(void *arg, int unused)
{
	struct tpm_sc *sc;
	unsigned char entropy[TPM_HARVEST_SIZE];
	uint16_t entropy_size;
	int result;
	uint8_t cmd[] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS tag*/
		0x00, 0x00, 0x00, 0x0c,	/* cmd length */
		0x00, 0x00, 0x01, 0x7b,	/* cmd TPM_CC_GetRandom */
		0x00, TPM_HARVEST_SIZE 	/* number of bytes requested */
	};

	sc = arg;
	sx_xlock(&sc->dev_lock);
	/* Wait for any userspace response to be consumed first. */
	while (sc->pending_data_length != 0)
		cv_wait(&sc->buf_cv, &sc->dev_lock);

	memcpy(sc->buf, cmd, sizeof(cmd));
	result = TPM_TRANSMIT(sc->dev, sizeof(cmd));
	if (result != 0) {
		sx_xunlock(&sc->dev_lock);
		/* Transient failure: skip this pass but keep the schedule. */
		goto out;
	}

	/* Ignore response size */
	sc->pending_data_length = 0;
	sc->total_length = 0;

	/*
	 * The number of random bytes we got is placed right after the header.
	 * Only the low byte of the big-endian UINT16 is read, which is fine
	 * as long as TPM_HARVEST_SIZE fits in one byte; MIN() below clamps
	 * any garbage anyway.
	 */
	entropy_size = (uint16_t) sc->buf[TPM_HEADER_SIZE + 1];
	if (entropy_size > 0) {
		entropy_size = MIN(entropy_size, TPM_HARVEST_SIZE);
		memcpy(entropy,
		    sc->buf + TPM_HEADER_SIZE + sizeof(uint16_t),
		    entropy_size);
	}

	sx_xunlock(&sc->dev_lock);
	if (entropy_size > 0)
		random_harvest_queue(entropy, entropy_size, RANDOM_PURE_TPM);

out:
	taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task,
	    hz * TPM_HARVEST_INTERVAL);
}
#endif /* TPM_HARVEST */
309
/*
 * Send TPM2_Shutdown to the chip so it can wrap up its internal state.
 * With suspend == true a TPM_SU_STATE shutdown is requested (state is
 * preserved for resume); otherwise TPM_SU_CLEAR (plain shutdown).
 * Always returns 0; the transmit result is intentionally ignored since
 * there is nothing useful to do about a failure at this point.
 */
static int
tpm20_save_state(device_t dev, bool suspend)
{
	struct tpm_sc *sc;
	uint8_t save_cmd[] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS tag*/
		0x00, 0x00, 0x00, 0x0C, /* cmd length */
		0x00, 0x00, 0x01, 0x45, /* cmd TPM_CC_Shutdown */
		0x00, 0x00		/* TPM_SU_CLEAR; patched below */
	};

	sc = device_get_softc(dev);

	/*
	 * Inform the TPM whether we are going to suspend or reboot/shutdown.
	 */
	if (suspend)
		save_cmd[11] = 1;	/* TPM_SU_STATE */

	if (sc == NULL || sc->buf == NULL)
		return (0);

	sx_xlock(&sc->dev_lock);

	memcpy(sc->buf, save_cmd, sizeof(save_cmd));
	TPM_TRANSMIT(sc->dev, sizeof(save_cmd));

	sx_xunlock(&sc->dev_lock);

	return (0);
}
341
342 int32_t
tpm20_get_timeout(uint32_t command)343 tpm20_get_timeout(uint32_t command)
344 {
345 int32_t timeout;
346
347 switch (command) {
348 case TPM_CC_CreatePrimary:
349 case TPM_CC_Create:
350 case TPM_CC_CreateLoaded:
351 timeout = TPM_TIMEOUT_LONG;
352 break;
353 case TPM_CC_SequenceComplete:
354 case TPM_CC_Startup:
355 case TPM_CC_SequenceUpdate:
356 case TPM_CC_GetCapability:
357 case TPM_CC_PCR_Extend:
358 case TPM_CC_EventSequenceComplete:
359 case TPM_CC_HashSequenceStart:
360 timeout = TPM_TIMEOUT_C;
361 break;
362 default:
363 timeout = TPM_TIMEOUT_B;
364 break;
365 }
366 return timeout;
367 }
368