1 /*- 2 * Copyright (c) 2018 Stormshield. 3 * Copyright (c) 2018 Semihalf. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 #include <sys/random.h> 30 31 #include "tpm20.h" 32 33 #define TPM_HARVEST_SIZE 16 34 /* 35 * Perform a harvest every 10 seconds. 36 * Since discrete TPMs are painfully slow 37 * we don't want to execute this too often 38 * as the chip is likely to be used by others too. 39 */ 40 #define TPM_HARVEST_INTERVAL 10 41 42 MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver"); 43 44 static void tpm20_discard_buffer(void *arg); 45 #ifdef TPM_HARVEST 46 static void tpm20_harvest(void *arg, int unused); 47 #endif 48 static int tpm20_save_state(device_t dev, bool suspend); 49 50 static d_open_t tpm20_open; 51 static d_close_t tpm20_close; 52 static d_read_t tpm20_read; 53 static d_write_t tpm20_write; 54 static d_ioctl_t tpm20_ioctl; 55 56 static struct cdevsw tpm20_cdevsw = { 57 .d_version = D_VERSION, 58 .d_open = tpm20_open, 59 .d_close = tpm20_close, 60 .d_read = tpm20_read, 61 .d_write = tpm20_write, 62 .d_ioctl = tpm20_ioctl, 63 .d_name = "tpm20", 64 }; 65 66 int 67 tpm20_read(struct cdev *dev, struct uio *uio, int flags) 68 { 69 struct tpm_sc *sc; 70 size_t bytes_to_transfer; 71 int result = 0; 72 73 sc = (struct tpm_sc *)dev->si_drv1; 74 75 callout_stop(&sc->discard_buffer_callout); 76 sx_xlock(&sc->dev_lock); 77 if (sc->owner_tid != uio->uio_td->td_tid) { 78 sx_xunlock(&sc->dev_lock); 79 return (EPERM); 80 } 81 82 bytes_to_transfer = MIN(sc->pending_data_length, uio->uio_resid); 83 if (bytes_to_transfer > 0) { 84 result = uiomove((caddr_t) sc->buf, bytes_to_transfer, uio); 85 memset(sc->buf, 0, TPM_BUFSIZE); 86 sc->pending_data_length = 0; 87 cv_signal(&sc->buf_cv); 88 } else { 89 result = ETIMEDOUT; 90 } 91 92 sx_xunlock(&sc->dev_lock); 93 94 return (result); 95 } 96 97 int 98 tpm20_write(struct cdev *dev, struct uio *uio, int flags) 99 { 100 struct tpm_sc *sc; 101 size_t byte_count; 102 int result = 0; 103 104 sc = (struct tpm_sc *)dev->si_drv1; 105 106 byte_count = uio->uio_resid; 107 if (byte_count < TPM_HEADER_SIZE) { 108 device_printf(sc->dev, 109 "Requested transfer is too small\n"); 110 return (EINVAL); 111 } 112 113 if (byte_count > TPM_BUFSIZE) { 114 device_printf(sc->dev, 115 "Requested transfer is too large\n"); 116 return (E2BIG); 117 } 118 119 sx_xlock(&sc->dev_lock); 120 121 while (sc->pending_data_length != 0) 122 cv_wait(&sc->buf_cv, &sc->dev_lock); 123 124 result = uiomove(sc->buf, byte_count, uio); 125 if (result != 0) { 126 sx_xunlock(&sc->dev_lock); 127 return (result); 128 } 129 130 result = sc->transmit(sc, byte_count); 131 132 if (result == 0) { 133 callout_reset(&sc->discard_buffer_callout, 134 TPM_READ_TIMEOUT / tick, tpm20_discard_buffer, sc); 135 sc->owner_tid = uio->uio_td->td_tid; 136 } 137 138 sx_xunlock(&sc->dev_lock); 139 return (result); 140 } 141 142 static void 143 tpm20_discard_buffer(void *arg) 144 { 145 struct tpm_sc *sc; 146 147 sc = (struct tpm_sc *)arg; 148 if (callout_pending(&sc->discard_buffer_callout)) 149 return; 150 151 sx_xlock(&sc->dev_lock); 152 153 memset(sc->buf, 0, TPM_BUFSIZE); 154 sc->pending_data_length = 0; 155 156 cv_signal(&sc->buf_cv); 157 sx_xunlock(&sc->dev_lock); 158 159 device_printf(sc->dev, 160 "User failed to read buffer in time\n"); 161 } 162 163 int 164 tpm20_open(struct cdev *dev, int flag, int mode, struct thread *td) 165 { 166 167 return (0); 168 } 169 170 int 171 tpm20_close(struct cdev *dev, int flag, int mode, struct thread *td) 172 { 173 174 return (0); 175 } 176 177 int 178 tpm20_ioctl(struct cdev *dev, u_long cmd, caddr_t data, 179 int flags, struct thread *td) 180 { 181 182 return (ENOTTY); 183 } 184 185 int 186 tpm20_init(struct tpm_sc *sc) 187 { 188 struct make_dev_args args; 189 int result; 190 191 cv_init(&sc->buf_cv, "TPM buffer cv"); 192 callout_init(&sc->discard_buffer_callout, 1); 193 sc->pending_data_length = 0; 194 195 make_dev_args_init(&args); 196 args.mda_devsw = &tpm20_cdevsw; 197 args.mda_uid = UID_ROOT; 198 args.mda_gid = GID_WHEEL; 199 args.mda_mode = TPM_CDEV_PERM_FLAG; 200 args.mda_si_drv1 = sc; 201 result = make_dev_s(&args, &sc->sc_cdev, TPM_CDEV_NAME); 202 if (result != 0) 203 tpm20_release(sc); 204 205 #ifdef TPM_HARVEST 206 TIMEOUT_TASK_INIT(taskqueue_thread, &sc->harvest_task, 0, 207 tpm20_harvest, sc); 208 taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task, 0); 209 #endif 210 211 return (result); 212 213 } 214 215 void 216 tpm20_release(struct tpm_sc *sc) 217 { 218 219 #ifdef TPM_HARVEST 220 if (device_is_attached(sc->dev)) 221 taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task); 222 #endif 223 224 if (sc->buf != NULL) 225 free(sc->buf, M_TPM20); 226 227 sx_destroy(&sc->dev_lock); 228 cv_destroy(&sc->buf_cv); 229 if (sc->sc_cdev != NULL) 230 destroy_dev(sc->sc_cdev); 231 } 232 233 int 234 tpm20_suspend(device_t dev) 235 { 236 return (tpm20_save_state(dev, true)); 237 } 238 239 int 240 tpm20_shutdown(device_t dev) 241 { 242 return (tpm20_save_state(dev, false)); 243 } 244 245 #ifdef TPM_HARVEST 246 /* 247 * Get TPM_HARVEST_SIZE random bytes and add them 248 * into system entropy pool. 249 */ 250 static void 251 tpm20_harvest(void *arg, int unused) 252 { 253 struct tpm_sc *sc; 254 unsigned char entropy[TPM_HARVEST_SIZE]; 255 uint16_t entropy_size; 256 int result; 257 uint8_t cmd[] = { 258 0x80, 0x01, /* TPM_ST_NO_SESSIONS tag*/ 259 0x00, 0x00, 0x00, 0x0c, /* cmd length */ 260 0x00, 0x00, 0x01, 0x7b, /* cmd TPM_CC_GetRandom */ 261 0x00, TPM_HARVEST_SIZE /* number of bytes requested */ 262 }; 263 264 sc = arg; 265 sx_xlock(&sc->dev_lock); 266 while (sc->pending_data_length != 0) 267 cv_wait(&sc->buf_cv, &sc->dev_lock); 268 269 memcpy(sc->buf, cmd, sizeof(cmd)); 270 result = sc->transmit(sc, sizeof(cmd)); 271 if (result != 0) { 272 sx_xunlock(&sc->dev_lock); 273 return; 274 } 275 276 /* Ignore response size */ 277 sc->pending_data_length = 0; 278 279 /* The number of random bytes we got is placed right after the header */ 280 entropy_size = (uint16_t) sc->buf[TPM_HEADER_SIZE + 1]; 281 if (entropy_size > 0) { 282 entropy_size = MIN(entropy_size, TPM_HARVEST_SIZE); 283 memcpy(entropy, 284 sc->buf + TPM_HEADER_SIZE + sizeof(uint16_t), 285 entropy_size); 286 } 287 288 sx_xunlock(&sc->dev_lock); 289 if (entropy_size > 0) 290 random_harvest_queue(entropy, entropy_size, RANDOM_PURE_TPM); 291 292 taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task, 293 hz * TPM_HARVEST_INTERVAL); 294 } 295 #endif /* TPM_HARVEST */ 296 297 static int 298 tpm20_save_state(device_t dev, bool suspend) 299 { 300 struct tpm_sc *sc; 301 uint8_t save_cmd[] = { 302 0x80, 0x01, /* TPM_ST_NO_SESSIONS tag*/ 303 0x00, 0x00, 0x00, 0x0C, /* cmd length */ 304 0x00, 0x00, 0x01, 0x45, /* cmd TPM_CC_Shutdown */ 305 0x00, 0x00 /* TPM_SU_STATE */ 306 }; 307 308 sc = device_get_softc(dev); 309 310 /* 311 * Inform the TPM whether we are going to suspend or reboot/shutdown. 312 */ 313 if (suspend) 314 save_cmd[11] = 1; /* TPM_SU_STATE */ 315 316 if (sc == NULL || sc->buf == NULL) 317 return (0); 318 319 sx_xlock(&sc->dev_lock); 320 321 memcpy(sc->buf, save_cmd, sizeof(save_cmd)); 322 sc->transmit(sc, sizeof(save_cmd)); 323 324 sx_xunlock(&sc->dev_lock); 325 326 return (0); 327 } 328 329 int32_t 330 tpm20_get_timeout(uint32_t command) 331 { 332 int32_t timeout; 333 334 switch (command) { 335 case TPM_CC_CreatePrimary: 336 case TPM_CC_Create: 337 case TPM_CC_CreateLoaded: 338 timeout = TPM_TIMEOUT_LONG; 339 break; 340 case TPM_CC_SequenceComplete: 341 case TPM_CC_Startup: 342 case TPM_CC_SequenceUpdate: 343 case TPM_CC_GetCapability: 344 case TPM_CC_PCR_Extend: 345 case TPM_CC_EventSequenceComplete: 346 case TPM_CC_HashSequenceStart: 347 timeout = TPM_TIMEOUT_C; 348 break; 349 default: 350 timeout = TPM_TIMEOUT_B; 351 break; 352 } 353 return timeout; 354 } 355