/*-
 * Copyright (c) 2018 Stormshield.
 * Copyright (c) 2018 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/random.h>

#include "tpm20.h"

#define	TPM_HARVEST_SIZE	16
/*
 * Perform a harvest every 10 seconds.
 * Since discrete TPMs are painfully slow
 * we don't want to execute this too often
 * as the chip is likely to be used by others too.
 */
#define	TPM_HARVEST_INTERVAL	10000000

MALLOC_DECLARE(M_TPM20);
MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver");

static void tpm20_discard_buffer(void *arg);
#ifdef TPM_HARVEST
static void tpm20_harvest(void *arg);
#endif
static int tpm20_save_state(device_t dev, bool suspend);

static d_open_t		tpm20_open;
static d_close_t	tpm20_close;
static d_read_t		tpm20_read;
static d_write_t	tpm20_write;
static d_ioctl_t	tpm20_ioctl;

static struct cdevsw tpm20_cdevsw = {
	.d_version = D_VERSION,
	.d_open = tpm20_open,
	.d_close = tpm20_close,
	.d_read = tpm20_read,
	.d_write = tpm20_write,
	.d_ioctl = tpm20_ioctl,
	.d_name = "tpm20",
};

int
tpm20_read(struct cdev *dev, struct uio *uio, int flags)
{
	struct tpm_sc *sc;
	size_t bytes_to_transfer;
	int result = 0;

	sc = (struct tpm_sc *)dev->si_drv1;

	callout_stop(&sc->discard_buffer_callout);
	sx_xlock(&sc->dev_lock);
	if (sc->owner_tid != uio->uio_td->td_tid) {
		sx_xunlock(&sc->dev_lock);
		return (EPERM);
	}

	bytes_to_transfer = MIN(sc->pending_data_length, uio->uio_resid);
	if (bytes_to_transfer > 0) {
		result = uiomove((caddr_t) sc->buf, bytes_to_transfer, uio);
		memset(sc->buf, 0, TPM_BUFSIZE);
		sc->pending_data_length = 0;
		cv_signal(&sc->buf_cv);
	} else {
		result = ETIMEDOUT;
	}

	sx_xunlock(&sc->dev_lock);

	return (result);
}

int
tpm20_write(struct cdev *dev, struct uio *uio, int flags)
{
	struct tpm_sc *sc;
	size_t byte_count;
	int result = 0;

	sc = (struct tpm_sc *)dev->si_drv1;

	byte_count = uio->uio_resid;
	if (byte_count < TPM_HEADER_SIZE) {
		device_printf(sc->dev,
		    "Requested transfer is too small\n");
		return (EINVAL);
	}

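	/*
	 * The whole command must fit in the driver's single transfer
	 * buffer; anything larger is rejected.
	 */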
	if (byte_count > TPM_BUFSIZE) {
		device_printf(sc->dev,
		    "Requested transfer is too large\n");
		return (E2BIG);
	}

	sx_xlock(&sc->dev_lock);

	while (sc->pending_data_length != 0)
		cv_wait(&sc->buf_cv, &sc->dev_lock);

	result = uiomove(sc->buf, byte_count, uio);
	if (result != 0) {
		sx_xunlock(&sc->dev_lock);
		return (result);
	}

	result = sc->transmit(sc, byte_count);

	if (result == 0) {
		callout_reset(&sc->discard_buffer_callout,
		    TPM_READ_TIMEOUT / tick, tpm20_discard_buffer, sc);
		sc->owner_tid = uio->uio_td->td_tid;
	}

	sx_xunlock(&sc->dev_lock);
	return (result);
}

static void
tpm20_discard_buffer(void *arg)
{
	struct tpm_sc *sc;

	sc = (struct tpm_sc *)arg;
	if (callout_pending(&sc->discard_buffer_callout))
		return;

	sx_xlock(&sc->dev_lock);

	memset(sc->buf, 0, TPM_BUFSIZE);
	sc->pending_data_length = 0;

	cv_signal(&sc->buf_cv);
	sx_xunlock(&sc->dev_lock);

	device_printf(sc->dev,
	    "User failed to read buffer in time\n");
}

int
tpm20_open(struct cdev *dev, int flag, int mode, struct thread *td)
{

	return (0);
}

int
tpm20_close(struct cdev *dev, int flag, int mode, struct thread *td)
{

	return (0);
}

int
tpm20_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{

	return (ENOTTY);
}

int
tpm20_init(struct tpm_sc *sc)
{
	struct make_dev_args args;
	int result;

	sc->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK);
	sx_init(&sc->dev_lock, "TPM driver lock");
	cv_init(&sc->buf_cv, "TPM buffer cv");
	callout_init(&sc->discard_buffer_callout, 1);
#ifdef TPM_HARVEST
	sc->harvest_ticks = TPM_HARVEST_INTERVAL / tick;
	callout_init(&sc->harvest_callout, 1);
	callout_reset(&sc->harvest_callout, 0, tpm20_harvest, sc);
#endif
	sc->pending_data_length = 0;

	make_dev_args_init(&args);
	args.mda_devsw = &tpm20_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_WHEEL;
	args.mda_mode = TPM_CDEV_PERM_FLAG;
	args.mda_si_drv1 = sc;
	result = make_dev_s(&args, &sc->sc_cdev, TPM_CDEV_NAME);
	if (result != 0)
		tpm20_release(sc);

	return (result);
}

void
tpm20_release(struct tpm_sc *sc)
{

#ifdef TPM_HARVEST
	callout_drain(&sc->harvest_callout);
#endif

	if (sc->buf != NULL)
		free(sc->buf, M_TPM20);

	sx_destroy(&sc->dev_lock);
	cv_destroy(&sc->buf_cv);
	if (sc->sc_cdev != NULL)
		destroy_dev(sc->sc_cdev);
}

int
tpm20_suspend(device_t dev)
{
	return (tpm20_save_state(dev, true));
}

int
tpm20_shutdown(device_t dev)
{
	return (tpm20_save_state(dev, false));
}

#ifdef TPM_HARVEST

/*
 * Get TPM_HARVEST_SIZE random bytes and add them
 * into system entropy pool.
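 *
 * A TPM2_GetRandom command is sent to the chip; the 16-bit size field
 * following the response header says how many bytes came back, and the
 * harvested bytes are queued into random(4) as RANDOM_PURE_TPM.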
 */
static void
tpm20_harvest(void *arg)
{
	struct tpm_sc *sc;
	unsigned char entropy[TPM_HARVEST_SIZE];
	uint16_t entropy_size;
	int result;
	uint8_t cmd[] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS tag */
		0x00, 0x00, 0x00, 0x0c,	/* cmd length */
		0x00, 0x00, 0x01, 0x7b,	/* cmd TPM_CC_GetRandom */
		0x00, TPM_HARVEST_SIZE	/* number of bytes requested */
	};

	sc = arg;
	sx_xlock(&sc->dev_lock);
	while (sc->pending_data_length != 0)
		cv_wait(&sc->buf_cv, &sc->dev_lock);

	memcpy(sc->buf, cmd, sizeof(cmd));
	result = sc->transmit(sc, sizeof(cmd));
	if (result != 0) {
		sx_xunlock(&sc->dev_lock);
		return;
	}

	/* Ignore response size */
	sc->pending_data_length = 0;

	/*
	 * The number of random bytes we got is placed right after the header.
	 * The size field is big-endian; since at most TPM_HARVEST_SIZE (< 256)
	 * bytes were requested, the low byte is sufficient.
	 */
	entropy_size = (uint16_t) sc->buf[TPM_HEADER_SIZE + 1];
	if (entropy_size > 0) {
		entropy_size = MIN(entropy_size, TPM_HARVEST_SIZE);
		memcpy(entropy,
		    sc->buf + TPM_HEADER_SIZE + sizeof(uint16_t),
		    entropy_size);
	}

	sx_xunlock(&sc->dev_lock);
	if (entropy_size > 0)
		random_harvest_queue(entropy, entropy_size, RANDOM_PURE_TPM);

	callout_reset(&sc->harvest_callout, sc->harvest_ticks, tpm20_harvest, sc);
}
#endif	/* TPM_HARVEST */

static int
tpm20_save_state(device_t dev, bool suspend)
{
	struct tpm_sc *sc;
	uint8_t save_cmd[] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS tag */
		0x00, 0x00, 0x00, 0x0C,	/* cmd length */
		0x00, 0x00, 0x01, 0x45,	/* cmd TPM_CC_Shutdown */
		0x00, 0x00		/* TPM_SU_CLEAR; patched below for suspend */
	};

	sc = device_get_softc(dev);

	/*
	 * Inform the TPM whether we are going to suspend or reboot/shutdown.
	 */
	if (suspend)
		save_cmd[11] = 1;	/* TPM_SU_STATE */

	if (sc == NULL || sc->buf == NULL)
		return (0);

	sx_xlock(&sc->dev_lock);

	memcpy(sc->buf, save_cmd, sizeof(save_cmd));
	sc->transmit(sc, sizeof(save_cmd));

	sx_xunlock(&sc->dev_lock);

	return (0);
}

int32_t
tpm20_get_timeout(uint32_t command)
{
	int32_t timeout;

	switch (command) {
	case TPM_CC_CreatePrimary:
	case TPM_CC_Create:
	case TPM_CC_CreateLoaded:
		timeout = TPM_TIMEOUT_LONG;
		break;
	case TPM_CC_SequenceComplete:
	case TPM_CC_Startup:
	case TPM_CC_SequenceUpdate:
	case TPM_CC_GetCapability:
	case TPM_CC_PCR_Extend:
	case TPM_CC_EventSequenceComplete:
	case TPM_CC_HashSequenceStart:
		timeout = TPM_TIMEOUT_C;
		break;
	default:
		timeout = TPM_TIMEOUT_B;
		break;
	}
	return (timeout);
}