/*-
 * Copyright (c) 2000-2013 Mark R V Murray
 * Copyright (c) 2013 Arthur Mesh
 * Copyright (c) 2004 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_random.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/random.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/cpu.h>
#include <machine/vmparam.h>

#include <dev/random/randomdev.h>
#include <dev/random/randomdev_soft.h>
#include <dev/random/random_adaptors.h>
#include <dev/random/random_harvestq.h>
#include <dev/random/live_entropy_sources.h>
#include <dev/random/rwfile.h>

#define RANDOM_FIFO_MAX		1024	/* How many events to queue up */

/*
 * The harvest mutex protects the consistency of the entropy fifos and
 * empty fifo and other associated structures.
 */
struct mtx harvest_mtx;

/* Lockable FIFO queue holding entropy buffers */
struct entropyfifo {
        int count;
        STAILQ_HEAD(harvestlist, harvest) head;
};

/* Empty entropy buffers */
static struct entropyfifo emptyfifo;

/* Harvested entropy */
static struct entropyfifo harvestfifo;

/* <0 to end the kthread, 0 to let it run, 1 to flush the harvest queues */
int random_kthread_control = 0;

static struct proc *random_kthread_proc;

#ifdef RANDOM_RWFILE
static const char *entropy_files[] = {
        "/entropy",
        NULL
};
#endif

/* Deal with entropy cached externally if this is present.
 * Lots of policy may eventually arrive in this function.
 * Called after / is mounted.
 */
static void
random_harvestq_cache(void *arg __unused)
{
        uint8_t *keyfile, *data;
        size_t size, i;
#ifdef RANDOM_RWFILE
        const char **entropy_file;
        uint8_t *zbuf;
        int error;
#endif

        /* Get stuff that may have been preloaded by loader(8) */
        keyfile = preload_search_by_type("/boot/entropy");
        if (keyfile != NULL) {
                data = preload_fetch_addr(keyfile);
                size = preload_fetch_size(keyfile);
                if (data != NULL && size != 0) {
                        for (i = 0; i < size; i += 16)
                                random_harvestq_internal(get_cyclecount(), data + i, 16, 16, RANDOM_CACHED);
                        printf("random: read %zu bytes from preloaded cache\n", size);
                        bzero(data, size);
                }
                else
                        printf("random: no preloaded entropy cache available\n");
        }

#ifdef RANDOM_RWFILE
        /* Read and attempt to overwrite the entropy cache files.
         * If the file exists, can be read and then overwritten,
         * then use it. Ignore it otherwise, but print out what is
         * going on.
         */
        data = malloc(PAGE_SIZE, M_ENTROPY, M_WAITOK);
        zbuf = __DECONST(void *, zero_region);
        for (entropy_file = entropy_files; *entropy_file; entropy_file++) {
                error = randomdev_read_file(*entropy_file, data, PAGE_SIZE);
                if (error == 0) {
                        printf("random: entropy cache '%s' provides %ld bytes\n", *entropy_file, (long)PAGE_SIZE);
                        error = randomdev_write_file(*entropy_file, zbuf, PAGE_SIZE);
                        if (error == 0) {
                                printf("random: entropy cache '%s' contents used and successfully overwritten\n", *entropy_file);
                                for (i = 0; i < PAGE_SIZE; i += 16)
                                        random_harvestq_internal(get_cyclecount(), data + i, 16, 16, RANDOM_CACHED);
                        }
                        else
                                printf("random: entropy cache '%s' not overwritten and therefore not used; error = %d\n", *entropy_file, error);
                }
                else
                        printf("random: entropy cache '%s' not present or unreadable; error = %d\n", *entropy_file, error);
        }
        bzero(data, PAGE_SIZE);
        free(data, M_ENTROPY);
#endif
}
EVENTHANDLER_DEFINE(mountroot, random_harvestq_cache, NULL, 0);

static void
random_kthread(void *arg)
{
        STAILQ_HEAD(, harvest) local_queue;
        struct harvest *event = NULL;
        int local_count;
        event_proc_f entropy_processor = arg;

        STAILQ_INIT(&local_queue);
        local_count = 0;

        /* Process until told to stop */
        mtx_lock_spin(&harvest_mtx);
        for (; random_kthread_control >= 0;) {

                /*
                 * Grab all the entropy events.
                 * Drain entropy source records into a thread-local
                 * queue for processing while not holding the mutex.
                 */
                STAILQ_CONCAT(&local_queue, &harvestfifo.head);
                local_count += harvestfifo.count;
                harvestfifo.count = 0;

                /*
                 * Deal with events, if any.
                 * Then transfer the used events back into the empty fifo.
                 */
                if (!STAILQ_EMPTY(&local_queue)) {
                        mtx_unlock_spin(&harvest_mtx);
                        STAILQ_FOREACH(event, &local_queue, next)
                                entropy_processor(event);
                        mtx_lock_spin(&harvest_mtx);
                        STAILQ_CONCAT(&emptyfifo.head, &local_queue);
                        emptyfifo.count += local_count;
                        local_count = 0;
                }

                KASSERT(local_count == 0, ("random_kthread: local_count %d",
                    local_count));

                /*
                 * Do only one round of the hardware sources for now.
                 * Later we'll need to make it rate-adaptive.
                 */
                mtx_unlock_spin(&harvest_mtx);
                live_entropy_sources_feed(1, entropy_processor);
                mtx_lock_spin(&harvest_mtx);

                /*
                 * If a queue flush was commanded, it has now happened,
                 * and we can mark this by resetting the command.
                 */
                if (random_kthread_control == 1)
                        random_kthread_control = 0;

                /* Work done, so don't belabour the issue */
                msleep_spin_sbt(&random_kthread_control, &harvest_mtx,
                    "-", SBT_1S/10, 0, C_PREL(1));

        }
        mtx_unlock_spin(&harvest_mtx);

        random_set_wakeup_exit(&random_kthread_control);
        /* NOTREACHED */
}

void
random_harvestq_init(event_proc_f cb)
{
        int error, i;
        struct harvest *np;

        /* Initialise the harvest fifos */

        /* Contains the currently unused event structs. */
        STAILQ_INIT(&emptyfifo.head);
        for (i = 0; i < RANDOM_FIFO_MAX; i++) {
                np = malloc(sizeof(struct harvest), M_ENTROPY, M_WAITOK);
                STAILQ_INSERT_TAIL(&emptyfifo.head, np, next);
        }
        emptyfifo.count = RANDOM_FIFO_MAX;

        /* Will contain the queued-up events. */
        STAILQ_INIT(&harvestfifo.head);
        harvestfifo.count = 0;

        mtx_init(&harvest_mtx, "entropy harvest mutex", NULL, MTX_SPIN);

        /* Start the hash/reseed thread */
        error = kproc_create(random_kthread, cb,
            &random_kthread_proc, RFHIGHPID, 0, "rand_harvestq"); /* RANDOM_CSPRNG_NAME */

        if (error != 0)
                panic("Cannot create entropy maintenance thread.");
}

void
random_harvestq_deinit(void)
{
        struct harvest *np;

        /* Destroy the harvest fifos */
        while (!STAILQ_EMPTY(&emptyfifo.head)) {
                np = STAILQ_FIRST(&emptyfifo.head);
                STAILQ_REMOVE_HEAD(&emptyfifo.head, next);
                free(np, M_ENTROPY);
        }
        emptyfifo.count = 0;
        while (!STAILQ_EMPTY(&harvestfifo.head)) {
                np = STAILQ_FIRST(&harvestfifo.head);
                STAILQ_REMOVE_HEAD(&harvestfifo.head, next);
                free(np, M_ENTROPY);
        }
        harvestfifo.count = 0;

        mtx_destroy(&harvest_mtx);
}

/*
 * Entropy harvesting routine.
 * This is supposed to be fast; do not do anything slow in here!
 *
 * It is also illegal (and morally reprehensible) to insert any
 * high-rate data here. "High-rate" is defined as a data source
 * that will usually cause lots of failures of the "Lockless read"
 * check a few lines below. This includes the "always-on" sources
 * like the Intel "rdrand" or the VIA Nehemiah "xstore" sources.
 */
void
random_harvestq_internal(u_int64_t somecounter, const void *entropy,
    u_int count, u_int bits, enum esource origin)
{
        struct harvest *event;

        KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE,
            ("random_harvestq_internal: origin %d invalid\n", origin));

        /* Lockless read to avoid lock operations if fifo is full. */
        if (harvestfifo.count >= RANDOM_FIFO_MAX)
                return;

        mtx_lock_spin(&harvest_mtx);

        /*
         * Don't overfill the harvest queue; this could steal all
         * our memory.
         */
        if (harvestfifo.count < RANDOM_FIFO_MAX) {
                event = STAILQ_FIRST(&emptyfifo.head);
                if (event != NULL) {
                        /* Add the harvested data to the fifo */
                        STAILQ_REMOVE_HEAD(&emptyfifo.head, next);
                        emptyfifo.count--;
                        event->somecounter = somecounter;
                        event->size = count;
                        event->bits = bits;
                        event->source = origin;

                        /* XXXX Come back and make this dynamic! */
                        count = MIN(count, HARVESTSIZE);
                        memcpy(event->entropy, entropy, count);

                        STAILQ_INSERT_TAIL(&harvestfifo.head,
                            event, next);
                        harvestfifo.count++;
                }
        }

        mtx_unlock_spin(&harvest_mtx);
}
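
/*
 * Illustrative sketch only, not part of the original file: how a
 * hypothetical driver routine might feed a timing sample into the
 * harvest queue defined above. The name "foo_intr", the 2-bit entropy
 * estimate and the RANDOM_INTERRUPT origin are assumptions for the
 * sketch (the valid origins are the enum esource values in
 * <sys/random.h>); real drivers normally reach this code through the
 * random_harvest() front end rather than calling
 * random_harvestq_internal() directly.
 *
 *	static void
 *	foo_intr(void *arg __unused)
 *	{
 *		uint64_t sample;
 *
 *		sample = get_cyclecount();
 *		random_harvestq_internal(get_cyclecount(), &sample,
 *		    sizeof(sample), 2, RANDOM_INTERRUPT);
 *	}
 */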