/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>

/*
 * The data layout description when integrity verification is configured.
 *
 * One of the most important assumptions here is that authenticated data and
 * its HMAC have to be stored in the same place (namely in the same sector) to
 * make it work reliably.
 * The problem is that file systems work only with sector sizes that are a
 * multiple of 512 bytes and a power of two.
 * My idea to implement it is as follows.
 * Let's store the HMAC in the sector. This is a must. This leaves us 480 bytes
 * for data. We can't use that directly (ie. we can't create a provider with a
 * 480-byte sector size). We need another sector from which we take only
 * 32 bytes of data and store the HMAC of this data as well. This takes two
 * sectors from the original provider at the input and leaves us one sector of
 * authenticated data at the output. Not very efficient, but you get the idea.
 * Now, let's assume we want to create a provider with a 4096-byte sector.
 * To output 4096 bytes of authenticated data we need 8x480 plus 1x256 bytes,
 * so we need nine 512-byte sectors at the input to get one 4096-byte sector at
 * the output. That's better. With a 4096-byte sector we can use 89% of the
 * size of the original provider. I find that an acceptable cost.
 * The reliability comes from the fact that every HMAC stored inside the sector
 * is calculated only for the data in the same sector, so it is impossible to
 * write new data and leave the old HMAC, or vice versa.
 *
 * And here is the picture:
 *
 * da0: +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+
 *      |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |256b |
 *      |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data |
 *      +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+
 *      |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |288 bytes |
 *      +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ |224 unused|
 *                                                                                                      +----------+
 * da0.eli: +----+----+----+----+----+----+----+----+----+
 *          |480b|480b|480b|480b|480b|480b|480b|480b|256b|
 *          +----+----+----+----+----+----+----+----+----+
 *          |                 4096 bytes                 |
 *          +--------------------------------------------+
 *
 * PS. You can use any sector size with geli(8). My example uses 4kB, because
 * it is the most efficient. For 8kB sectors you need two extra sectors, so the
 * cost is the same as for 4kB sectors.
 * (An illustrative, non-compiled sketch of this layout arithmetic is included
 * at the end of this file.)
 */

/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> g_eli_auth_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_auth_run -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */

MALLOC_DECLARE(M_ELI);

/*
 * Here we generate the key for the HMAC. Every sector has its own HMAC key, so
 * it is not possible to copy sectors.
 * We cannot depend on the fact that every sector has its own IV, because a
 * different IV doesn't change the HMAC when we use the encrypt-then-authenticate
 * method.
 */
static void
g_eli_auth_keygen(struct g_eli_softc *sc, off_t offset, u_char *key)
{
	SHA256_CTX ctx;

	/* Copy precalculated SHA256 context. */
	bcopy(&sc->sc_akeyctx, &ctx, sizeof(ctx));
	SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset));
	SHA256_Final(key, &ctx);
}

/*
 * This function is called after we read and decrypt data.
 *
 * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> G_ELI_AUTH_READ_DONE -> g_io_deliver
 */
static int
g_eli_auth_read_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct bio *bp;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	sc = bp->bio_to->geom->softc;
	if (crp->crp_etype == 0) {
		bp->bio_completed += crp->crp_payload_length;
		G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%d completed=%jd).",
		    bp->bio_inbed, bp->bio_children, crp->crp_payload_length,
		    (intmax_t)bp->bio_completed);
	} else {
		u_int nsec, decr_secsize, encr_secsize, rel_sec;
		int *errorp;

		/* Sectorsize of decrypted provider, eg. 4096. */
		decr_secsize = bp->bio_to->sectorsize;
		/* The real sectorsize of encrypted provider, eg. 512. */
		encr_secsize =
		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
		/* Number of sectors from decrypted provider, eg. 2. */
		nsec = bp->bio_length / decr_secsize;
		/* Number of sectors from encrypted provider, eg. 18. */
		nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
		/* Which relative sector this request decrypted. */
		rel_sec = ((crp->crp_buf.cb_buf + crp->crp_payload_start) -
		    (char *)bp->bio_driver2) / encr_secsize;

		errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec +
		    sizeof(int) * rel_sec);
		*errorp = crp->crp_etype;
		G_ELI_DEBUG(1,
		    "Crypto READ request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0 || bp->bio_error == EINTEGRITY)
			bp->bio_error = crp->crp_etype == EBADMSG ?
			    EINTEGRITY : crp->crp_etype;
	}
	if (crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * Do we have all sectors already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);

	if (bp->bio_error == 0) {
		u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
		u_char *srcdata, *dstdata;

		/* Sectorsize of decrypted provider, eg. 4096. */
		decr_secsize = bp->bio_to->sectorsize;
		/* The real sectorsize of encrypted provider, eg. 512. */
		encr_secsize =
		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
		/* Number of data bytes in one encrypted sector, eg. 480. */
		data_secsize = sc->sc_data_per_sector;
		/* Number of sectors from decrypted provider, eg. 2. */
		nsec = bp->bio_length / decr_secsize;
		/* Number of sectors from encrypted provider, eg. 18. */
		nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
		/* Last sector number in every big sector, eg. 9. */
		lsec = sc->sc_bytes_per_sector / encr_secsize;

		srcdata = bp->bio_driver2;
		dstdata = bp->bio_data;

		for (i = 1; i <= nsec; i++) {
			data_secsize = sc->sc_data_per_sector;
			if ((i % lsec) == 0)
				data_secsize = decr_secsize % data_secsize;
			bcopy(srcdata + sc->sc_alen, dstdata, data_secsize);
			srcdata += encr_secsize;
			dstdata += data_secsize;
		}
	} else if (bp->bio_error == EINTEGRITY) {
		u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
		int *errorp;
		off_t coroff, corsize, dstoff;

		/* Sectorsize of decrypted provider, eg. 4096. */
		decr_secsize = bp->bio_to->sectorsize;
		/* The real sectorsize of encrypted provider, eg. 512. */
		encr_secsize =
		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
		/* Number of data bytes in one encrypted sector, eg. 480. */
		data_secsize = sc->sc_data_per_sector;
		/* Number of sectors from decrypted provider, eg. 2. */
		nsec = bp->bio_length / decr_secsize;
		/* Number of sectors from encrypted provider, eg. 18. */
		nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
		/* Last sector number in every big sector, eg. 9. */
		lsec = sc->sc_bytes_per_sector / encr_secsize;

		errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec);
		coroff = -1;
		corsize = 0;
		dstoff = bp->bio_offset;

		for (i = 1; i <= nsec; i++) {
			data_secsize = sc->sc_data_per_sector;
			if ((i % lsec) == 0)
				data_secsize = decr_secsize % data_secsize;
			if (errorp[i - 1] == EBADMSG) {
				/*
				 * Corruption detected, remember the offset if
				 * this is the first corrupted sector and
				 * increase size.
				 */
				if (coroff == -1)
					coroff = dstoff;
				corsize += data_secsize;
			} else {
				/*
				 * No corruption, good.
				 * Report previous corruption if there was one.
				 */
				if (coroff != -1) {
					G_ELI_DEBUG(0, "%s: Failed to authenticate %jd "
					    "bytes of data at offset %jd.",
					    sc->sc_name, (intmax_t)corsize,
					    (intmax_t)coroff);
					coroff = -1;
					corsize = 0;
				}
			}
			dstoff += data_secsize;
		}
		/* Report previous corruption if there was one. */
		if (coroff != -1) {
			G_ELI_DEBUG(0, "%s: Failed to authenticate %jd "
			    "bytes of data at offset %jd.",
			    sc->sc_name, (intmax_t)corsize, (intmax_t)coroff);
		}
	}
	free(bp->bio_driver2, M_ELI);
	bp->bio_driver2 = NULL;
	if (bp->bio_error != 0) {
		if (bp->bio_error != EINTEGRITY) {
			G_ELI_LOGREQ(0, bp,
			    "Crypto READ request failed (error=%d).",
			    bp->bio_error);
		}
		bp->bio_completed = 0;
	}
	/*
	 * Read is finished, send it up.
	 */
	g_io_deliver(bp, bp->bio_error);
	atomic_subtract_int(&sc->sc_inflight, 1);
	return (0);
}

/*
 * This function is called after data encryption.
 *
 * g_eli_start -> g_eli_auth_run -> G_ELI_AUTH_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static int
g_eli_auth_write_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *bp, *cbp, *cbp2;
	u_int nsec;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
	} else {
		G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	sc = bp->bio_to->geom->softc;
	if (crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * Have all sectors been encrypted already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
		    bp->bio_error);
		free(bp->bio_driver2, M_ELI);
		bp->bio_driver2 = NULL;
		cbp = bp->bio_driver1;
		bp->bio_driver1 = NULL;
		g_destroy_bio(cbp);
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return (0);
	}
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	cbp->bio_to = cp->provider;
	cbp->bio_done = g_eli_write_done;

	/* Number of sectors from decrypted provider, eg. 1. */
	nsec = bp->bio_length / bp->bio_to->sectorsize;
	/* Number of sectors from encrypted provider, eg. 9. */
	nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize;

	cbp->bio_length = cp->provider->sectorsize * nsec;
	cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;
	cbp->bio_data = bp->bio_driver2;

	/*
	 * We write more than what is requested, so we have to be ready to write
	 * more than maxphys.
356 */ 357 cbp2 = NULL; 358 if (cbp->bio_length > maxphys) { 359 cbp2 = g_duplicate_bio(bp); 360 cbp2->bio_length = cbp->bio_length - maxphys; 361 cbp2->bio_data = cbp->bio_data + maxphys; 362 cbp2->bio_offset = cbp->bio_offset + maxphys; 363 cbp2->bio_to = cp->provider; 364 cbp2->bio_done = g_eli_write_done; 365 cbp->bio_length = maxphys; 366 } 367 /* 368 * Send encrypted data to the provider. 369 */ 370 G_ELI_LOGREQ(2, cbp, "Sending request."); 371 bp->bio_inbed = 0; 372 bp->bio_children = (cbp2 != NULL ? 2 : 1); 373 g_io_request(cbp, cp); 374 if (cbp2 != NULL) { 375 G_ELI_LOGREQ(2, cbp2, "Sending request."); 376 g_io_request(cbp2, cp); 377 } 378 return (0); 379 } 380 381 void 382 g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp) 383 { 384 struct g_consumer *cp; 385 struct bio *cbp, *cbp2; 386 size_t size; 387 off_t nsec; 388 389 bp->bio_pflags = 0; 390 391 cp = LIST_FIRST(&sc->sc_geom->consumer); 392 cbp = bp->bio_driver1; 393 bp->bio_driver1 = NULL; 394 cbp->bio_to = cp->provider; 395 cbp->bio_done = g_eli_read_done; 396 397 /* Number of sectors from decrypted provider, eg. 1. */ 398 nsec = bp->bio_length / bp->bio_to->sectorsize; 399 /* Number of sectors from encrypted provider, eg. 9. */ 400 nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; 401 402 cbp->bio_length = cp->provider->sectorsize * nsec; 403 size = cbp->bio_length; 404 size += sizeof(int) * nsec; 405 size += G_ELI_AUTH_SECKEYLEN * nsec; 406 cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; 407 bp->bio_driver2 = malloc(size, M_ELI, M_WAITOK); 408 cbp->bio_data = bp->bio_driver2; 409 410 /* Clear the error array. */ 411 memset((char *)bp->bio_driver2 + cbp->bio_length, 0, 412 sizeof(int) * nsec); 413 414 /* 415 * We read more than what is requested, so we have to be ready to read 416 * more than maxphys. 417 */ 418 cbp2 = NULL; 419 if (cbp->bio_length > maxphys) { 420 cbp2 = g_duplicate_bio(bp); 421 cbp2->bio_length = cbp->bio_length - maxphys; 422 cbp2->bio_data = cbp->bio_data + maxphys; 423 cbp2->bio_offset = cbp->bio_offset + maxphys; 424 cbp2->bio_to = cp->provider; 425 cbp2->bio_done = g_eli_read_done; 426 cbp->bio_length = maxphys; 427 } 428 /* 429 * Read encrypted data from provider. 430 */ 431 G_ELI_LOGREQ(2, cbp, "Sending request."); 432 g_io_request(cbp, cp); 433 if (cbp2 != NULL) { 434 G_ELI_LOGREQ(2, cbp2, "Sending request."); 435 g_io_request(cbp2, cp); 436 } 437 } 438 439 /* 440 * This is the main function responsible for cryptography (ie. communication 441 * with crypto(9) subsystem). 442 * 443 * BIO_READ: 444 * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> G_ELI_AUTH_RUN -> g_eli_auth_read_done -> g_io_deliver 445 * BIO_WRITE: 446 * g_eli_start -> G_ELI_AUTH_RUN -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver 447 */ 448 void 449 g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp) 450 { 451 struct g_eli_softc *sc; 452 struct cryptopq crpq; 453 struct cryptop *crp; 454 u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; 455 off_t dstoff; 456 u_char *p, *data, *authkey, *plaindata; 457 int error; 458 bool batch; 459 460 G_ELI_LOGREQ(3, bp, "%s", __func__); 461 462 bp->bio_pflags = wr->w_number; 463 sc = wr->w_softc; 464 /* Sectorsize of decrypted provider eg. 4096. */ 465 decr_secsize = bp->bio_to->sectorsize; 466 /* The real sectorsize of encrypted provider, eg. 512. 
	/* The real sectorsize of encrypted provider, eg. 512. */
	encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
	/* Number of data bytes in one encrypted sector, eg. 480. */
	data_secsize = sc->sc_data_per_sector;
	/* Number of sectors from decrypted provider, eg. 2. */
	nsec = bp->bio_length / decr_secsize;
	/* Number of sectors from encrypted provider, eg. 18. */
	nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
	/* Last sector number in every big sector, eg. 9. */
	lsec = sc->sc_bytes_per_sector / encr_secsize;
	/* Destination offset, used for IV generation. */
	dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;

	plaindata = bp->bio_data;
	if (bp->bio_cmd == BIO_READ) {
		data = bp->bio_driver2;
		p = data + encr_secsize * nsec;
		p += sizeof(int) * nsec;
	} else {
		size_t size;

		size = encr_secsize * nsec;
		size += G_ELI_AUTH_SECKEYLEN * nsec;
		size += sizeof(uintptr_t);	/* Space for alignment. */
		data = malloc(size, M_ELI, M_WAITOK);
		bp->bio_driver2 = data;
		p = data + encr_secsize * nsec;
	}
	bp->bio_inbed = 0;
	bp->bio_children = nsec;

#if defined(__mips_n64) || defined(__mips_o64)
	p = (char *)roundup((uintptr_t)p, sizeof(uintptr_t));
#endif

	TAILQ_INIT(&crpq);
	batch = atomic_load_int(&g_eli_batch) != 0;

	for (i = 1; i <= nsec; i++, dstoff += encr_secsize) {
		crp = crypto_getreq(wr->w_sid, M_WAITOK);
		authkey = (u_char *)p;	p += G_ELI_AUTH_SECKEYLEN;

		data_secsize = sc->sc_data_per_sector;
		if ((i % lsec) == 0) {
			data_secsize = decr_secsize % data_secsize;
			/*
			 * Last encrypted sector of each decrypted sector is
			 * only partially filled.
			 */
			if (bp->bio_cmd == BIO_WRITE)
				memset(data + sc->sc_alen + data_secsize, 0,
				    encr_secsize - sc->sc_alen - data_secsize);
		} else if (data_secsize + sc->sc_alen != encr_secsize) {
			/*
			 * If the HMAC size is not a multiple of 128 bits, the
			 * per-sector data size is rounded down to ensure that
			 * encryption can be performed without requiring any
			 * padding. In this case, each sector contains unused
			 * bytes.
525 */ 526 if (bp->bio_cmd == BIO_WRITE) 527 memset(data + sc->sc_alen + data_secsize, 0, 528 encr_secsize - sc->sc_alen - data_secsize); 529 } 530 531 if (bp->bio_cmd == BIO_WRITE) { 532 bcopy(plaindata, data + sc->sc_alen, data_secsize); 533 plaindata += data_secsize; 534 } 535 536 crypto_use_buf(crp, data, sc->sc_alen + data_secsize); 537 crp->crp_opaque = (void *)bp; 538 data += encr_secsize; 539 crp->crp_flags = CRYPTO_F_CBIFSYNC; 540 if (bp->bio_cmd == BIO_WRITE) { 541 crp->crp_callback = g_eli_auth_write_done; 542 crp->crp_op = CRYPTO_OP_ENCRYPT | 543 CRYPTO_OP_COMPUTE_DIGEST; 544 } else { 545 crp->crp_callback = g_eli_auth_read_done; 546 crp->crp_op = CRYPTO_OP_DECRYPT | 547 CRYPTO_OP_VERIFY_DIGEST; 548 } 549 550 crp->crp_digest_start = 0; 551 crp->crp_payload_start = sc->sc_alen; 552 crp->crp_payload_length = data_secsize; 553 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0) { 554 crp->crp_cipher_key = g_eli_key_hold(sc, dstoff, 555 encr_secsize); 556 } 557 if (g_eli_ivlen(sc->sc_ealgo) != 0) { 558 crp->crp_flags |= CRYPTO_F_IV_SEPARATE; 559 g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv, 560 sizeof(crp->crp_iv)); 561 } 562 563 g_eli_auth_keygen(sc, dstoff, authkey); 564 crp->crp_auth_key = authkey; 565 566 if (batch) { 567 TAILQ_INSERT_TAIL(&crpq, crp, crp_next); 568 } else { 569 error = crypto_dispatch(crp); 570 KASSERT(error == 0, 571 ("crypto_dispatch() failed (error=%d)", error)); 572 } 573 } 574 575 if (batch) 576 crypto_dispatch_batch(&crpq, 0); 577 } 578