1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/linker.h> 36 #include <sys/module.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/bio.h> 40 #include <sys/sysctl.h> 41 #include <sys/kthread.h> 42 #include <sys/proc.h> 43 #include <sys/sched.h> 44 #include <sys/smp.h> 45 #include <sys/vnode.h> 46 47 #include <vm/uma.h> 48 49 #include <geom/geom.h> 50 #include <geom/geom_dbg.h> 51 #include <geom/eli/g_eli.h> 52 #include <geom/eli/pkcs5v2.h> 53 54 /* 55 * The data layout description when integrity verification is configured. 56 * 57 * One of the most important assumption here is that authenticated data and its 58 * HMAC has to be stored in the same place (namely in the same sector) to make 59 * it work reliable. 60 * The problem is that file systems work only with sectors that are multiple of 61 * 512 bytes and a power of two number. 62 * My idea to implement it is as follows. 63 * Let's store HMAC in sector. This is a must. This leaves us 480 bytes for 64 * data. We can't use that directly (ie. we can't create provider with 480 bytes 65 * sector size). We need another sector from where we take only 32 bytes of data 66 * and we store HMAC of this data as well. This takes two sectors from the 67 * original provider at the input and leaves us one sector of authenticated data 68 * at the output. Not very efficient, but you got the idea. 69 * Now, let's assume, we want to create provider with 4096 bytes sector. 70 * To output 4096 bytes of authenticated data we need 8x480 plus 1x256, so we 71 * need nine 512-bytes sectors at the input to get one 4096-bytes sector at the 72 * output. That's better. With 4096 bytes sector we can use 89% of size of the 73 * original provider. I find it as an acceptable cost. 74 * The reliability comes from the fact, that every HMAC stored inside the sector 75 * is calculated only for the data in the same sector, so its impossible to 76 * write new data and leave old HMAC or vice versa. 77 * 78 * And here is the picture: 79 * 80 * da0: +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+ 81 * |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |256b | 82 * |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data | 83 * +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+ 84 * |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |288 bytes | 85 * +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ |224 unused| 86 * +----------+ 87 * da0.eli: +----+----+----+----+----+----+----+----+----+ 88 * |480b|480b|480b|480b|480b|480b|480b|480b|256b| 89 * +----+----+----+----+----+----+----+----+----+ 90 * | 4096 bytes | 91 * +--------------------------------------------+ 92 * 93 * PS. You can use any sector size with geli(8). My example is using 4kB, 94 * because it's most efficient. For 8kB sectors you need 2 extra sectors, 95 * so the cost is the same as for 4kB sectors. 96 */ 97 98 /* 99 * Code paths: 100 * BIO_READ: 101 * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> g_eli_auth_read_done -> g_io_deliver 102 * BIO_WRITE: 103 * g_eli_start -> g_eli_auth_run -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver 104 */ 105 106 /* 107 * Here we generate key for HMAC. Every sector has its own HMAC key, so it is 108 * not possible to copy sectors. 109 * We cannot depend on fact, that every sector has its own IV, because different 110 * IV doesn't change HMAC, when we use encrypt-then-authenticate method. 111 */ 112 static void 113 g_eli_auth_keygen(struct g_eli_softc *sc, off_t offset, u_char *key) 114 { 115 SHA256_CTX ctx; 116 117 /* Copy precalculated SHA256 context. */ 118 bcopy(&sc->sc_akeyctx, &ctx, sizeof(ctx)); 119 SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset)); 120 SHA256_Final(key, &ctx); 121 } 122 123 /* 124 * The function is called after we read and decrypt data. 125 * 126 * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> G_ELI_AUTH_READ_DONE -> g_io_deliver 127 */ 128 static int 129 g_eli_auth_read_done(struct cryptop *crp) 130 { 131 struct g_eli_softc *sc; 132 struct bio *bp; 133 134 if (crp->crp_etype == EAGAIN) { 135 if (g_eli_crypto_rerun(crp) == 0) 136 return (0); 137 } 138 bp = (struct bio *)crp->crp_opaque; 139 bp->bio_inbed++; 140 sc = bp->bio_to->geom->softc; 141 if (crp->crp_etype == 0) { 142 bp->bio_completed += crp->crp_payload_length; 143 G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%d completed=%jd).", 144 bp->bio_inbed, bp->bio_children, crp->crp_payload_length, (intmax_t)bp->bio_completed); 145 } else { 146 u_int nsec, decr_secsize, encr_secsize, rel_sec; 147 int *errorp; 148 149 /* Sectorsize of decrypted provider eg. 4096. */ 150 decr_secsize = bp->bio_to->sectorsize; 151 /* The real sectorsize of encrypted provider, eg. 512. */ 152 encr_secsize = 153 LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; 154 /* Number of sectors from decrypted provider, eg. 2. */ 155 nsec = bp->bio_length / decr_secsize; 156 /* Number of sectors from encrypted provider, eg. 18. */ 157 nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; 158 /* Which relative sector this request decrypted. */ 159 rel_sec = ((crp->crp_buf.cb_buf + crp->crp_payload_start) - 160 (char *)bp->bio_driver2) / encr_secsize; 161 162 errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec + 163 sizeof(int) * rel_sec); 164 *errorp = crp->crp_etype; 165 G_ELI_DEBUG(1, 166 "Crypto READ request failed (%d/%d) error=%d.", 167 bp->bio_inbed, bp->bio_children, crp->crp_etype); 168 if (bp->bio_error == 0 || bp->bio_error == EINTEGRITY) 169 bp->bio_error = crp->crp_etype == EBADMSG ? 170 EINTEGRITY : crp->crp_etype; 171 } 172 if (crp->crp_cipher_key != NULL) 173 g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key)); 174 crypto_freereq(crp); 175 /* 176 * Do we have all sectors already? 177 */ 178 if (bp->bio_inbed < bp->bio_children) 179 return (0); 180 181 if (bp->bio_error == 0) { 182 u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; 183 u_char *srcdata, *dstdata; 184 185 /* Sectorsize of decrypted provider eg. 4096. */ 186 decr_secsize = bp->bio_to->sectorsize; 187 /* The real sectorsize of encrypted provider, eg. 512. */ 188 encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; 189 /* Number of data bytes in one encrypted sector, eg. 480. */ 190 data_secsize = sc->sc_data_per_sector; 191 /* Number of sectors from decrypted provider, eg. 2. */ 192 nsec = bp->bio_length / decr_secsize; 193 /* Number of sectors from encrypted provider, eg. 18. */ 194 nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; 195 /* Last sector number in every big sector, eg. 9. */ 196 lsec = sc->sc_bytes_per_sector / encr_secsize; 197 198 srcdata = bp->bio_driver2; 199 dstdata = bp->bio_data; 200 201 for (i = 1; i <= nsec; i++) { 202 data_secsize = sc->sc_data_per_sector; 203 if ((i % lsec) == 0) 204 data_secsize = decr_secsize % data_secsize; 205 bcopy(srcdata + sc->sc_alen, dstdata, data_secsize); 206 srcdata += encr_secsize; 207 dstdata += data_secsize; 208 } 209 } else if (bp->bio_error == EINTEGRITY) { 210 u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; 211 int *errorp; 212 off_t coroff, corsize, dstoff; 213 214 /* Sectorsize of decrypted provider eg. 4096. */ 215 decr_secsize = bp->bio_to->sectorsize; 216 /* The real sectorsize of encrypted provider, eg. 512. */ 217 encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; 218 /* Number of data bytes in one encrypted sector, eg. 480. */ 219 data_secsize = sc->sc_data_per_sector; 220 /* Number of sectors from decrypted provider, eg. 2. */ 221 nsec = bp->bio_length / decr_secsize; 222 /* Number of sectors from encrypted provider, eg. 18. */ 223 nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; 224 /* Last sector number in every big sector, eg. 9. */ 225 lsec = sc->sc_bytes_per_sector / encr_secsize; 226 227 errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec); 228 coroff = -1; 229 corsize = 0; 230 dstoff = bp->bio_offset; 231 232 for (i = 1; i <= nsec; i++) { 233 data_secsize = sc->sc_data_per_sector; 234 if ((i % lsec) == 0) 235 data_secsize = decr_secsize % data_secsize; 236 if (errorp[i - 1] == EBADMSG) { 237 /* 238 * Corruption detected, remember the offset if 239 * this is the first corrupted sector and 240 * increase size. 241 */ 242 if (coroff == -1) 243 coroff = dstoff; 244 corsize += data_secsize; 245 } else { 246 /* 247 * No corruption, good. 248 * Report previous corruption if there was one. 249 */ 250 if (coroff != -1) { 251 G_ELI_DEBUG(0, "%s: Failed to authenticate %jd " 252 "bytes of data at offset %jd.", 253 sc->sc_name, (intmax_t)corsize, 254 (intmax_t)coroff); 255 coroff = -1; 256 corsize = 0; 257 } 258 } 259 dstoff += data_secsize; 260 } 261 /* Report previous corruption if there was one. */ 262 if (coroff != -1) { 263 G_ELI_DEBUG(0, "%s: Failed to authenticate %jd " 264 "bytes of data at offset %jd.", 265 sc->sc_name, (intmax_t)corsize, (intmax_t)coroff); 266 } 267 } 268 g_eli_free_data(bp); 269 if (bp->bio_error != 0) { 270 if (bp->bio_error != EINTEGRITY) { 271 G_ELI_LOGREQ(0, bp, 272 "Crypto READ request failed (error=%d).", 273 bp->bio_error); 274 } 275 bp->bio_completed = 0; 276 } 277 /* 278 * Read is finished, send it up. 279 */ 280 g_io_deliver(bp, bp->bio_error); 281 atomic_subtract_int(&sc->sc_inflight, 1); 282 return (0); 283 } 284 285 /* 286 * The function is called after data encryption. 287 * 288 * g_eli_start -> g_eli_auth_run -> G_ELI_AUTH_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver 289 */ 290 static int 291 g_eli_auth_write_done(struct cryptop *crp) 292 { 293 struct g_eli_softc *sc; 294 struct g_consumer *cp; 295 struct bio *bp, *cbp, *cbp2; 296 u_int nsec; 297 298 if (crp->crp_etype == EAGAIN) { 299 if (g_eli_crypto_rerun(crp) == 0) 300 return (0); 301 } 302 bp = (struct bio *)crp->crp_opaque; 303 bp->bio_inbed++; 304 if (crp->crp_etype == 0) { 305 G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).", 306 bp->bio_inbed, bp->bio_children); 307 } else { 308 G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.", 309 bp->bio_inbed, bp->bio_children, crp->crp_etype); 310 if (bp->bio_error == 0) 311 bp->bio_error = crp->crp_etype; 312 } 313 sc = bp->bio_to->geom->softc; 314 if (crp->crp_cipher_key != NULL) 315 g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key)); 316 crypto_freereq(crp); 317 /* 318 * All sectors are already encrypted? 319 */ 320 if (bp->bio_inbed < bp->bio_children) 321 return (0); 322 if (bp->bio_error != 0) { 323 G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).", 324 bp->bio_error); 325 g_eli_free_data(bp); 326 cbp = bp->bio_driver1; 327 bp->bio_driver1 = NULL; 328 g_destroy_bio(cbp); 329 g_io_deliver(bp, bp->bio_error); 330 atomic_subtract_int(&sc->sc_inflight, 1); 331 return (0); 332 } 333 cp = LIST_FIRST(&sc->sc_geom->consumer); 334 cbp = bp->bio_driver1; 335 bp->bio_driver1 = NULL; 336 cbp->bio_to = cp->provider; 337 cbp->bio_done = g_eli_write_done; 338 339 /* Number of sectors from decrypted provider, eg. 1. */ 340 nsec = bp->bio_length / bp->bio_to->sectorsize; 341 /* Number of sectors from encrypted provider, eg. 9. */ 342 nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; 343 344 cbp->bio_length = cp->provider->sectorsize * nsec; 345 cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; 346 cbp->bio_data = bp->bio_driver2; 347 348 /* 349 * We write more than what is requested, so we have to be ready to write 350 * more than maxphys. 351 */ 352 cbp2 = NULL; 353 if (cbp->bio_length > maxphys) { 354 cbp2 = g_duplicate_bio(bp); 355 cbp2->bio_length = cbp->bio_length - maxphys; 356 cbp2->bio_data = cbp->bio_data + maxphys; 357 cbp2->bio_offset = cbp->bio_offset + maxphys; 358 cbp2->bio_to = cp->provider; 359 cbp2->bio_done = g_eli_write_done; 360 cbp->bio_length = maxphys; 361 } 362 /* 363 * Send encrypted data to the provider. 364 */ 365 G_ELI_LOGREQ(2, cbp, "Sending request."); 366 bp->bio_inbed = 0; 367 bp->bio_children = (cbp2 != NULL ? 2 : 1); 368 g_io_request(cbp, cp); 369 if (cbp2 != NULL) { 370 G_ELI_LOGREQ(2, cbp2, "Sending request."); 371 g_io_request(cbp2, cp); 372 } 373 return (0); 374 } 375 376 void 377 g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp) 378 { 379 struct g_consumer *cp; 380 struct bio *cbp, *cbp2; 381 size_t size; 382 off_t nsec; 383 384 G_ELI_SETWORKER(bp->bio_pflags, 0); 385 386 cp = LIST_FIRST(&sc->sc_geom->consumer); 387 cbp = bp->bio_driver1; 388 bp->bio_driver1 = NULL; 389 cbp->bio_to = cp->provider; 390 cbp->bio_done = g_eli_read_done; 391 392 /* Number of sectors from decrypted provider, eg. 1. */ 393 nsec = bp->bio_length / bp->bio_to->sectorsize; 394 /* Number of sectors from encrypted provider, eg. 9. */ 395 nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; 396 397 cbp->bio_length = cp->provider->sectorsize * nsec; 398 size = cbp->bio_length; 399 size += sizeof(int) * nsec; 400 size += G_ELI_AUTH_SECKEYLEN * nsec; 401 cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; 402 if (!g_eli_alloc_data(bp, size)) { 403 G_ELI_LOGREQ(0, bp, "Crypto auth read request failed (ENOMEM)"); 404 g_destroy_bio(cbp); 405 bp->bio_error = ENOMEM; 406 g_io_deliver(bp, bp->bio_error); 407 atomic_subtract_int(&sc->sc_inflight, 1); 408 return; 409 } 410 cbp->bio_data = bp->bio_driver2; 411 412 /* Clear the error array. */ 413 memset((char *)bp->bio_driver2 + cbp->bio_length, 0, 414 sizeof(int) * nsec); 415 416 /* 417 * We read more than what is requested, so we have to be ready to read 418 * more than maxphys. 419 */ 420 cbp2 = NULL; 421 if (cbp->bio_length > maxphys) { 422 cbp2 = g_duplicate_bio(bp); 423 cbp2->bio_length = cbp->bio_length - maxphys; 424 cbp2->bio_data = cbp->bio_data + maxphys; 425 cbp2->bio_offset = cbp->bio_offset + maxphys; 426 cbp2->bio_to = cp->provider; 427 cbp2->bio_done = g_eli_read_done; 428 cbp->bio_length = maxphys; 429 } 430 /* 431 * Read encrypted data from provider. 432 */ 433 G_ELI_LOGREQ(2, cbp, "Sending request."); 434 g_io_request(cbp, cp); 435 if (cbp2 != NULL) { 436 G_ELI_LOGREQ(2, cbp2, "Sending request."); 437 g_io_request(cbp2, cp); 438 } 439 } 440 441 /* 442 * This is the main function responsible for cryptography (ie. communication 443 * with crypto(9) subsystem). 444 * 445 * BIO_READ: 446 * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> G_ELI_AUTH_RUN -> g_eli_auth_read_done -> g_io_deliver 447 * BIO_WRITE: 448 * g_eli_start -> G_ELI_AUTH_RUN -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver 449 */ 450 void 451 g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp) 452 { 453 struct g_eli_softc *sc; 454 struct cryptopq crpq; 455 struct cryptop *crp; 456 u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; 457 off_t dstoff; 458 u_char *p, *data, *authkey, *plaindata; 459 int error __diagused; 460 bool batch; 461 462 G_ELI_LOGREQ(3, bp, "%s", __func__); 463 464 G_ELI_SETWORKER(bp->bio_pflags, wr->w_number); 465 sc = wr->w_softc; 466 /* Sectorsize of decrypted provider eg. 4096. */ 467 decr_secsize = bp->bio_to->sectorsize; 468 /* The real sectorsize of encrypted provider, eg. 512. */ 469 encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; 470 /* Number of data bytes in one encrypted sector, eg. 480. */ 471 data_secsize = sc->sc_data_per_sector; 472 /* Number of sectors from decrypted provider, eg. 2. */ 473 nsec = bp->bio_length / decr_secsize; 474 /* Number of sectors from encrypted provider, eg. 18. */ 475 nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; 476 /* Last sector number in every big sector, eg. 9. */ 477 lsec = sc->sc_bytes_per_sector / encr_secsize; 478 /* Destination offset, used for IV generation. */ 479 dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; 480 481 plaindata = bp->bio_data; 482 if (bp->bio_cmd == BIO_READ) { 483 data = bp->bio_driver2; 484 p = data + encr_secsize * nsec; 485 p += sizeof(int) * nsec; 486 } else { 487 size_t size; 488 489 size = encr_secsize * nsec; 490 size += G_ELI_AUTH_SECKEYLEN * nsec; 491 size += sizeof(uintptr_t); /* Space for alignment. */ 492 if (!g_eli_alloc_data(bp, size)) { 493 G_ELI_LOGREQ(0, bp, "Crypto request failed (ENOMEM)"); 494 if (bp->bio_driver1 != NULL) { 495 g_destroy_bio(bp->bio_driver1); 496 bp->bio_driver1 = NULL; 497 } 498 bp->bio_error = ENOMEM; 499 g_io_deliver(bp, bp->bio_error); 500 if (sc != NULL) 501 atomic_subtract_int(&sc->sc_inflight, 1); 502 return; 503 } 504 data = bp->bio_driver2; 505 p = data + encr_secsize * nsec; 506 } 507 bp->bio_inbed = 0; 508 bp->bio_children = nsec; 509 510 #if defined(__mips_n64) || defined(__mips_o64) 511 p = (char *)roundup((uintptr_t)p, sizeof(uintptr_t)); 512 #endif 513 514 TAILQ_INIT(&crpq); 515 batch = atomic_load_int(&g_eli_batch) != 0; 516 517 for (i = 1; i <= nsec; i++, dstoff += encr_secsize) { 518 crp = crypto_getreq(wr->w_sid, M_WAITOK); 519 authkey = (u_char *)p; p += G_ELI_AUTH_SECKEYLEN; 520 521 data_secsize = sc->sc_data_per_sector; 522 if ((i % lsec) == 0) { 523 data_secsize = decr_secsize % data_secsize; 524 /* 525 * Last encrypted sector of each decrypted sector is 526 * only partially filled. 527 */ 528 if (bp->bio_cmd == BIO_WRITE) 529 memset(data + sc->sc_alen + data_secsize, 0, 530 encr_secsize - sc->sc_alen - data_secsize); 531 } else if (data_secsize + sc->sc_alen != encr_secsize) { 532 /* 533 * If the HMAC size is not a multiple of 128 bits, the 534 * per-sector data size is rounded down to ensure that 535 * encryption can be performed without requiring any 536 * padding. In this case, each sector contains unused 537 * bytes. 538 */ 539 if (bp->bio_cmd == BIO_WRITE) 540 memset(data + sc->sc_alen + data_secsize, 0, 541 encr_secsize - sc->sc_alen - data_secsize); 542 } 543 544 if (bp->bio_cmd == BIO_WRITE) { 545 bcopy(plaindata, data + sc->sc_alen, data_secsize); 546 plaindata += data_secsize; 547 } 548 549 crypto_use_buf(crp, data, sc->sc_alen + data_secsize); 550 crp->crp_opaque = (void *)bp; 551 data += encr_secsize; 552 crp->crp_flags = CRYPTO_F_CBIFSYNC; 553 if (bp->bio_cmd == BIO_WRITE) { 554 crp->crp_callback = g_eli_auth_write_done; 555 crp->crp_op = CRYPTO_OP_ENCRYPT | 556 CRYPTO_OP_COMPUTE_DIGEST; 557 } else { 558 crp->crp_callback = g_eli_auth_read_done; 559 crp->crp_op = CRYPTO_OP_DECRYPT | 560 CRYPTO_OP_VERIFY_DIGEST; 561 } 562 563 crp->crp_digest_start = 0; 564 crp->crp_payload_start = sc->sc_alen; 565 crp->crp_payload_length = data_secsize; 566 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0) { 567 crp->crp_cipher_key = g_eli_key_hold(sc, dstoff, 568 encr_secsize); 569 } 570 if (g_eli_ivlen(sc->sc_ealgo) != 0) { 571 crp->crp_flags |= CRYPTO_F_IV_SEPARATE; 572 g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv, 573 sizeof(crp->crp_iv)); 574 } 575 576 g_eli_auth_keygen(sc, dstoff, authkey); 577 crp->crp_auth_key = authkey; 578 579 if (batch) { 580 TAILQ_INSERT_TAIL(&crpq, crp, crp_next); 581 } else { 582 error = crypto_dispatch(crp); 583 KASSERT(error == 0, 584 ("crypto_dispatch() failed (error=%d)", error)); 585 } 586 } 587 588 if (batch) 589 crypto_dispatch_batch(&crpq, 0); 590 } 591