/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>

/*
 * The data layout description when integrity verification is configured.
 *
 * One of the most important assumptions here is that the authenticated data
 * and its HMAC have to be stored in the same place (namely in the same
 * sector) to make it work reliably.
 * The problem is that file systems work only with sectors whose size is a
 * multiple of 512 bytes and a power of two.
 * My idea to implement it is as follows.
 * Let's store the HMAC in the sector. This is a must. This leaves us 480
 * bytes for data. We can't use that directly (i.e. we can't create a provider
 * with a 480-byte sector size). We need another sector from which we take
 * only 32 bytes of data and store the HMAC of this data as well. This takes
 * two sectors from the original provider at the input and leaves us one
 * sector of authenticated data at the output. Not very efficient, but you get
 * the idea.
 * Now, let's assume we want to create a provider with a 4096-byte sector.
 * To output 4096 bytes of authenticated data we need 8x480 plus 1x256, so we
 * need nine 512-byte sectors at the input to get one 4096-byte sector at the
 * output. That's better. With a 4096-byte sector we can use 89% of the size
 * of the original provider. I find that an acceptable cost.
 * The reliability comes from the fact that every HMAC stored inside the
 * sector is calculated only over the data in the same sector, so it's
 * impossible to write new data and leave the old HMAC, or vice versa.
 *
 * And here is the picture:
 *
 * da0: +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+
 *      |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |256b |
 *      |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data |
 *      +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+
 *      |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |288 bytes |
 *      +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ |224 unused|
 *                                                                                                      +----------+
 * da0.eli: +----+----+----+----+----+----+----+----+----+
 *          |480b|480b|480b|480b|480b|480b|480b|480b|256b|
 *          +----+----+----+----+----+----+----+----+----+
 *          |                 4096 bytes                 |
 *          +--------------------------------------------+
 *
 * PS. You can use any sector size with geli(8). My example uses 4kB, because
 *     it's the most efficient. For 8kB sectors you need 2 extra sectors, so
 *     the cost is the same as for 4kB sectors.
 */
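
/*
 * A minimal sketch (not compiled into the driver) of the arithmetic described
 * above, assuming HMAC/SHA256 (32-byte digests) over 512-byte physical
 * sectors.  The local names mirror the softc fields used throughout this file
 * (sc_alen, sc_data_per_sector, sc_bytes_per_sector); the function itself is
 * purely illustrative.
 */
#if 0
static void
g_eli_auth_layout_example(void)
{
	const u_int encr_secsize = 512;		/* Physical sector size. */
	const u_int decr_secsize = 4096;	/* Logical (.eli) sector size. */
	const u_int alen = 32;			/* HMAC/SHA256 digest length. */
	const u_int data_per_sector = encr_secsize - alen;	/* 480 bytes. */
	/* Physical sectors per logical sector: ceil(4096 / 480) = 9. */
	const u_int nsec = howmany(decr_secsize, data_per_sector);
	/* Bytes consumed on the encrypted provider per logical sector: 4608. */
	const u_int bytes_per_sector = nsec * encr_secsize;

	/* 4096 usable bytes out of 4608 stored, i.e. roughly 89%. */
	printf("%u physical sectors, %u bytes on disk per %u-byte sector\n",
	    nsec, bytes_per_sector, decr_secsize);
}
#endif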

/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> g_eli_auth_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_auth_run -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */

/*
 * Here we generate the key for the HMAC. Every sector has its own HMAC key,
 * so it is not possible to copy sectors.
 * We cannot depend on the fact that every sector has its own IV, because a
 * different IV doesn't change the HMAC when we use the
 * encrypt-then-authenticate method.
 */
static void
g_eli_auth_keygen(struct g_eli_softc *sc, off_t offset, u_char *key)
{
	SHA256_CTX ctx;

	/* Copy the precalculated SHA256 context. */
	bcopy(&sc->sc_akeyctx, &ctx, sizeof(ctx));
	SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset));
	SHA256_Final(key, &ctx);
}
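
/*
 * A minimal sketch (not compiled) of the same derivation in expanded form,
 * assuming that sc_akeyctx is a SHA256 context pre-seeded with the
 * G_ELI_AUTHKEYLEN-byte authentication key when the provider is attached.
 * Under that assumption, the per-sector key is simply
 * SHA256(authkey || byte offset), with the offset hashed as the raw bytes of
 * an off_t.
 */
#if 0
static void
g_eli_auth_keygen_expanded(const u_char *authkey, off_t offset, u_char *key)
{
	SHA256_CTX ctx;

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, authkey, G_ELI_AUTHKEYLEN);
	SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset));
	SHA256_Final(key, &ctx);
}
#endif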

/*
 * This function is called after the data has been read and decrypted.
 *
 * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> G_ELI_AUTH_READ_DONE -> g_io_deliver
 */
static int
g_eli_auth_read_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct bio *bp;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	sc = bp->bio_to->geom->softc;
	if (crp->crp_etype == 0) {
		bp->bio_completed += crp->crp_payload_length;
		G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%d completed=%jd).",
		    bp->bio_inbed, bp->bio_children, crp->crp_payload_length, (intmax_t)bp->bio_completed);
	} else {
		u_int nsec, decr_secsize, encr_secsize, rel_sec;
		int *errorp;

		/* Sector size of the decrypted provider, e.g. 4096. */
		decr_secsize = bp->bio_to->sectorsize;
		/* The real sector size of the encrypted provider, e.g. 512. */
		encr_secsize =
		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
		/* Number of sectors from the decrypted provider, e.g. 2. */
		nsec = bp->bio_length / decr_secsize;
		/* Number of sectors from the encrypted provider, e.g. 18. */
		nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
		/* Which relative sector this request decrypted. */
		rel_sec = ((crp->crp_buf.cb_buf + crp->crp_payload_start) -
		    (char *)bp->bio_driver2) / encr_secsize;

		/*
		 * Record the error in the per-sector error array that lives
		 * right after the encrypted data in bp->bio_driver2.
		 */
		errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec +
		    sizeof(int) * rel_sec);
		*errorp = crp->crp_etype;
		G_ELI_DEBUG(1,
		    "Crypto READ request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0 || bp->bio_error == EINTEGRITY)
			bp->bio_error = crp->crp_etype == EBADMSG ?
			    EINTEGRITY : crp->crp_etype;
	}
	if (crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * Do we have all sectors already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);

	if (bp->bio_error == 0) {
		u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
		u_char *srcdata, *dstdata;

		/* Sector size of the decrypted provider, e.g. 4096. */
		decr_secsize = bp->bio_to->sectorsize;
		/* The real sector size of the encrypted provider, e.g. 512. */
		encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
		/* Number of data bytes in one encrypted sector, e.g. 480. */
		data_secsize = sc->sc_data_per_sector;
		/* Number of sectors from the decrypted provider, e.g. 2. */
		nsec = bp->bio_length / decr_secsize;
		/* Number of sectors from the encrypted provider, e.g. 18. */
		nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
		/* Last sector number in every big sector, e.g. 9. */
		lsec = sc->sc_bytes_per_sector / encr_secsize;

		srcdata = bp->bio_driver2;
		dstdata = bp->bio_data;

		for (i = 1; i <= nsec; i++) {
			data_secsize = sc->sc_data_per_sector;
			if ((i % lsec) == 0)
				data_secsize = decr_secsize % data_secsize;
			bcopy(srcdata + sc->sc_alen, dstdata, data_secsize);
			srcdata += encr_secsize;
			dstdata += data_secsize;
		}
	} else if (bp->bio_error == EINTEGRITY) {
		u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
		int *errorp;
		off_t coroff, corsize, dstoff;

		/* Sector size of the decrypted provider, e.g. 4096. */
		decr_secsize = bp->bio_to->sectorsize;
		/* The real sector size of the encrypted provider, e.g. 512. */
		encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
		/* Number of data bytes in one encrypted sector, e.g. 480. */
		data_secsize = sc->sc_data_per_sector;
		/* Number of sectors from the decrypted provider, e.g. 2. */
		nsec = bp->bio_length / decr_secsize;
		/* Number of sectors from the encrypted provider, e.g. 18. */
		nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
		/* Last sector number in every big sector, e.g. 9. */
		lsec = sc->sc_bytes_per_sector / encr_secsize;

		errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec);
		coroff = -1;
		corsize = 0;
		dstoff = bp->bio_offset;

		for (i = 1; i <= nsec; i++) {
			data_secsize = sc->sc_data_per_sector;
			if ((i % lsec) == 0)
				data_secsize = decr_secsize % data_secsize;
			if (errorp[i - 1] == EBADMSG) {
				/*
				 * Corruption detected. Remember the offset if
				 * this is the first corrupted sector and
				 * increase the size.
				 */
				if (coroff == -1)
					coroff = dstoff;
				corsize += data_secsize;
			} else {
				/*
				 * No corruption, good.
				 * Report previous corruption if there was one.
				 */
				if (coroff != -1) {
					G_ELI_DEBUG(0, "%s: Failed to authenticate %jd "
					    "bytes of data at offset %jd.",
					    sc->sc_name, (intmax_t)corsize,
					    (intmax_t)coroff);
					coroff = -1;
					corsize = 0;
				}
			}
			dstoff += data_secsize;
		}
		/* Report previous corruption if there was one. */
		if (coroff != -1) {
			G_ELI_DEBUG(0, "%s: Failed to authenticate %jd "
			    "bytes of data at offset %jd.",
			    sc->sc_name, (intmax_t)corsize, (intmax_t)coroff);
		}
	}
	g_eli_free_data(bp);
	if (bp->bio_error != 0) {
		if (bp->bio_error != EINTEGRITY) {
			G_ELI_LOGREQ(0, bp,
			    "Crypto READ request failed (error=%d).",
			    bp->bio_error);
		}
		bp->bio_completed = 0;
	}
	/*
	 * The read is finished, send it up.
	 */
	g_io_deliver(bp, bp->bio_error);
	atomic_subtract_int(&sc->sc_inflight, 1);
	return (0);
}
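
/*
 * A minimal sketch (not compiled) of the corruption reporting above, applied
 * to a hypothetical per-sector error array for one 4096-byte logical sector
 * (nine encrypted sectors).  If the fourth and fifth encrypted sectors fail
 * verification, the failures are coalesced and a single 960-byte range
 * starting 1440 bytes into the request is reported.
 */
#if 0
static void
g_eli_auth_corruption_example(void)
{
	const int errors[9] = { 0, 0, 0, EBADMSG, EBADMSG, 0, 0, 0, 0 };
	off_t coroff = -1, corsize = 0, dstoff = 0;
	u_int i;

	for (i = 0; i < 9; i++) {
		/* The last encrypted sector carries only 256 data bytes. */
		u_int data_secsize = (i == 8) ? 256 : 480;

		if (errors[i] == EBADMSG) {
			if (coroff == -1)
				coroff = dstoff;
			corsize += data_secsize;
		} else if (coroff != -1) {
			printf("failed to authenticate %jd bytes at offset %jd\n",
			    (intmax_t)corsize, (intmax_t)coroff);
			coroff = -1;
			corsize = 0;
		}
		dstoff += data_secsize;
	}
	if (coroff != -1)
		printf("failed to authenticate %jd bytes at offset %jd\n",
		    (intmax_t)corsize, (intmax_t)coroff);
}
#endif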

/*
 * This function is called after data encryption.
 *
 * g_eli_start -> g_eli_auth_run -> G_ELI_AUTH_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static int
g_eli_auth_write_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *bp, *cbp, *cbp2;
	u_int nsec;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
	} else {
		G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	sc = bp->bio_to->geom->softc;
	if (crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * Are all sectors encrypted already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
		    bp->bio_error);
		g_eli_free_data(bp);
		cbp = bp->bio_driver1;
		bp->bio_driver1 = NULL;
		g_destroy_bio(cbp);
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return (0);
	}
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	cbp->bio_to = cp->provider;
	cbp->bio_done = g_eli_write_done;

	/* Number of sectors from the decrypted provider, e.g. 1. */
	nsec = bp->bio_length / bp->bio_to->sectorsize;
	/* Number of sectors from the encrypted provider, e.g. 9. */
	nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize;

	cbp->bio_length = cp->provider->sectorsize * nsec;
	cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;
	cbp->bio_data = bp->bio_driver2;

	/*
	 * We write more than what is requested, so we have to be ready to
	 * write more than maxphys.
	 */
	cbp2 = NULL;
	if (cbp->bio_length > maxphys) {
		cbp2 = g_duplicate_bio(bp);
		cbp2->bio_length = cbp->bio_length - maxphys;
		cbp2->bio_data = cbp->bio_data + maxphys;
		cbp2->bio_offset = cbp->bio_offset + maxphys;
		cbp2->bio_to = cp->provider;
		cbp2->bio_done = g_eli_write_done;
		cbp->bio_length = maxphys;
	}
	/*
	 * Send the encrypted data to the provider.
	 */
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	bp->bio_inbed = 0;
	bp->bio_children = (cbp2 != NULL ? 2 : 1);
	g_io_request(cbp, cp);
	if (cbp2 != NULL) {
		G_ELI_LOGREQ(2, cbp2, "Sending request.");
		g_io_request(cbp2, cp);
	}
	return (0);
}

void
g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *cbp, *cbp2;
	size_t size;
	off_t nsec;

	G_ELI_SETWORKER(bp->bio_pflags, 0);

	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	cbp->bio_to = cp->provider;
	cbp->bio_done = g_eli_read_done;

	/* Number of sectors from the decrypted provider, e.g. 1. */
	nsec = bp->bio_length / bp->bio_to->sectorsize;
	/* Number of sectors from the encrypted provider, e.g. 9. */
	nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize;

	cbp->bio_length = cp->provider->sectorsize * nsec;
	size = cbp->bio_length;
	size += sizeof(int) * nsec;
	size += G_ELI_AUTH_SECKEYLEN * nsec;
	cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;
	if (!g_eli_alloc_data(bp, size)) {
		G_ELI_LOGREQ(0, bp, "Crypto auth read request failed (ENOMEM)");
		g_destroy_bio(cbp);
		bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return;
	}
	cbp->bio_data = bp->bio_driver2;

	/* Clear the error array. */
	memset((char *)bp->bio_driver2 + cbp->bio_length, 0,
	    sizeof(int) * nsec);

	/*
	 * We read more than what is requested, so we have to be ready to
	 * read more than maxphys.
	 */
	cbp2 = NULL;
	if (cbp->bio_length > maxphys) {
		cbp2 = g_duplicate_bio(bp);
		cbp2->bio_length = cbp->bio_length - maxphys;
		cbp2->bio_data = cbp->bio_data + maxphys;
		cbp2->bio_offset = cbp->bio_offset + maxphys;
		cbp2->bio_to = cp->provider;
		cbp2->bio_done = g_eli_read_done;
		cbp->bio_length = maxphys;
	}
	/*
	 * Read the encrypted data from the provider.
	 */
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	g_io_request(cbp, cp);
	if (cbp2 != NULL) {
		G_ELI_LOGREQ(2, cbp2, "Sending request.");
		g_io_request(cbp2, cp);
	}
}
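
/*
 * For reference, the scratch buffer (bp->bio_driver2) allocated above for the
 * read path consists of three consecutive regions, which g_eli_auth_run() and
 * g_eli_auth_read_done() index into:
 *
 *	nsec * encr_secsize bytes		encrypted sectors read from disk
 *	nsec * sizeof(int) bytes		per-sector crypto error slots
 *	nsec * G_ELI_AUTH_SECKEYLEN bytes	per-sector HMAC keys
 */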

/*
 * This is the main function responsible for cryptography (i.e. communication
 * with the crypto(9) subsystem).
 *
 * BIO_READ:
 *	g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> G_ELI_AUTH_RUN -> g_eli_auth_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> G_ELI_AUTH_RUN -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
void
g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp)
{
	struct g_eli_softc *sc;
	struct cryptopq crpq;
	struct cryptop *crp;
	u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
	off_t dstoff;
	u_char *p, *data, *authkey, *plaindata;
	int error;
	bool batch;

	G_ELI_LOGREQ(3, bp, "%s", __func__);

	G_ELI_SETWORKER(bp->bio_pflags, wr->w_number);
	sc = wr->w_softc;
	/* Sector size of the decrypted provider, e.g. 4096. */
	decr_secsize = bp->bio_to->sectorsize;
	/* The real sector size of the encrypted provider, e.g. 512. */
	encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
	/* Number of data bytes in one encrypted sector, e.g. 480. */
	data_secsize = sc->sc_data_per_sector;
	/* Number of sectors from the decrypted provider, e.g. 2. */
	nsec = bp->bio_length / decr_secsize;
	/* Number of sectors from the encrypted provider, e.g. 18. */
	nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
	/* Last sector number in every big sector, e.g. 9. */
	lsec = sc->sc_bytes_per_sector / encr_secsize;
	/* Destination offset, used for IV generation. */
	dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;

	plaindata = bp->bio_data;
	if (bp->bio_cmd == BIO_READ) {
		data = bp->bio_driver2;
		p = data + encr_secsize * nsec;
		p += sizeof(int) * nsec;
	} else {
		size_t size;

		size = encr_secsize * nsec;
		size += G_ELI_AUTH_SECKEYLEN * nsec;
		size += sizeof(uintptr_t);	/* Space for alignment. */
		if (!g_eli_alloc_data(bp, size)) {
			G_ELI_LOGREQ(0, bp, "Crypto request failed (ENOMEM)");
			if (bp->bio_driver1 != NULL) {
				g_destroy_bio(bp->bio_driver1);
				bp->bio_driver1 = NULL;
			}
			bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			if (sc != NULL)
				atomic_subtract_int(&sc->sc_inflight, 1);
			return;
		}
		data = bp->bio_driver2;
		p = data + encr_secsize * nsec;
	}
	bp->bio_inbed = 0;
	bp->bio_children = nsec;

#if defined(__mips_n64) || defined(__mips_o64)
	p = (char *)roundup((uintptr_t)p, sizeof(uintptr_t));
#endif

	TAILQ_INIT(&crpq);
	batch = atomic_load_int(&g_eli_batch) != 0;

	for (i = 1; i <= nsec; i++, dstoff += encr_secsize) {
		crp = crypto_getreq(wr->w_sid, M_WAITOK);
		authkey = (u_char *)p;	p += G_ELI_AUTH_SECKEYLEN;

		data_secsize = sc->sc_data_per_sector;
		if ((i % lsec) == 0) {
			data_secsize = decr_secsize % data_secsize;
			/*
			 * The last encrypted sector of each decrypted sector
			 * is only partially filled.
			 */
			if (bp->bio_cmd == BIO_WRITE)
				memset(data + sc->sc_alen + data_secsize, 0,
				    encr_secsize - sc->sc_alen - data_secsize);
		} else if (data_secsize + sc->sc_alen != encr_secsize) {
			/*
			 * If the HMAC size is not a multiple of 128 bits, the
			 * per-sector data size is rounded down to ensure that
			 * encryption can be performed without requiring any
			 * padding.  In this case, each sector contains unused
			 * bytes.
			 */
			if (bp->bio_cmd == BIO_WRITE)
				memset(data + sc->sc_alen + data_secsize, 0,
				    encr_secsize - sc->sc_alen - data_secsize);
		}

		if (bp->bio_cmd == BIO_WRITE) {
			bcopy(plaindata, data + sc->sc_alen, data_secsize);
			plaindata += data_secsize;
		}

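		/*
		 * Each physical sector is handed to crypto(9) as one
		 * contiguous buffer: the HMAC occupies the first sc_alen
		 * bytes (crp_digest_start = 0) and the data occupies the
		 * following data_secsize bytes (crp_payload_start = sc_alen).
		 */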
		crypto_use_buf(crp, data, sc->sc_alen + data_secsize);
		crp->crp_opaque = (void *)bp;
		data += encr_secsize;
		crp->crp_flags = CRYPTO_F_CBIFSYNC;
		if (bp->bio_cmd == BIO_WRITE) {
			crp->crp_callback = g_eli_auth_write_done;
			crp->crp_op = CRYPTO_OP_ENCRYPT |
			    CRYPTO_OP_COMPUTE_DIGEST;
		} else {
			crp->crp_callback = g_eli_auth_read_done;
			crp->crp_op = CRYPTO_OP_DECRYPT |
			    CRYPTO_OP_VERIFY_DIGEST;
		}

		crp->crp_digest_start = 0;
		crp->crp_payload_start = sc->sc_alen;
		crp->crp_payload_length = data_secsize;
		if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0) {
			crp->crp_cipher_key = g_eli_key_hold(sc, dstoff,
			    encr_secsize);
		}
		if (g_eli_ivlen(sc->sc_ealgo) != 0) {
			crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
			g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv,
			    sizeof(crp->crp_iv));
		}

		g_eli_auth_keygen(sc, dstoff, authkey);
		crp->crp_auth_key = authkey;

		if (batch) {
			TAILQ_INSERT_TAIL(&crpq, crp, crp_next);
		} else {
			error = crypto_dispatch(crp);
			KASSERT(error == 0,
			    ("crypto_dispatch() failed (error=%d)", error));
		}
	}

	if (batch)
		crypto_dispatch_batch(&crpq, 0);
}