/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>

/*
 * The data layout description when integrity verification is configured.
 *
 * One of the most important assumptions here is that authenticated data and
 * its HMAC have to be stored in the same place (namely in the same sector) to
 * make it work reliably.
 * The problem is that file systems only work with sectors whose size is a
 * power of two and a multiple of 512 bytes.
 * My idea to implement it is as follows.
 * Let's store the HMAC in the sector. This is a must. This leaves us 480 bytes
 * for data. We can't use that directly (i.e. we can't create a provider with a
 * 480-byte sector size). We need another sector from which we take only 32
 * bytes of data and store the HMAC of this data as well. This takes two
 * sectors from the original provider at the input and leaves us one sector of
 * authenticated data at the output. Not very efficient, but you get the idea.
 * Now, let's assume we want to create a provider with a 4096-byte sector.
 * To output 4096 bytes of authenticated data we need 8x480 plus 1x256 bytes,
 * so we need nine 512-byte sectors at the input to get one 4096-byte sector at
 * the output. That's better. With a 4096-byte sector we can use 89% of the
 * size of the original provider, which I find an acceptable cost.
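 * To spell out the arithmetic: 8 x 480 + 1 x 256 = 4096 bytes of data are
 * stored in 9 x 512 = 4608 bytes of the original provider, which gives
 * 4096 / 4608 = ~89% utilization.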
 * The reliability comes from the fact that every HMAC stored inside the sector
 * is calculated only over the data in the same sector, so it's impossible to
 * write new data and leave the old HMAC, or vice versa.
 *
 * And here is the picture:
 *
 * da0: +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+
 *      |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |256b |
 *      |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data |
 *      +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+
 *      |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |288 bytes |
 *      +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ |224 unused|
 *                                                                                                      +----------+
 * da0.eli: +----+----+----+----+----+----+----+----+----+
 *          |480b|480b|480b|480b|480b|480b|480b|480b|256b|
 *          +----+----+----+----+----+----+----+----+----+
 *          |                 4096 bytes                 |
 *          +--------------------------------------------+
 *
 * PS. You can use any sector size with geli(8). My example uses 4kB because
 * it's the most efficient. For 8kB sectors you need 2 extra sectors, so the
 * cost is the same as for 4kB sectors.
 */

/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> g_eli_auth_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_auth_run -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */

/*
 * Here we generate the key for the HMAC. Every sector has its own HMAC key, so
 * it is not possible to copy sectors.
 * We cannot depend on the fact that every sector has its own IV, because a
 * different IV doesn't change the HMAC when we use the encrypt-then-authenticate
 * method.
 */
static void
g_eli_auth_keygen(struct g_eli_softc *sc, off_t offset, u_char *key)
{
	SHA256_CTX ctx;

	/* Copy precalculated SHA256 context. */
	bcopy(&sc->sc_akeyctx, &ctx, sizeof(ctx));
	SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset));
	SHA256_Final(key, &ctx);
}

/*
 * This function is called after we read and decrypt data.
 *
 * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> G_ELI_AUTH_READ_DONE -> g_io_deliver
 */
static int
g_eli_auth_read_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct bio *bp;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	sc = bp->bio_to->geom->softc;
	if (crp->crp_etype == 0) {
		bp->bio_completed += crp->crp_payload_length;
		G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%d completed=%jd).",
		    bp->bio_inbed, bp->bio_children, crp->crp_payload_length, (intmax_t)bp->bio_completed);
	} else {
		u_int nsec, decr_secsize, encr_secsize, rel_sec;
		int *errorp;

		/* Sector size of the decrypted provider, e.g. 4096. */
		decr_secsize = bp->bio_to->sectorsize;
		/* The real sector size of the encrypted provider, e.g. 512. */
		encr_secsize =
		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
		/* Number of sectors from the decrypted provider, e.g. 2. */
		nsec = bp->bio_length / decr_secsize;
		/* Number of sectors from the encrypted provider, e.g. 18. */
		nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
		/* Which relative sector this request decrypted. */
		rel_sec = ((crp->crp_buf.cb_buf + crp->crp_payload_start) -
		    (char *)bp->bio_driver2) / encr_secsize;

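		/*
		 * Per-sector error codes live right after the encrypted
		 * sectors in the bio_driver2 buffer; record this request's
		 * status in the slot for the sector it covered.
		 */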
		errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec +
		    sizeof(int) * rel_sec);
		*errorp = crp->crp_etype;
		G_ELI_DEBUG(1,
		    "Crypto READ request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0 || bp->bio_error == EINTEGRITY)
			bp->bio_error = crp->crp_etype == EBADMSG ?
			    EINTEGRITY : crp->crp_etype;
	}
	if (crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * Do we have all sectors already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);

	if (bp->bio_error == 0) {
		u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
		u_char *srcdata, *dstdata;

		/* Sector size of the decrypted provider, e.g. 4096. */
		decr_secsize = bp->bio_to->sectorsize;
		/* The real sector size of the encrypted provider, e.g. 512. */
		encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
		/* Number of data bytes in one encrypted sector, e.g. 480. */
		data_secsize = sc->sc_data_per_sector;
		/* Number of sectors from the decrypted provider, e.g. 2. */
		nsec = bp->bio_length / decr_secsize;
		/* Number of sectors from the encrypted provider, e.g. 18. */
		nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
		/* Last sector number in every big sector, e.g. 9. */
		lsec = sc->sc_bytes_per_sector / encr_secsize;

		srcdata = bp->bio_driver2;
		dstdata = bp->bio_data;

		for (i = 1; i <= nsec; i++) {
			data_secsize = sc->sc_data_per_sector;
			if ((i % lsec) == 0)
				data_secsize = decr_secsize % data_secsize;
			bcopy(srcdata + sc->sc_alen, dstdata, data_secsize);
			srcdata += encr_secsize;
			dstdata += data_secsize;
		}
	} else if (bp->bio_error == EINTEGRITY) {
		u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
		int *errorp;
		off_t coroff, corsize, dstoff;

		/* Sector size of the decrypted provider, e.g. 4096. */
		decr_secsize = bp->bio_to->sectorsize;
		/* The real sector size of the encrypted provider, e.g. 512. */
		encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
		/* Number of data bytes in one encrypted sector, e.g. 480. */
		data_secsize = sc->sc_data_per_sector;
		/* Number of sectors from the decrypted provider, e.g. 2. */
		nsec = bp->bio_length / decr_secsize;
		/* Number of sectors from the encrypted provider, e.g. 18. */
		nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
		/* Last sector number in every big sector, e.g. 9. */
		lsec = sc->sc_bytes_per_sector / encr_secsize;

		errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec);
		coroff = -1;
		corsize = 0;
		dstoff = bp->bio_offset;

		for (i = 1; i <= nsec; i++) {
			data_secsize = sc->sc_data_per_sector;
			if ((i % lsec) == 0)
				data_secsize = decr_secsize % data_secsize;
			if (errorp[i - 1] == EBADMSG) {
				/*
				 * Corruption detected, remember the offset if
				 * this is the first corrupted sector and
				 * increase size.
				 */
				if (coroff == -1)
					coroff = dstoff;
				corsize += data_secsize;
			} else {
				/*
				 * No corruption, good.
				 * Report previous corruption if there was one.
				 */
				if (coroff != -1) {
					G_ELI_DEBUG(0, "%s: Failed to authenticate %jd "
					    "bytes of data at offset %jd.",
					    sc->sc_name, (intmax_t)corsize,
					    (intmax_t)coroff);
					coroff = -1;
					corsize = 0;
				}
			}
			dstoff += data_secsize;
		}
		/* Report previous corruption if there was one. */
		if (coroff != -1) {
			G_ELI_DEBUG(0, "%s: Failed to authenticate %jd "
			    "bytes of data at offset %jd.",
			    sc->sc_name, (intmax_t)corsize, (intmax_t)coroff);
		}
	}
	g_eli_free_data(bp);
	if (bp->bio_error != 0) {
		if (bp->bio_error != EINTEGRITY) {
			G_ELI_LOGREQ(0, bp,
			    "Crypto READ request failed (error=%d).",
			    bp->bio_error);
		}
		bp->bio_completed = 0;
	}
	/*
	 * Read is finished, send it up.
	 */
	g_io_deliver(bp, bp->bio_error);
	atomic_subtract_int(&sc->sc_inflight, 1);
	return (0);
}

/*
 * This function is called after data encryption.
 *
 * g_eli_start -> g_eli_auth_run -> G_ELI_AUTH_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static int
g_eli_auth_write_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *bp, *cbp, *cbp2;
	u_int nsec;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
	} else {
		G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	sc = bp->bio_to->geom->softc;
	if (crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * All sectors are already encrypted?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
		    bp->bio_error);
		g_eli_free_data(bp);
		cbp = bp->bio_driver1;
		bp->bio_driver1 = NULL;
		g_destroy_bio(cbp);
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return (0);
	}
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	cbp->bio_to = cp->provider;
	cbp->bio_done = g_eli_write_done;

	/* Number of sectors from the decrypted provider, e.g. 1. */
	nsec = bp->bio_length / bp->bio_to->sectorsize;
	/* Number of sectors from the encrypted provider, e.g. 9. */
	nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize;

	cbp->bio_length = cp->provider->sectorsize * nsec;
	cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;
	cbp->bio_data = bp->bio_driver2;

	/*
	 * We write more than what is requested, so we have to be ready to write
	 * more than maxphys.
	 */
	cbp2 = NULL;
	if (cbp->bio_length > maxphys) {
		cbp2 = g_duplicate_bio(bp);
		cbp2->bio_length = cbp->bio_length - maxphys;
		cbp2->bio_data = cbp->bio_data + maxphys;
		cbp2->bio_offset = cbp->bio_offset + maxphys;
		cbp2->bio_to = cp->provider;
		cbp2->bio_done = g_eli_write_done;
		cbp->bio_length = maxphys;
	}
	/*
	 * Send encrypted data to the provider.
	 */
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	bp->bio_inbed = 0;
	bp->bio_children = (cbp2 != NULL ? 2 : 1);
	g_io_request(cbp, cp);
	if (cbp2 != NULL) {
		G_ELI_LOGREQ(2, cbp2, "Sending request.");
		g_io_request(cbp2, cp);
	}
	return (0);
}

void
g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *cbp, *cbp2;
	size_t size;
	off_t nsec;

	G_ELI_SETWORKER(bp->bio_pflags, 0);

	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	cbp->bio_to = cp->provider;
	cbp->bio_done = g_eli_read_done;

	/* Number of sectors from the decrypted provider, e.g. 1. */
	nsec = bp->bio_length / bp->bio_to->sectorsize;
	/* Number of sectors from the encrypted provider, e.g. 9. */
	nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize;

	cbp->bio_length = cp->provider->sectorsize * nsec;
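	/*
	 * The payload buffer (bio_driver2) will hold the encrypted sectors,
	 * followed by one int per sector to record decryption/authentication
	 * errors, followed by one HMAC key per sector.
	 */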
	size = cbp->bio_length;
	size += sizeof(int) * nsec;
	size += G_ELI_AUTH_SECKEYLEN * nsec;
	cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;
	if (!g_eli_alloc_data(bp, size)) {
		G_ELI_LOGREQ(0, bp, "Crypto auth read request failed (ENOMEM)");
		g_destroy_bio(cbp);
		bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return;
	}
	cbp->bio_data = bp->bio_driver2;

	/* Clear the error array. */
	memset((char *)bp->bio_driver2 + cbp->bio_length, 0,
	    sizeof(int) * nsec);

	/*
	 * We read more than what is requested, so we have to be ready to read
	 * more than maxphys.
	 */
	cbp2 = NULL;
	if (cbp->bio_length > maxphys) {
		cbp2 = g_duplicate_bio(bp);
		cbp2->bio_length = cbp->bio_length - maxphys;
		cbp2->bio_data = cbp->bio_data + maxphys;
		cbp2->bio_offset = cbp->bio_offset + maxphys;
		cbp2->bio_to = cp->provider;
		cbp2->bio_done = g_eli_read_done;
		cbp->bio_length = maxphys;
	}
	/*
	 * Read encrypted data from provider.
	 */
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	g_io_request(cbp, cp);
	if (cbp2 != NULL) {
		G_ELI_LOGREQ(2, cbp2, "Sending request.");
		g_io_request(cbp2, cp);
	}
}

/*
 * This is the main function responsible for cryptography (i.e. communication
 * with the crypto(9) subsystem).
 *
 * BIO_READ:
 *	g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> G_ELI_AUTH_RUN -> g_eli_auth_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> G_ELI_AUTH_RUN -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
void
g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp)
{
	struct g_eli_softc *sc;
	struct cryptopq crpq;
	struct cryptop *crp;
	u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
	off_t dstoff;
	u_char *p, *data, *authkey, *plaindata;
	int error __diagused;
	bool batch;

	G_ELI_LOGREQ(3, bp, "%s", __func__);

	G_ELI_SETWORKER(bp->bio_pflags, wr->w_number);
	sc = wr->w_softc;
	/* Sector size of the decrypted provider, e.g. 4096. */
	decr_secsize = bp->bio_to->sectorsize;
	/* The real sector size of the encrypted provider, e.g. 512. */
	encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
	/* Number of data bytes in one encrypted sector, e.g. 480. */
	data_secsize = sc->sc_data_per_sector;
	/* Number of sectors from the decrypted provider, e.g. 2. */
	nsec = bp->bio_length / decr_secsize;
	/* Number of sectors from the encrypted provider, e.g. 18. */
	nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
	/* Last sector number in every big sector, e.g. 9. */
	lsec = sc->sc_bytes_per_sector / encr_secsize;
	/* Destination offset, used for IV generation. */
	dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;

	plaindata = bp->bio_data;
	if (bp->bio_cmd == BIO_READ) {
		data = bp->bio_driver2;
		p = data + encr_secsize * nsec;
		p += sizeof(int) * nsec;
	} else {
		size_t size;

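		/*
		 * For writes the payload buffer holds the encrypted sectors
		 * followed by the per-sector HMAC keys (plus room to align
		 * the key area).
		 */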
		size = encr_secsize * nsec;
		size += G_ELI_AUTH_SECKEYLEN * nsec;
		size += sizeof(uintptr_t);	/* Space for alignment. */
		if (!g_eli_alloc_data(bp, size)) {
			G_ELI_LOGREQ(0, bp, "Crypto request failed (ENOMEM)");
			if (bp->bio_driver1 != NULL) {
				g_destroy_bio(bp->bio_driver1);
				bp->bio_driver1 = NULL;
			}
			bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			if (sc != NULL)
				atomic_subtract_int(&sc->sc_inflight, 1);
			return;
		}
		data = bp->bio_driver2;
		p = data + encr_secsize * nsec;
	}
	bp->bio_inbed = 0;
	bp->bio_children = nsec;

#if defined(__mips_n64) || defined(__mips_o64)
	p = (char *)roundup((uintptr_t)p, sizeof(uintptr_t));
#endif

	TAILQ_INIT(&crpq);
	batch = atomic_load_int(&g_eli_batch) != 0;

	for (i = 1; i <= nsec; i++, dstoff += encr_secsize) {
		crp = crypto_getreq(wr->w_sid, M_WAITOK);
		authkey = (u_char *)p; p += G_ELI_AUTH_SECKEYLEN;

		data_secsize = sc->sc_data_per_sector;
		if ((i % lsec) == 0) {
			data_secsize = decr_secsize % data_secsize;
			/*
			 * Last encrypted sector of each decrypted sector is
			 * only partially filled.
			 */
			if (bp->bio_cmd == BIO_WRITE)
				memset(data + sc->sc_alen + data_secsize, 0,
				    encr_secsize - sc->sc_alen - data_secsize);
		} else if (data_secsize + sc->sc_alen != encr_secsize) {
			/*
			 * If the HMAC size is not a multiple of 128 bits, the
			 * per-sector data size is rounded down to ensure that
			 * encryption can be performed without requiring any
			 * padding.  In this case, each sector contains unused
			 * bytes.
			 */
			if (bp->bio_cmd == BIO_WRITE)
				memset(data + sc->sc_alen + data_secsize, 0,
				    encr_secsize - sc->sc_alen - data_secsize);
		}

		if (bp->bio_cmd == BIO_WRITE) {
			bcopy(plaindata, data + sc->sc_alen, data_secsize);
			plaindata += data_secsize;
		}

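		/*
		 * Each encrypted sector stores the HMAC (sc_alen bytes) first,
		 * followed by data_secsize bytes of data; the crypto request
		 * covers exactly that region.
		 */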
		crypto_use_buf(crp, data, sc->sc_alen + data_secsize);
		crp->crp_opaque = (void *)bp;
		data += encr_secsize;
		crp->crp_flags = CRYPTO_F_CBIFSYNC;
		if (bp->bio_cmd == BIO_WRITE) {
			crp->crp_callback = g_eli_auth_write_done;
			crp->crp_op = CRYPTO_OP_ENCRYPT |
			    CRYPTO_OP_COMPUTE_DIGEST;
		} else {
			crp->crp_callback = g_eli_auth_read_done;
			crp->crp_op = CRYPTO_OP_DECRYPT |
			    CRYPTO_OP_VERIFY_DIGEST;
		}

		crp->crp_digest_start = 0;
		crp->crp_payload_start = sc->sc_alen;
		crp->crp_payload_length = data_secsize;
		if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0) {
			crp->crp_cipher_key = g_eli_key_hold(sc, dstoff,
			    encr_secsize);
		}
		if (g_eli_ivlen(sc->sc_ealgo) != 0) {
			crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
			g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv,
			    sizeof(crp->crp_iv));
		}

		g_eli_auth_keygen(sc, dstoff, authkey);
		crp->crp_auth_key = authkey;

		if (batch) {
			TAILQ_INSERT_TAIL(&crpq, crp, crp_next);
		} else {
			error = crypto_dispatch(crp);
			KASSERT(error == 0,
			    ("crypto_dispatch() failed (error=%d)", error));
		}
	}

	if (batch)
		crypto_dispatch_batch(&crpq, 0);
}