/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>
/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */

/*
 * Copy data from a (potentially unmapped) bio to a kernel-space buffer.
 *
 * The buffer must be able to hold at least bp->bio_length bytes.
 */
static void
g_eli_bio_copyin(struct bio *bp, void *kaddr)
{
	struct uio uio;
	struct iovec iov[1];

	iov[0].iov_base = kaddr;
	iov[0].iov_len = bp->bio_length;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = bp->bio_length;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
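	/*
	 * With uio_rw set to UIO_READ, uiomove_fromphys() copies from the
	 * bio's pages into the iovec, i.e. into kaddr.
	 */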
	uiomove_fromphys(bp->bio_ma, bp->bio_ma_offset, bp->bio_length, &uio);
}

/*
 * This function is called after the data has been read and decrypted.
 *
 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver
 */
static int
g_eli_crypto_read_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct bio *bp;

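	/*
	 * EAGAIN from the crypto layer means the request should be
	 * resubmitted (e.g. its session had to be migrated to another
	 * driver).  If g_eli_crypto_rerun() succeeds, this callback will be
	 * invoked again later, so there is nothing more to do now.
	 */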
	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
		bp->bio_completed += crp->crp_payload_length;
	} else {
		G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
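	/*
	 * Drop the reference on the per-sector key taken with
	 * g_eli_key_hold() in g_eli_crypto_run().
	 */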
	sc = bp->bio_to->geom->softc;
	if (sc != NULL && crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * Do we have all sectors already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);

	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).",
		    bp->bio_error);
		bp->bio_completed = 0;
	}
	/*
	 * Read is finished, send it up.
	 */
	g_io_deliver(bp, bp->bio_error);
	if (sc != NULL)
		atomic_subtract_int(&sc->sc_inflight, 1);
	return (0);
}

/*
 * This function is called after the data has been encrypted.
 *
 * g_eli_start -> g_eli_crypto_run -> G_ELI_CRYPTO_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static int
g_eli_crypto_write_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct bio *bp, *cbp;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
	} else {
		G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	gp = bp->bio_to->geom;
	sc = gp->softc;
	if (crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * Have all sectors been encrypted already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
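	/*
	 * Reset the accounting: the single cloned bio sent down to the
	 * provider below is now the only outstanding child.
	 */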
	bp->bio_inbed = 0;
	bp->bio_children = 1;
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
		    bp->bio_error);
		g_eli_free_data(bp);
		g_destroy_bio(cbp);
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return (0);
	}
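	/*
	 * Point the cloned bio at the encrypted buffer that
	 * g_eli_crypto_run() stored in bio_driver2.
	 */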
	cbp->bio_data = bp->bio_driver2;
	/*
	 * Clear BIO_UNMAPPED, which was inherited from where we cloned the
	 * bio in g_eli_start, because we manually set bio_data.
	 */
	cbp->bio_flags &= ~BIO_UNMAPPED;
	cbp->bio_done = g_eli_write_done;
	cp = LIST_FIRST(&gp->consumer);
	cbp->bio_to = cp->provider;
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/*
	 * Send encrypted data to the provider.
	 */
	g_io_request(cbp, cp);
	return (0);
}

/*
 * This function is called to read the encrypted data.
 *
 * g_eli_start -> G_ELI_CRYPTO_READ -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 */
void
g_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker)
{
	struct g_consumer *cp;
	struct bio *cbp;

	if (!fromworker) {
		/*
		 * We are not called from the worker thread, so check if the
		 * device is suspended.
		 */
		mtx_lock(&sc->sc_queue_mtx);
		if (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
			/*
			 * If the device is suspended, place the request on
			 * the queue so it can be handled after resume.
			 */
			G_ELI_DEBUG(0, "device suspended, move onto queue");
			bioq_insert_tail(&sc->sc_queue, bp);
			mtx_unlock(&sc->sc_queue_mtx);
			wakeup(sc);
			return;
		}
		atomic_add_int(&sc->sc_inflight, 1);
		mtx_unlock(&sc->sc_queue_mtx);
	}
	G_ELI_SETWORKER(bp->bio_pflags, 0);
	bp->bio_driver2 = NULL;
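	/*
	 * bio_driver1 holds the bio cloned in g_eli_start(); redirect it to
	 * our consumer's provider and send it down.
	 */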
	cbp = bp->bio_driver1;
	cbp->bio_done = g_eli_read_done;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp->bio_to = cp->provider;
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/*
	 * Read encrypted data from provider.
	 */
	g_io_request(cbp, cp);
}

/*
 * This is the main function responsible for cryptography (i.e., communication
 * with the crypto(9) subsystem).
 *
 * BIO_READ:
 *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> G_ELI_CRYPTO_RUN -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> G_ELI_CRYPTO_RUN -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
void
g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp)
{
	struct g_eli_softc *sc;
	struct cryptopq crpq;
	struct cryptop *crp;
	vm_page_t *pages;
	u_int i, nsec, secsize;
	off_t dstoff;
	u_char *data = NULL;
	int error __diagused, pages_offset;
	bool batch;

	G_ELI_LOGREQ(3, bp, "%s", __func__);

	G_ELI_SETWORKER(bp->bio_pflags, wr->w_number);
	sc = wr->w_softc;
	secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize;
	nsec = bp->bio_length / secsize;

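	/*
	 * Each sector is handled by its own crypto request; the completion
	 * callbacks count them via bio_inbed against bio_children.
	 */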
	bp->bio_inbed = 0;
	bp->bio_children = nsec;

	/*
	 * If we are writing, we cannot destroy the current bio_data content,
	 * so we need to allocate more memory for the encrypted data.
	 */
	if (bp->bio_cmd == BIO_WRITE) {
		if (!g_eli_alloc_data(bp, bp->bio_length)) {
			G_ELI_LOGREQ(0, bp, "Crypto request failed (ENOMEM).");
			if (bp->bio_driver1 != NULL) {
				g_destroy_bio(bp->bio_driver1);
				bp->bio_driver1 = NULL;
			}
			bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			if (sc != NULL)
				atomic_subtract_int(&sc->sc_inflight, 1);
			return;
		}
		data = bp->bio_driver2;
		/*
		 * This copy could be eliminated by using crypto's output
		 * buffer instead of a single overwriting buffer.
		 */
		if ((bp->bio_flags & BIO_UNMAPPED) != 0)
			g_eli_bio_copyin(bp, data);
		else
			bcopy(bp->bio_data, data, bp->bio_length);
	} else {
		if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
			pages = bp->bio_ma;
			pages_offset = bp->bio_ma_offset;
		} else {
			data = bp->bio_data;
		}
	}

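	/*
	 * Snapshot the g_eli_batch sysctl once; in batch mode the requests
	 * are queued locally and dispatched together after the loop.
	 */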
	TAILQ_INIT(&crpq);
	batch = atomic_load_int(&g_eli_batch) != 0;

	for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) {
		crp = crypto_getreq(wr->w_sid, M_WAITOK);

		if (data != NULL) {
			crypto_use_buf(crp, data, secsize);
			data += secsize;
		} else {
			MPASS(pages != NULL);
			crypto_use_vmpage(crp, pages, secsize, pages_offset);
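			/*
			 * Advance the page pointer and intra-page offset
			 * past the sector just queued.
			 */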
			pages_offset += secsize;
			pages += pages_offset >> PAGE_SHIFT;
			pages_offset &= PAGE_MASK;
		}
		crp->crp_opaque = (void *)bp;
		if (bp->bio_cmd == BIO_WRITE) {
			crp->crp_op = CRYPTO_OP_ENCRYPT;
			crp->crp_callback = g_eli_crypto_write_done;
		} else /* if (bp->bio_cmd == BIO_READ) */ {
			crp->crp_op = CRYPTO_OP_DECRYPT;
			crp->crp_callback = g_eli_crypto_read_done;
		}
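		/*
		 * CRYPTO_F_CBIFSYNC allows the callback to be invoked
		 * directly from crypto_dispatch() when the driver completes
		 * the request synchronously.
		 */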
		crp->crp_flags = CRYPTO_F_CBIFSYNC;
		crp->crp_payload_start = 0;
		crp->crp_payload_length = secsize;
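		/*
		 * Unless a single key is used for the whole provider, hold
		 * the key for this sector's offset; the completion callback
		 * drops the reference.
		 */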
		if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0) {
			crp->crp_cipher_key = g_eli_key_hold(sc, dstoff,
			    secsize);
		}
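		/*
		 * For algorithms that take an IV, derive it from the
		 * sector's offset so every sector is encrypted independently.
		 */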
		if (g_eli_ivlen(sc->sc_ealgo) != 0) {
			crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
			g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv,
			    sizeof(crp->crp_iv));
		}

		if (batch) {
			TAILQ_INSERT_TAIL(&crpq, crp, crp_next);
		} else {
			error = crypto_dispatch(crp);
			KASSERT(error == 0,
			    ("crypto_dispatch() failed (error=%d)", error));
		}
	}

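	/*
	 * In batch mode, hand all queued requests to the crypto layer at
	 * once to amortize the dispatch overhead.
	 */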
	if (batch)
		crypto_dispatch_batch(&crpq, 0);
}