/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>

/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
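/*
 * Note the asymmetry between the two paths: a read can only be decrypted
 * once the ciphertext has arrived from the provider, while a write must be
 * encrypted before it may be sent down.  This is why the crypto completion
 * callback delivers the bio for reads, but issues the disk I/O for writes.
 */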

/*
 * Copy data from a (potentially unmapped) bio to a kernelspace buffer.
 *
 * The buffer must have at least as much room as bp->bio_length.
 */
static void
g_eli_bio_copyin(struct bio *bp, void *kaddr)
{
	struct uio uio;
	struct iovec iov[1];

	iov[0].iov_base = kaddr;
	iov[0].iov_len = bp->bio_length;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = bp->bio_length;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
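	/*
	 * UIO_READ means "read from the pages": uiomove_fromphys() copies
	 * bp->bio_length bytes out of the bio's vm_page_t array into the
	 * kernel buffer described by the iovec above.
	 */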
	uiomove_fromphys(bp->bio_ma, bp->bio_ma_offset, bp->bio_length, &uio);
}
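
/*
 * For reference only: the inverse copy (kernel buffer back into an unmapped
 * bio) would be the same uio setup with the direction reversed, e.g.:
 *
 *	uio.uio_rw = UIO_WRITE;
 *	uiomove_fromphys(bp->bio_ma, bp->bio_ma_offset, bp->bio_length, &uio);
 *
 * This file never needs such a helper, because reads are decrypted directly
 * in the bio's pages via crypto_use_vmpage() in g_eli_crypto_run().
 */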

/*
 * This function is called after the data has been read and decrypted.
 *
 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver
 */
static int
g_eli_crypto_read_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct bio *bp;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
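	/*
	 * The parent bio was split into bio_children per-sector crypto
	 * requests in g_eli_crypto_run(); count this one as completed.
	 */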
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
		bp->bio_completed += crp->crp_payload_length;
	} else {
		G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	sc = bp->bio_to->geom->softc;
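	/*
	 * crp_cipher_key is only set when the device uses per-offset Data
	 * Keys (no G_ELI_FLAG_SINGLE_KEY); the reference taken with
	 * g_eli_key_hold() in g_eli_crypto_run() must be dropped here.
	 */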
	if (sc != NULL && crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * Do we have all sectors decrypted already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);

	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).",
		    bp->bio_error);
		bp->bio_completed = 0;
	}
	/*
	 * The read is finished, send it up.
	 */
	g_io_deliver(bp, bp->bio_error);
	if (sc != NULL)
		atomic_subtract_int(&sc->sc_inflight, 1);
	return (0);
}

/*
 * This function is called after the data has been encrypted.
 *
 * g_eli_start -> g_eli_crypto_run -> G_ELI_CRYPTO_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static int
g_eli_crypto_write_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct bio *bp, *cbp;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
	} else {
		G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	gp = bp->bio_to->geom;
	sc = gp->softc;
	if (crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);
	/*
	 * Are all sectors encrypted already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
	bp->bio_inbed = 0;
	bp->bio_children = 1;
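	/*
	 * bio_driver1 holds the bio cloned in g_eli_start() that will carry
	 * the request down to the provider; bio_driver2 holds the buffer
	 * with the now-encrypted payload.
	 */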
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
		    bp->bio_error);
		g_eli_free_data(bp);
		g_destroy_bio(cbp);
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return (0);
	}
	cbp->bio_data = bp->bio_driver2;
	/*
	 * Clear BIO_UNMAPPED, which was inherited from the bio we cloned in
	 * g_eli_start(), because we set bio_data manually.
	 */
	cbp->bio_flags &= ~BIO_UNMAPPED;
	cbp->bio_done = g_eli_write_done;
	cp = LIST_FIRST(&gp->consumer);
	cbp->bio_to = cp->provider;
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/*
	 * Send the encrypted data down to the provider.
	 */
	g_io_request(cbp, cp);
	return (0);
}

/*
 * This function is called to read the encrypted data.
 *
 * g_eli_start -> G_ELI_CRYPTO_READ -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 */
void
g_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker)
{
	struct g_consumer *cp;
	struct bio *cbp;

	if (!fromworker) {
		/*
		 * We are not called from the worker thread, so check if
		 * the device is suspended.
		 */
		mtx_lock(&sc->sc_queue_mtx);
		if (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
			/*
			 * If the device is suspended, place the request on
			 * the queue, so it can be handled after resume.
			 */
			G_ELI_DEBUG(0, "device suspended, move onto queue");
			bioq_insert_tail(&sc->sc_queue, bp);
			mtx_unlock(&sc->sc_queue_mtx);
			wakeup(sc);
			return;
		}
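		/*
		 * Account for the request while the queue mutex is still
		 * held, so that suspend can wait for sc_inflight to drain
		 * before discarding the keys.
		 */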
		atomic_add_int(&sc->sc_inflight, 1);
		mtx_unlock(&sc->sc_queue_mtx);
	}
	G_ELI_SETWORKER(bp->bio_pflags, 0);
	bp->bio_driver2 = NULL;
	cbp = bp->bio_driver1;
	cbp->bio_done = g_eli_read_done;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp->bio_to = cp->provider;
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/*
	 * Read the encrypted data from the provider.
	 */
	g_io_request(cbp, cp);
}

/*
 * This is the main function responsible for cryptography (i.e., communication
 * with the crypto(9) subsystem).
 *
 * BIO_READ:
 *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> G_ELI_CRYPTO_RUN -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> G_ELI_CRYPTO_RUN -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
void
g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp)
{
	struct g_eli_softc *sc;
	struct cryptopq crpq;
	struct cryptop *crp;
	vm_page_t *pages;
	u_int i, nsec, secsize;
	off_t dstoff;
	u_char *data = NULL;
	int error __diagused, pages_offset;
	bool batch;

	G_ELI_LOGREQ(3, bp, "%s", __func__);

	G_ELI_SETWORKER(bp->bio_pflags, wr->w_number);
	sc = wr->w_softc;
	secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize;
	nsec = bp->bio_length / secsize;

	bp->bio_inbed = 0;
	bp->bio_children = nsec;
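	/*
	 * The bio is split into one crypto(9) request per sector: every
	 * sector is encrypted or decrypted independently, with an IV (and
	 * possibly a Data Key) derived from its destination offset.
	 */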

	/*
	 * If we are writing, we cannot destroy the current bio_data content,
	 * so we need to allocate separate memory for the encrypted data.
	 */
	if (bp->bio_cmd == BIO_WRITE) {
		if (!g_eli_alloc_data(bp, bp->bio_length)) {
			G_ELI_LOGREQ(0, bp, "Crypto request failed (ENOMEM).");
			if (bp->bio_driver1 != NULL) {
				g_destroy_bio(bp->bio_driver1);
				bp->bio_driver1 = NULL;
			}
			bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			if (sc != NULL)
				atomic_subtract_int(&sc->sc_inflight, 1);
			return;
		}
		data = bp->bio_driver2;
		/*
		 * This copy could be eliminated by using crypto's output
		 * buffer instead of using a single overwriting buffer.
		 */
		if ((bp->bio_flags & BIO_UNMAPPED) != 0)
			g_eli_bio_copyin(bp, data);
		else
			bcopy(bp->bio_data, data, bp->bio_length);
	} else {
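		/*
		 * Reads are decrypted in place: either directly in the
		 * caller's pages (unmapped bio) or in bio_data, so no extra
		 * buffer is needed.
		 */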
		if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
			pages = bp->bio_ma;
			pages_offset = bp->bio_ma_offset;
		} else {
			data = bp->bio_data;
		}
	}

	TAILQ_INIT(&crpq);
	batch = atomic_load_int(&g_eli_batch) != 0;

	for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) {
		crp = crypto_getreq(wr->w_sid, M_WAITOK);

		if (data != NULL) {
			crypto_use_buf(crp, data, secsize);
			data += secsize;
		} else {
			MPASS(pages != NULL);
			crypto_use_vmpage(crp, pages, secsize, pages_offset);
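			/*
			 * Advance to the page that holds the next sector:
			 * move the page pointer by the number of whole pages
			 * now covered by pages_offset and keep the remainder
			 * as the new intra-page offset.
			 */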
			pages_offset += secsize;
			pages += pages_offset >> PAGE_SHIFT;
			pages_offset &= PAGE_MASK;
		}
		crp->crp_opaque = (void *)bp;
		if (bp->bio_cmd == BIO_WRITE) {
			crp->crp_op = CRYPTO_OP_ENCRYPT;
			crp->crp_callback = g_eli_crypto_write_done;
		} else /* if (bp->bio_cmd == BIO_READ) */ {
			crp->crp_op = CRYPTO_OP_DECRYPT;
			crp->crp_callback = g_eli_crypto_read_done;
		}
		crp->crp_flags = CRYPTO_F_CBIFSYNC;
		crp->crp_payload_start = 0;
		crp->crp_payload_length = secsize;
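		/*
		 * Without G_ELI_FLAG_SINGLE_KEY the Data Key depends on the
		 * sector's offset; hold a reference here and drop it in the
		 * completion callback.  With a single key, crp_cipher_key
		 * stays NULL and the session key is used instead.
		 */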
		if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0) {
			crp->crp_cipher_key = g_eli_key_hold(sc, dstoff,
			    secsize);
		}
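		/*
		 * CRYPTO_F_IV_SEPARATE tells crypto(9) to take the IV from
		 * crp_iv rather than from the data buffer; the IV itself is
		 * derived from the sector's destination offset.
		 */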
		if (g_eli_ivlen(sc->sc_ealgo) != 0) {
			crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
			g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv,
			    sizeof(crp->crp_iv));
		}

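		/*
		 * With kern.geom.eli.batch set, queue the requests and hand
		 * them to crypto(9) in a single call below to cut down on
		 * per-request dispatch overhead; otherwise dispatch each
		 * sector as soon as it is set up.
		 */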
		if (batch) {
			TAILQ_INSERT_TAIL(&crpq, crp, crp_next);
		} else {
			error = crypto_dispatch(crp);
			KASSERT(error == 0,
			    ("crypto_dispatch() failed (error=%d)", error));
		}
	}

	if (batch)
		crypto_dispatch_batch(&crpq, 0);
}