/* $OpenBSD: softraid_crypto.c,v 1.139 2020/07/13 00:06:22 kn Exp $ */
/*
 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
 * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/disk.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/sensors.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/dkio.h>

#include <crypto/cryptodev.h>
#include <crypto/rijndael.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hmac.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
#include <scsi/scsi_disk.h>

#include <dev/softraidvar.h>

/*
 * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
 * to start failing when memory pressure kicks in. We can store this in the WU
 * because we assert that only one ccb per WU will ever be active.
 */
struct sr_crypto_wu {
	struct sr_workunit		 cr_wu;		/* Must be first. */
	struct uio			 cr_uio;
	struct iovec			 cr_iov;
	struct cryptop			*cr_crp;
	void				*cr_dmabuf;
};

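/*
 * Note: because cr_wu is the first member, the code below converts
 * between the generic and crypto views of a work unit with a plain
 * cast, e.g.:
 *
 *	struct sr_crypto_wu *crwu = (struct sr_crypto_wu *)wu;
 *
 * as done in sr_crypto_prepare() and sr_crypto_alloc_resources().
 */
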
struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *, int);
int		sr_crypto_create_keys(struct sr_discipline *);
int		sr_crypto_get_kdf(struct bioc_createraid *,
		    struct sr_discipline *);
int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_decrypt_key(struct sr_discipline *);
int		sr_crypto_change_maskkey(struct sr_discipline *,
		    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
int		sr_crypto_create(struct sr_discipline *,
		    struct bioc_createraid *, int, int64_t);
int		sr_crypto_assemble(struct sr_discipline *,
		    struct bioc_createraid *, int, void *);
int		sr_crypto_alloc_resources(struct sr_discipline *);
void		sr_crypto_free_resources(struct sr_discipline *);
int		sr_crypto_ioctl(struct sr_discipline *,
		    struct bioc_discipline *);
int		sr_crypto_meta_opt_handler(struct sr_discipline *,
		    struct sr_meta_opt_hdr *);
void		sr_crypto_write(struct cryptop *);
int		sr_crypto_rw(struct sr_workunit *);
int		sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
void		sr_crypto_done(struct sr_workunit *);
void		sr_crypto_read(struct cryptop *);
void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
		    u_int8_t *, int, u_char *);
void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
struct sr_chunk *sr_crypto_create_key_disk(struct sr_discipline *, dev_t);
struct sr_chunk *sr_crypto_read_key_disk(struct sr_discipline *, dev_t);

#ifdef SR_DEBUG0
void		 sr_crypto_dumpkeys(struct sr_discipline *);
#endif

/* Discipline initialisation. */
void
sr_crypto_discipline_init(struct sr_discipline *sd)
{
	int i;

	/* Fill out discipline members. */
	sd->sd_wu_size = sizeof(struct sr_crypto_wu);
	sd->sd_type = SR_MD_CRYPTO;
	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
	sd->sd_max_wu = SR_CRYPTO_NOWU;

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	/* Setup discipline specific function pointers. */
	sd->sd_alloc_resources = sr_crypto_alloc_resources;
	sd->sd_assemble = sr_crypto_assemble;
	sd->sd_create = sr_crypto_create;
	sd->sd_free_resources = sr_crypto_free_resources;
	sd->sd_ioctl_handler = sr_crypto_ioctl;
	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
	sd->sd_scsi_rw = sr_crypto_rw;
	sd->sd_scsi_done = sr_crypto_done;
}

int
sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, int64_t coerced_size)
{
	struct sr_meta_opt_item	*omi;
	int			rv = EINVAL;

	if (no_chunk != 1) {
		sr_error(sd->sd_sc, "%s requires exactly one chunk",
		    sd->sd_name);
		goto done;
	}

	if (coerced_size > SR_CRYPTO_MAXSIZE) {
		sr_error(sd->sd_sc, "%s exceeds maximum size (%lli > %llu)",
		    sd->sd_name, coerced_size, SR_CRYPTO_MAXSIZE);
		goto done;
	}

	/* Create crypto optional metadata. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_CRYPTO;
	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
	sd->sd_meta->ssdi.ssd_opt_no++;

	sd->mds.mdd_crypto.key_disk = NULL;

	if (bc->bc_key_disk != NODEV) {

		/* Create a key disk. */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;

	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {

		/* No hint available yet. */
		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
		rv = EAGAIN;
		goto done;

	} else if (sr_crypto_get_kdf(bc, sd))
		goto done;

	/* Passphrase volumes cannot be automatically assembled. */
	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
		goto done;

	sd->sd_meta->ssdi.ssd_size = coerced_size;

	sr_crypto_create_keys(sd);

	sd->sd_max_ccb_per_wu = no_chunk;

	rv = 0;
done:
	return (rv);
}
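
/*
 * For reference, volume creation is normally driven from userland by
 * bioctl(8); a sketch of the two setups handled above (device names
 * are examples only):
 *
 *	bioctl -c C -l /dev/sd1a softraid0		# passphrase volume
 *	bioctl -c C -k sd2a -l /dev/sd1a softraid0	# key disk volume
 */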

int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, void *data)
{
	int	rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)
		goto done;

	if (data != NULL) {
		/* Kernel already has mask key. */
		memcpy(sd->mds.mdd_crypto.scr_maskkey, data,
		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	} else if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* provide userland with kdf hint */
		if (bc->bc_opaque == NULL)
			goto done;

		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* we're done */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* get kdf with maskkey from userland */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
	} else
		goto done;

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	rv = 0;
done:
	return (rv);
}

struct sr_crypto_wu *
sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			flags, i, n;
	daddr_t			blkno;
	u_int			keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	crwu = (struct sr_crypto_wu *)wu;
	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;

	blkno = wu->swu_blk_start;
	n = xs->datalen >> DEV_BSHIFT;

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
	 * Since this I/O may be smaller, trim the number of crypto descs
	 * to just what this transfer needs.
	 */
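	/*
	 * Worked example, assuming the usual OpenBSD values of MAXPHYS
	 * (64 KiB) and DEV_BSIZE (512): crp_ndescalloc is 64 KiB / 512 =
	 * 128 descriptors, and a 4 KiB transfer trims that to n = 8.
	 */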
	KASSERT(crwu->cr_crp->crp_ndescalloc >= n);
	crwu->cr_crp->crp_ndesc = n;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/*
	 * Select crypto session based on block number.
	 *
	 * XXX - this does not handle the case where the read/write spans
	 * different key blocks (e.g. the 0.5TB boundary). Currently this
	 * is already broken by the use of scr_key[0] below.
	 */
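	/*
	 * Example, assuming SR_CRYPTO_KEY_BLKSHIFT is 30 as defined in
	 * softraidvar.h: each key covers 2^30 512-byte blocks (0.5 TB),
	 * so blocks 0 .. 2^30-1 use scr_sid[0], blocks from 2^30 onwards
	 * use scr_sid[1], and so on.
	 */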
	keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];

	crwu->cr_crp->crp_opaque = crwu;
	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_NOQUEUE;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	for (i = 0; i < crwu->cr_crp->crp_ndesc; i++, blkno++) {
		crd = &crwu->cr_crp->crp_desc[i];
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
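		/*
		 * The 64-bit block number, in host byte order, doubles
		 * as the per-sector explicit IV (the XTS tweak).
		 */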
		memcpy(crd->crd_iv, &blkno, sizeof(blkno));
	}

	return (crwu);
}

int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int			rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    &kdfinfo->genkdf, kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_KEY) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		memcpy(sd->mds.mdd_crypto.scr_maskkey, &kdfinfo->maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);

	return (rv);
}

int
sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
{
	rijndael_ctx		ctx;
	int			i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_encrypt(&ctx, &p[i], &c[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

int
sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
{
	rijndael_ctx		ctx;
	int			i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

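/*
 * Both mask routines process whole AES blocks, so callers must pass a
 * size that is a multiple of RIJNDAEL128_BLOCK_LEN (16 bytes); the key
 * arrays passed in by the callers in this file are sized that way.
 */
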
void
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)
{
	u_char			check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX		hmacctx;
	SHA1_CTX		shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
}

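/*
 * This check MAC is written to chk_hmac_sha1.sch_mac when a volume is
 * created (sr_crypto_create_keys) and recomputed when it is assembled
 * (sr_crypto_decrypt_key); a mismatch means the passphrase or key disk
 * produced the wrong mask key.
 */
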
int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return rv;
}

int
sr_crypto_create_keys(struct sr_discipline *sd)
{
	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}

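/*
 * Key hierarchy summary: random per-volume disk keys (scr_key) encrypt
 * the data; they are stored on disk only in masked form (scm_key),
 * encrypted with AES-256-ECB under the mask key, which in turn comes
 * from the user's passphrase KDF or from a key disk.
 */
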
int
sr_crypto_change_maskkey(struct sr_discipline *sd,
    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	u_char			*c, *p = NULL;
	size_t			ksz;
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;
		goto out;
	}

	/* Copy new KDF hint to metadata, if supplied. */
	if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
		if (kdfinfo2->genkdf.len >
		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint))
			goto out;
		explicit_bzero(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint));
		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
	}

	/* Mask the disk keys. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new HMAC to metadata; the masked key was written above. */
	memcpy(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac, check_digest,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF, ksz);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}

struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk	*km;
	struct sr_meta_opt_item *omi = NULL;
	struct sr_meta_keydisk	*skm;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));

	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);

	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

	/* Generate mask key. */
	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	/* Copy mask key to optional metadata area. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_KEYDISK;
	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
	skm = (struct sr_meta_keydisk *)omi->omi_som;
	memcpy(&skm->skm_maskkey, sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(skm->skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
	fakesd->sd_meta->ssdi.ssd_opt_no++;

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		sr_error(sc, "could not save metadata to %s", devname);
		goto fail;
	}

	goto done;

fail:
	free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
	key_disk = NULL;

done:
	free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
		    sizeof(struct sr_chunk *));
	free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
	free(sm, M_DEVBUF, sizeof(struct sr_metadata));
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

struct sr_chunk *
sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_opt_item *omi, *omi_next;
	struct sr_meta_opt_hdr	*omh;
	struct sr_meta_keydisk	*skm;
	struct sr_meta_opt_head som;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn = NULL;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Load a key disk and load keying material into memory.
	 */

	SLIST_INIT(&som);

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
	    NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Read and validate key disk metadata.
	 */
	sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
	if (sr_meta_native_read(sd, dev, sm, NULL)) {
		sr_error(sc, "native bootprobe could not read native metadata");
		goto done;
	}

	if (sr_meta_validate(sd, dev, sm, NULL)) {
		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
		    DEVNAME(sc));
		goto done;
	}

	/* Make sure this is a key disk. */
	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
		sr_error(sc, "%s is not a key disk", devname);
		goto done;
	}

	/* Construct key disk chunk. */
	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	key_disk->src_size = 0;

	memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
	    sizeof(key_disk->src_meta));

	/* Read mask key from optional metadata. */
	sr_meta_opt_load(sc, sm, &som);
	SLIST_FOREACH(omi, &som, omi_link) {
		omh = omi->omi_som;
		if (omh->som_type == SR_OPT_KEYDISK) {
			skm = (struct sr_meta_keydisk *)omh;
			memcpy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		} else if (omh->som_type == SR_OPT_CRYPTO) {
			/* Original keydisk format with key in crypto area. */
			memcpy(sd->mds.mdd_crypto.scr_maskkey,
			    (u_int8_t *)omh + sizeof(struct sr_meta_opt_hdr),
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		}
	}

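	/*
	 * Success: clear open so the cleanup below does not close the
	 * vnode; it now belongs to key_disk (src_vn).
	 */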
	open = 0;

done:
	for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
		omi_next = SLIST_NEXT(omi, omi_link);
		free(omi->omi_som, M_DEVBUF, 0);
		free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
	}

	free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);

	if (vn && open) {
		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

static void
sr_crypto_free_sessions(struct sr_discipline *sd)
{
	u_int			i;

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		if (sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1) {
			crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
			sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
		}
	}
}

int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct sr_workunit	*wu;
	struct sr_crypto_wu	*crwu;
	struct cryptoini	cri;
	u_int			num_keys, i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		sd->mds.mdd_crypto.scr_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		sd->mds.mdd_crypto.scr_klen = 512;
		break;
	default:
		sr_error(sd->sd_sc, "unknown crypto algorithm");
		return (EINVAL);
	}

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate work units");
		return (ENOMEM);
	}
	if (sr_ccb_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate CCBs");
		return (ENOMEM);
	}
	if (sr_crypto_decrypt_key(sd)) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		return (EPERM);
	}

	/*
	 * For each work unit allocate the uio, iovec and crypto structures.
	 * These have to be allocated now because during runtime we cannot
	 * fail an allocation without failing the I/O (which can cause real
	 * problems).
	 */
	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
		crwu = (struct sr_crypto_wu *)wu;
		crwu->cr_uio.uio_iov = &crwu->cr_iov;
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
	}

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
	num_keys = ((sd->sd_meta->ssdi.ssd_size - 1) >>
	    SR_CRYPTO_KEY_BLKSHIFT) + 1;
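	/*
	 * For example, a 1 TB volume has 2^31 512-byte blocks, so
	 * (assuming SR_CRYPTO_KEY_BLKSHIFT is 30) num_keys is
	 * ((2^31 - 1) >> 30) + 1 = 2: one session per 0.5 TB of data.
	 */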
	if (num_keys > SR_CRYPTO_MAXKEYS)
		return (EFBIG);
	for (i = 0; i < num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			sr_crypto_free_sessions(sd);
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}

void
sr_crypto_free_resources(struct sr_discipline *sd)
{
	struct sr_workunit	*wu;
	struct sr_crypto_wu	*crwu;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.key_disk != NULL) {
		explicit_bzero(sd->mds.mdd_crypto.key_disk,
		    sizeof(*sd->mds.mdd_crypto.key_disk));
		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF,
		    sizeof(*sd->mds.mdd_crypto.key_disk));
	}

	sr_hotplug_unregister(sd, sr_crypto_hotplug);

	sr_crypto_free_sessions(sd);

	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
		crwu = (struct sr_crypto_wu *)wu;
		if (crwu->cr_dmabuf)
			dma_free(crwu->cr_dmabuf, MAXPHYS);
		if (crwu->cr_crp)
			crypto_freereq(crwu->cr_crp);
	}

	sr_wu_free(sd);
	sr_ccb_free(sd);
}

int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair kdfpair;
	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
	int			size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

bad:
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

	return (rv);
}

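/*
 * These ioctls are reached through bioctl(8): SR_IOCTL_GET_KDFHINT when
 * a volume is being unlocked, and SR_IOCTL_CHANGE_PASSPHRASE for
 * something like "bioctl -P sd2", which re-encrypts the disk keys under
 * the new mask key without rewriting any data.
 */
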
int
sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
{
	int rv = EINVAL;

	if (om->som_type == SR_OPT_CRYPTO) {
		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
		rv = 0;
	}

	return (rv);
}

int
sr_crypto_rw(struct sr_workunit *wu)
{
	struct sr_crypto_wu	*crwu;
	daddr_t			blkno;
	int			rv = 0;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu);

	if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
		return (1);

	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
		crwu = sr_crypto_prepare(wu, 1);
		crwu->cr_crp->crp_callback = sr_crypto_write;
		rv = crypto_dispatch(crwu->cr_crp);
		if (rv == 0)
			rv = crwu->cr_crp->crp_etype;
	} else
		rv = sr_crypto_dev_rw(wu, NULL);

	return (rv);
}

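/*
 * I/O flow: writes are encrypted first (sr_crypto_prepare), then issued
 * to the chunk from the crypto callback (sr_crypto_write ->
 * sr_crypto_dev_rw); reads go to the chunk first and are decrypted on
 * the completion path (sr_crypto_done -> sr_crypto_read).
 */
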
void
sr_crypto_write(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = &crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype) {
		/* fail io */
		wu->swu_xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		sr_scsi_done(wu->swu_dis, wu->swu_xs);
		splx(s);
	}

	sr_crypto_dev_rw(wu, crwu);
}

int
sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	daddr_t			blkno;

	blkno = wu->swu_blk_start;

	ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}
	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
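		/*
		 * For writes, point the ccb at the encrypted bounce
		 * buffer prepared by sr_crypto_prepare().
		 */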
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}
	sr_wu_enqueue_ccb(wu, ccb);
	sr_schedule_wu(wu);

	return (0);

bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}

void
sr_crypto_done(struct sr_workunit *wu)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_crypto_wu	*crwu;
	int			s;

	/* If this was a successful read, initiate decryption of the data. */
	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
		crwu = sr_crypto_prepare(wu, 0);
		crwu->cr_crp->crp_callback = sr_crypto_read;
		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_dispatch %p\n",
		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
		crypto_dispatch(crwu->cr_crp);
		return;
	}

	s = splbio();
	sr_scsi_done(wu->swu_dis, wu->swu_xs);
	splx(s);
}

void
sr_crypto_read(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = &crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype)
		wu->swu_xs->error = XS_DRIVER_STUFFUP;

	s = splbio();
	sr_scsi_done(wu->swu_dis, wu->swu_xs);
	splx(s);
}

void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}

#ifdef SR_DEBUG0
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int			i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif	/* SR_DEBUG0 */