xref: /freebsd/sys/crypto/ccp/ccp.c (revision 137a344c6341d1469432e9deb3a25593f96672ad)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 * Largely borrowed from ccr(4), Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <dev/pci/pcivar.h>

#include <dev/random/randomdev.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "ccp.h"
#include "ccp_hardware.h"

MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto");

/*
 * Need a global softc available for the garbage random_source API, which
 * lacks any context pointer.  It's also handy for debugging.
 */
struct ccp_softc *g_ccp_softc;

bool g_debug_print = false;
SYSCTL_BOOL(_hw_ccp, OID_AUTO, debug, CTLFLAG_RWTUN, &g_debug_print, 0,
    "Set to enable debugging log messages");
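
/*
 * Usage example: because the knob is CTLFLAG_RWTUN, it can be toggled at
 * runtime with "sysctl hw.ccp.debug=1" or preset as the loader tunable
 * "hw.ccp.debug".
 */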

static struct pciid {
	uint32_t devid;
	const char *desc;
} ccp_ids[] = {
	{ 0x14561022, "AMD CCP-5a" },
	{ 0x14681022, "AMD CCP-5b" },
};
MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids, sizeof(ccp_ids[0]),
    nitems(ccp_ids));

static struct random_source random_ccp = {
	.rs_ident = "AMD CCP TRNG",
	.rs_source = RANDOM_PURE_CCP,
	.rs_read = random_ccp_read,
};

/*
 * ccp_populate_sglist() generates a scatter/gather list that covers the entire
 * crypto operation buffer.
 */
static int
ccp_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
	int error;

	sglist_reset(sg);
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		error = sglist_append_mbuf(sg, crp->crp_mbuf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		error = sglist_append_uio(sg, crp->crp_uio);
	else
		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
	return (error);
}

/*
 * Handle a GCM request with an empty payload by performing the
 * operation in software.  Derived from swcr_authenc().
 */
static void
ccp_gcm_soft(struct ccp_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	struct aes_gmac_ctx gmac_ctx;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int i, len;

	/*
	 * This assumes a 12-byte IV from the crp.  See the longer comment
	 * in ccp_gcm() for more details.
	 */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			arc4rand(iv, 12, 0);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	}
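	/*
	 * Form GCM's initial counter block (J0): the 96-bit IV followed by
	 * a 32-bit big-endian block counter starting at 1 (NIST SP 800-38D).
	 */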
	*(uint32_t *)&iv[12] = htobe32(1);

	/* Initialize the MAC. */
	AES_GMAC_Init(&gmac_ctx);
	AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
	AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
		len = imin(crda->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	}

	/*
	 * GHASH length block: the 64-bit big-endian bit lengths of the AAD
	 * and of the ciphertext.  The payload is empty here, so only the
	 * AAD length (low 32 bits of the first 64-bit word) is non-zero.
	 */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
	AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	AES_GMAC_Final(digest, &gmac_ctx);

	if (crde->crd_flags & CRD_F_ENCRYPT) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest), digest);
		crp->crp_etype = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest2), digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0)
			crp->crp_etype = 0;
		else
			crp->crp_etype = EBADMSG;
	}
	crypto_done(crp);
}

static int
ccp_probe(device_t dev)
{
	struct pciid *ip;
	uint32_t id;

	id = pci_get_devid(dev);
	for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) {
		if (id == ip->devid) {
			device_set_desc(dev, ip->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static void
ccp_initialize_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		qp->cq_softc = sc;
		qp->cq_qindex = i;
		mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF);
		/* XXX - arbitrarily chosen sizes */
		qp->cq_sg_crp = sglist_alloc(32, M_WAITOK);
		/* Two more SGEs than sg_crp to accommodate ipad. */
		qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK);
		qp->cq_sg_dst = sglist_alloc(2, M_WAITOK);
	}
}

static void
ccp_free_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		mtx_destroy(&qp->cq_lock);
		sglist_free(qp->cq_sg_crp);
		sglist_free(qp->cq_sg_ulptx);
		sglist_free(qp->cq_sg_dst);
	}
}

static int
ccp_attach(device_t dev)
{
	struct ccp_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}

	error = ccp_hw_attach(dev);
	if (error != 0)
		return (error);

	mtx_init(&sc->lock, "ccp", NULL, MTX_DEF);

	ccp_initialize_queues(sc);

	if (g_ccp_softc == NULL) {
		g_ccp_softc = sc;
		if ((sc->hw_features & VERSION_CAP_TRNG) != 0)
			random_source_register(&random_ccp);
	}

	if ((sc->hw_features & VERSION_CAP_AES) != 0) {
		crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
	}
	if ((sc->hw_features & VERSION_CAP_SHA) != 0) {
		crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_384_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_512_HMAC, 0, 0);
	}

	return (0);
}

static int
ccp_detach(device_t dev)
{
	struct ccp_softc *sc;
	int i;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	for (i = 0; i < sc->nsessions; i++) {
		if (sc->sessions[i].active || sc->sessions[i].pending != 0) {
			mtx_unlock(&sc->lock);
			return (EBUSY);
		}
	}
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);
	if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0)
		random_source_deregister(&random_ccp);

	ccp_hw_detach(dev);
	ccp_free_queues(sc);

	if (g_ccp_softc == sc)
		g_ccp_softc = NULL;

	free(sc->sessions, M_CCP);
	mtx_destroy(&sc->lock);
	return (0);
}

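/*
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), with ipad bytes 0x36 and
 * opad bytes 0x5c (FIPS 198-1).  Both XOR-masked key blocks are precomputed
 * once per session here so each request only pays for the two hash passes.
 */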
static void
ccp_init_hmac_digest(struct ccp_session *s, int cri_alg, char *key,
    int klen)
{
	union authctx auth_ctx;
	struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	axf = s->hmac.auth_hash;
	klen /= 8;	/* cri_klen is in bits; convert to bytes. */
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		explicit_bzero(&auth_ctx, sizeof(auth_ctx));
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}
}

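/*
 * Key lengths are specified in bits.  Plain AES accepts 128-, 192-, and
 * 256-bit keys.  An XTS key is two concatenated AES keys, so only 256 bits
 * (AES-128-XTS) and 512 bits (AES-256-XTS) are valid.
 */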
static int
ccp_aes_check_keylen(int alg, int klen)
{

	switch (klen) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static void
ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen)
{
	unsigned kbits;

	/* An XTS key is two AES keys back to back; each half is one key. */
	if (alg == CRYPTO_AES_XTS)
		kbits = klen / 2;
	else
		kbits = klen;

	switch (kbits) {
	case 128:
		s->blkcipher.cipher_type = CCP_AES_TYPE_128;
		break;
	case 192:
		s->blkcipher.cipher_type = CCP_AES_TYPE_192;
		break;
	case 256:
		s->blkcipher.cipher_type = CCP_AES_TYPE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen / 8;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
}

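/*
 * Session slots live in a malloc'd array that grows by one entry at a time;
 * an inactive slot with no pending requests is recycled in preference to
 * growing the array.
 */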
static int
ccp_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct ccp_softc *sc;
	struct ccp_session *s;
	struct auth_hash *auth_hash;
	struct cryptoini *c, *hash, *cipher;
	enum ccp_aes_mode cipher_mode;
	unsigned auth_mode, iv_len;
	unsigned partial_digest_len;
	unsigned q;
	int error, i, sess;
	bool gcm_hash;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	gcm_hash = false;
	cipher = NULL;
	hash = NULL;
	auth_hash = NULL;
	/* XXX reconcile auth_mode with use by ccp_sha */
	auth_mode = 0;
	cipher_mode = CCP_AES_MODE_ECB;
	iv_len = 0;
	partial_digest_len = 0;
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			if (hash)
				return (EINVAL);
			hash = c;
			switch (c->cri_alg) {
			case CRYPTO_SHA1_HMAC:
				auth_hash = &auth_hash_hmac_sha1;
				auth_mode = SHA1;
				partial_digest_len = SHA1_HASH_LEN;
				break;
			case CRYPTO_SHA2_256_HMAC:
				auth_hash = &auth_hash_hmac_sha2_256;
				auth_mode = SHA2_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_384_HMAC:
				auth_hash = &auth_hash_hmac_sha2_384;
				auth_mode = SHA2_384;
				/*
				 * SHA-384 runs on SHA-512's internal state,
				 * so partial hashes are SHA-512 sized.
				 */
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_SHA2_512_HMAC:
				auth_hash = &auth_hash_hmac_sha2_512;
				auth_mode = SHA2_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_AES_128_NIST_GMAC:
			case CRYPTO_AES_192_NIST_GMAC:
			case CRYPTO_AES_256_NIST_GMAC:
				gcm_hash = true;
#if 0
				auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
#endif
				break;
			}
			break;
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_XTS:
			if (cipher)
				return (EINVAL);
			cipher = c;
			switch (c->cri_alg) {
			case CRYPTO_AES_CBC:
				cipher_mode = CCP_AES_MODE_CBC;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_ICM:
				cipher_mode = CCP_AES_MODE_CTR;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_NIST_GCM_16:
				cipher_mode = CCP_AES_MODE_GCTR;
				iv_len = AES_GCM_IV_LEN;
				break;
			case CRYPTO_AES_XTS:
				cipher_mode = CCP_AES_MODE_XTS;
				iv_len = AES_BLOCK_LEN;
				break;
			}
			if (c->cri_key != NULL) {
				error = ccp_aes_check_keylen(c->cri_alg,
				    c->cri_klen);
				if (error != 0)
					return (error);
			}
			break;
		default:
			return (EINVAL);
		}
	}
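	/* GMAC and GCTR are only supported together, i.e. as full AES-GCM. */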
	if (gcm_hash != (cipher_mode == CCP_AES_MODE_GCTR))
		return (EINVAL);
	if (hash == NULL && cipher == NULL)
		return (EINVAL);
	if (hash != NULL && hash->cri_key == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	sess = -1;
	for (i = 0; i < sc->nsessions; i++) {
		if (!sc->sessions[i].active && sc->sessions[i].pending == 0) {
			sess = i;
			break;
		}
	}
	if (sess == -1) {
		s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCP,
		    M_NOWAIT | M_ZERO);
		if (s == NULL) {
			mtx_unlock(&sc->lock);
			return (ENOMEM);
		}
		if (sc->sessions != NULL)
			memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions);
		sess = sc->nsessions;
		free(sc->sessions, M_CCP);
		sc->sessions = s;
		sc->nsessions++;
	}

	s = &sc->sessions[sess];

	/* Just grab the first usable queue for now. */
	for (q = 0; q < nitems(sc->queues); q++)
		if ((sc->valid_queues & (1 << q)) != 0)
			break;
	if (q == nitems(sc->queues)) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	s->queue = q;

	if (gcm_hash)
		s->mode = GCM;
	else if (hash != NULL && cipher != NULL)
		s->mode = AUTHENC;
	else if (hash != NULL)
		s->mode = HMAC;
	else {
		MPASS(cipher != NULL);
		s->mode = BLKCIPHER;
	}
	if (gcm_hash) {
		if (hash->cri_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = hash->cri_mlen;
	} else if (hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.partial_digest_len = partial_digest_len;
		if (hash->cri_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = hash->cri_mlen;
		ccp_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
		    hash->cri_klen);
	}
	if (cipher != NULL) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = iv_len;
		if (cipher->cri_key != NULL)
			ccp_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
			    cipher->cri_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	*sidp = sess;
	return (0);
}

static int
ccp_freesession(device_t dev, uint64_t tid)
{
	struct ccp_softc *sc;
	uint32_t sid;
	int error;

	sc = device_get_softc(dev);
	sid = CRYPTO_SESID2LID(tid);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active)
		error = EINVAL;
	else {
		if (sc->sessions[sid].pending != 0)
			device_printf(dev,
			    "session %d freed with %d pending requests\n", sid,
			    sc->sessions[sid].pending);
		sc->sessions[sid].active = false;
		error = 0;
	}
	mtx_unlock(&sc->lock);
	return (error);
}

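/*
 * Dispatch one request to the session's queue.  Failures detected here are
 * reported back through crp->crp_etype and crypto_done(); the entry point
 * itself returns 0 for any request it accepts.
 */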
static int
ccp_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ccp_softc *sc;
	struct ccp_queue *qp;
	struct ccp_session *s;
	struct cryptodesc *crd, *crda, *crde;
	uint32_t sid;
	int error;
	bool qpheld;

	qpheld = false;
	qp = NULL;
	if (crp == NULL)
		return (EINVAL);

	crd = crp->crp_desc;
	sid = CRYPTO_SESID2LID(crp->crp_sid);
	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active) {
		mtx_unlock(&sc->lock);
		error = EINVAL;
		goto out;
	}

	s = &sc->sessions[sid];
	qp = &sc->queues[s->queue];
	mtx_unlock(&sc->lock);
	error = ccp_queue_acquire_reserve(qp, 1 /* placeholder */, M_NOWAIT);
	if (error != 0)
		goto out;
	qpheld = true;

	error = ccp_populate_sglist(qp->cq_sg_crp, crp);
	if (error != 0)
		goto out;

	switch (s->mode) {
	case HMAC:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
			ccp_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		error = ccp_hmac(qp, s, crp);
		break;
	case BLKCIPHER:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crd->crd_alg,
			    crd->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		}
		error = ccp_blkcipher(qp, s, crp);
		break;
	case AUTHENC:
		error = 0;
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			/* Only encrypt-then-authenticate supported. */
			crde = crd;
			crda = crd->crd_next;
			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
				error = EINVAL;
				break;
			}
			s->cipher_first = true;
			break;
		default:
			crda = crd;
			crde = crd->crd_next;
			if (crde->crd_flags & CRD_F_ENCRYPT) {
				error = EINVAL;
				break;
			}
			s->cipher_first = false;
			break;
		}
		if (error != 0)
			break;
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccp_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
			    crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		error = ccp_authenc(qp, s, crp, crda, crde);
		break;
	case GCM:
		error = 0;
		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
			crde = crd;
			crda = crd->crd_next;
			s->cipher_first = true;
		} else {
			crda = crd;
			crde = crd->crd_next;
			s->cipher_first = false;
		}
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
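		/* Empty-payload requests are finished in software. */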
		if (crde->crd_len == 0) {
			mtx_unlock(&qp->cq_lock);
			ccp_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		error = ccp_gcm(qp, s, crp, crda, crde);
		break;
	}

	if (error == 0)
		s->pending++;

out:
	if (qpheld) {
		if (error != 0) {
			/*
			 * Squash EAGAIN so callers don't uselessly and
			 * expensively retry if the ring was full.
			 */
			if (error == EAGAIN)
				error = ENOMEM;
			ccp_queue_abort(qp);
		} else
			ccp_queue_release(qp);
	}

	if (error != 0) {
		DPRINTF(dev, "%s: early error:%d\n", __func__, error);
		crp->crp_etype = error;
		crypto_done(crp);
	}
	return (0);
}

static device_method_t ccp_methods[] = {
	DEVMETHOD(device_probe,		ccp_probe),
	DEVMETHOD(device_attach,	ccp_attach),
	DEVMETHOD(device_detach,	ccp_detach),

	DEVMETHOD(cryptodev_newsession,	ccp_newsession),
	DEVMETHOD(cryptodev_freesession, ccp_freesession),
	DEVMETHOD(cryptodev_process,	ccp_process),

	DEVMETHOD_END
};

static driver_t ccp_driver = {
	"ccp",
	ccp_methods,
	sizeof(struct ccp_softc)
};

static devclass_t ccp_devclass;
DRIVER_MODULE(ccp, pci, ccp_driver, ccp_devclass, NULL, NULL);
MODULE_VERSION(ccp, 1);
MODULE_DEPEND(ccp, crypto, 1, 1, 1);
MODULE_DEPEND(ccp, random_device, 1, 1, 1);

static int
ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags)
{
	struct ccp_softc *sc;

	mtx_assert(&qp->cq_lock, MA_OWNED);
	sc = qp->cq_softc;

	if (n < 1 || n >= (1 << sc->ring_size_order))
		return (EINVAL);

	while (true) {
		if (ccp_queue_get_ring_space(qp) >= n)
			return (0);
		if ((mflags & M_WAITOK) == 0)
			return (EAGAIN);
		qp->cq_waiting = true;
		msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0);
	}
}

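/*
 * Ring reservation protocol: ccp_queue_acquire_reserve() returns with the
 * queue lock held on success.  The caller then appends descriptors,
 * advancing cq_tail, and finishes with either ccp_queue_release() to expose
 * the new descriptors to the engine or ccp_queue_abort() to roll cq_tail
 * back to the snapshot taken at acquire time.
 */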
int
ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
{
	int error;

	mtx_lock(&qp->cq_lock);
	qp->cq_acq_tail = qp->cq_tail;
	error = ccp_queue_reserve_space(qp, n, mflags);
	if (error != 0)
		mtx_unlock(&qp->cq_lock);
	return (error);
}

void
ccp_queue_release(struct ccp_queue *qp)
{

	mtx_assert(&qp->cq_lock, MA_OWNED);
	if (qp->cq_tail != qp->cq_acq_tail) {
		/* Order the descriptor stores before the tail update. */
		wmb();
		ccp_queue_write_tail(qp);
	}
	mtx_unlock(&qp->cq_lock);
}

void
ccp_queue_abort(struct ccp_queue *qp)
{
	unsigned i;

	mtx_assert(&qp->cq_lock, MA_OWNED);

	/* Wipe out any descriptors associated with this aborted txn. */
	for (i = qp->cq_acq_tail; i != qp->cq_tail;
	    i = (i + 1) % (1 << qp->cq_softc->ring_size_order)) {
		memset(&qp->desc_ring[i], 0, sizeof(qp->desc_ring[i]));
	}
	qp->cq_tail = qp->cq_acq_tail;

	mtx_unlock(&qp->cq_lock);
}

#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
static void
db_show_ccp_sc(struct ccp_softc *sc)
{

	db_printf("ccp softc at %p\n", sc);
	db_printf(" cid: %d\n", (int)sc->cid);
	db_printf(" nsessions: %d\n", sc->nsessions);

	db_printf(" lock: ");
	db_show_lock(&sc->lock);

	db_printf(" detaching: %d\n", (int)sc->detaching);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);

	db_printf(" hw_version: %d\n", (int)sc->hw_version);
	db_printf(" hw_features: %b\n", (int)sc->hw_features,
	    "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA"
	    "\11SHA\0103DES\07AES");

	db_printf(" hw status:\n");
	db_ccp_show_hw(sc);
}

static void
db_show_ccp_qp(struct ccp_queue *qp)
{

	db_printf(" lock: ");
	db_show_lock(&qp->cq_lock);

	db_printf(" cq_qindex: %u\n", qp->cq_qindex);
	db_printf(" cq_softc: %p\n", qp->cq_softc);

	db_printf(" head: %u\n", qp->cq_head);
	db_printf(" tail: %u\n", qp->cq_tail);
	db_printf(" acq_tail: %u\n", qp->cq_acq_tail);
	db_printf(" desc_ring: %p\n", qp->desc_ring);
	db_printf(" completions_ring: %p\n", qp->completions_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)qp->desc_ring_bus_addr);

	db_printf(" hw status:\n");
	db_ccp_show_queue_hw(qp);
}

DB_SHOW_COMMAND(ccp, db_show_ccp)
{
	struct ccp_softc *sc;
	unsigned unit, qindex;

	if (!have_addr)
		goto usage;

	unit = (unsigned)addr;

	sc = devclass_get_softc(ccp_devclass, unit);
	if (sc == NULL) {
		db_printf("No such device ccp%u\n", unit);
		goto usage;
	}

	if (count == -1) {
		db_show_ccp_sc(sc);
		return;
	}

	qindex = (unsigned)count;
	if (qindex >= nitems(sc->queues)) {
		db_printf("No such queue %u\n", qindex);
		goto usage;
	}
	db_show_ccp_qp(&sc->queues[qindex]);
	return;

usage:
	db_printf("usage: show ccp <unit>[,<qindex>]\n");
	return;
}
#endif /* DDB */