/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 * Largely borrowed from ccr(4), Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <dev/pci/pcivar.h>

#include <dev/random/randomdev.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "ccp.h"
#include "ccp_hardware.h"

MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto");
/*
 * Need a global softc available for the garbage random_source API, which
 * lacks any context pointer.  It's also handy for debugging.
 */
struct ccp_softc *g_ccp_softc;

bool g_debug_print = false;
SYSCTL_BOOL(_hw_ccp, OID_AUTO, debug, CTLFLAG_RWTUN, &g_debug_print, 0,
    "Set to enable debugging log messages");

static struct pciid {
	uint32_t devid;
	const char *desc;
} ccp_ids[] = {
	{ 0x14561022, "AMD CCP-5a" },
	{ 0x14681022, "AMD CCP-5b" },
};
MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids, sizeof(ccp_ids[0]),
    nitems(ccp_ids));

static struct random_source random_ccp = {
	.rs_ident = "AMD CCP TRNG",
	.rs_source = RANDOM_PURE_CCP,
	.rs_read = random_ccp_read,
};

/*
 * ccp_populate_sglist() generates a scatter/gather list that covers the entire
 * crypto operation buffer.
 */
static int
ccp_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
	int error;

	sglist_reset(sg);
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		error = sglist_append_mbuf(sg, crp->crp_mbuf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		error = sglist_append_uio(sg, crp->crp_uio);
	else
		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
	return (error);
}

/*
 * Handle a GCM request with an empty payload by performing the
 * operation in software.  Derived from swcr_authenc().
 */
static void
ccp_gcm_soft(struct ccp_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	struct aes_gmac_ctx gmac_ctx;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int i, len;

	/*
	 * This assumes a 12-byte IV from the crp.  See the longer comment
	 * in ccp_gcm() (in ccp_hardware.c) for more details.
	 */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			arc4rand(iv, 12, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	}
	*(uint32_t *)&iv[12] = htobe32(1);

	/* Initialize the MAC. */
	AES_GMAC_Init(&gmac_ctx);
	AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
	AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
		len = imin(crda->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	}

	/* Length block. */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
	AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
	AES_GMAC_Final(digest, &gmac_ctx);

	if (crde->crd_flags & CRD_F_ENCRYPT) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest), digest);
		crp->crp_etype = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest2), digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0)
			crp->crp_etype = 0;
		else
			crp->crp_etype = EBADMSG;
	}
	crypto_done(crp);
}

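/*
 * Standard device_probe(9) method: match the PCI device id against the
 * table of supported CCP engines above.
 */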
static int
ccp_probe(device_t dev)
{
	struct pciid *ip;
	uint32_t id;

	id = pci_get_devid(dev);
	for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) {
		if (id == ip->devid) {
			device_set_desc(dev, ip->desc);
			return (0);
		}
	}
	return (ENXIO);
}

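/*
 * Set up the driver-side state for each hardware queue: softc
 * back-pointer, queue index, lock, and the scatter/gather lists used to
 * describe requests.
 */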
static void
ccp_initialize_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		qp->cq_softc = sc;
		qp->cq_qindex = i;
		mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF);
		/* XXX - arbitrarily chosen sizes */
		qp->cq_sg_crp = sglist_alloc(32, M_WAITOK);
		/* Two more SGEs than sg_crp to accommodate ipad. */
		qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK);
		qp->cq_sg_dst = sglist_alloc(2, M_WAITOK);
	}
}

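/* Tear down the per-queue state created by ccp_initialize_queues(). */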
static void
ccp_free_queues(struct ccp_softc *sc)
{
	struct ccp_queue *qp;
	size_t i;

	for (i = 0; i < nitems(sc->queues); i++) {
		qp = &sc->queues[i];

		mtx_destroy(&qp->cq_lock);
		sglist_free(qp->cq_sg_crp);
		sglist_free(qp->cq_sg_ulptx);
		sglist_free(qp->cq_sg_dst);
	}
}

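/*
 * Standard device_attach(9) method: register with opencrypto, bring up
 * the hardware, and advertise the algorithms the engine supports.  The
 * first device to attach also backs the global softc and, when the part
 * has a TRNG, is registered as an entropy source.
 */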
static int
ccp_attach(device_t dev)
{
	struct ccp_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}

	error = ccp_hw_attach(dev);
	if (error != 0)
		return (error);

	mtx_init(&sc->lock, "ccp", NULL, MTX_DEF);

	ccp_initialize_queues(sc);

	if (g_ccp_softc == NULL) {
		g_ccp_softc = sc;
		if ((sc->hw_features & VERSION_CAP_TRNG) != 0)
			random_source_register(&random_ccp);
	}

	if ((sc->hw_features & VERSION_CAP_AES) != 0) {
		crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
	}
	if ((sc->hw_features & VERSION_CAP_SHA) != 0) {
		crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_384_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_512_HMAC, 0, 0);
	}

	return (0);
}

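/*
 * Standard device_detach(9) method: fails with EBUSY while any session
 * is still active or has requests outstanding.
 */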
static int
ccp_detach(device_t dev)
{
	struct ccp_softc *sc;
	int i;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	for (i = 0; i < sc->nsessions; i++) {
		if (sc->sessions[i].active || sc->sessions[i].pending != 0) {
			mtx_unlock(&sc->lock);
			return (EBUSY);
		}
	}
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);
	if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0)
		random_source_deregister(&random_ccp);

	ccp_hw_detach(dev);
	ccp_free_queues(sc);

	if (g_ccp_softc == sc)
		g_ccp_softc = NULL;

	free(sc->sessions, M_CCP);
	mtx_destroy(&sc->lock);
	return (0);
}

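/*
 * Precompute the HMAC inner and outer pads for a session key.  klen is
 * given in bits; keys longer than the hash block size are first replaced
 * by their digest, per the HMAC construction (RFC 2104).
 */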
static void
ccp_init_hmac_digest(struct ccp_session *s, int cri_alg, char *key,
    int klen)
{
	union authctx auth_ctx;
	struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	axf = s->hmac.auth_hash;
	klen /= 8;
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		explicit_bzero(&auth_ctx, sizeof(auth_ctx));
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}
}

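/*
 * Validate a key length (in bits) for the given AES algorithm.  XTS keys
 * are twice the AES key size because they carry a second, tweak key.
 */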
static int
ccp_aes_check_keylen(int alg, int klen)
{

	switch (klen) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

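/*
 * Record the cipher key and the matching CCP key-size selector in the
 * session.  For XTS, the key material holds both the data and tweak keys,
 * so the AES key size is half the total length.
 */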
static void
ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen)
{
	unsigned kbits;

	if (alg == CRYPTO_AES_XTS)
		kbits = klen / 2;
	else
		kbits = klen;

	switch (kbits) {
	case 128:
		s->blkcipher.cipher_type = CCP_AES_TYPE_128;
		break;
	case 192:
		s->blkcipher.cipher_type = CCP_AES_TYPE_192;
		break;
	case 256:
		s->blkcipher.cipher_type = CCP_AES_TYPE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen / 8;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
}

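/*
 * cryptodev_newsession method: parse the cryptoini chain into at most one
 * hash and one cipher transform, pick a free session slot (growing the
 * session array if necessary), bind the session to a hardware queue, and
 * precompute any key material.
 */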
static int
ccp_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct ccp_softc *sc;
	struct ccp_session *s;
	struct auth_hash *auth_hash;
	struct cryptoini *c, *hash, *cipher;
	enum ccp_aes_mode cipher_mode;
	unsigned auth_mode, iv_len;
	unsigned partial_digest_len;
	unsigned q;
	int error, i, sess;
	bool gcm_hash;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	gcm_hash = false;
	cipher = NULL;
	hash = NULL;
	auth_hash = NULL;
	/* XXX reconcile auth_mode with use by ccp_sha */
	auth_mode = 0;
	cipher_mode = CCP_AES_MODE_ECB;
	iv_len = 0;
	partial_digest_len = 0;
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			if (hash)
				return (EINVAL);
			hash = c;
			switch (c->cri_alg) {
			case CRYPTO_SHA1_HMAC:
				auth_hash = &auth_hash_hmac_sha1;
				auth_mode = SHA1;
				partial_digest_len = SHA1_HASH_LEN;
				break;
			case CRYPTO_SHA2_256_HMAC:
				auth_hash = &auth_hash_hmac_sha2_256;
				auth_mode = SHA2_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_384_HMAC:
				auth_hash = &auth_hash_hmac_sha2_384;
				auth_mode = SHA2_384;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_SHA2_512_HMAC:
				auth_hash = &auth_hash_hmac_sha2_512;
				auth_mode = SHA2_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_AES_128_NIST_GMAC:
			case CRYPTO_AES_192_NIST_GMAC:
			case CRYPTO_AES_256_NIST_GMAC:
				gcm_hash = true;
#if 0
				auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
#endif
				break;
			}
			break;
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_XTS:
			if (cipher)
				return (EINVAL);
			cipher = c;
			switch (c->cri_alg) {
			case CRYPTO_AES_CBC:
				cipher_mode = CCP_AES_MODE_CBC;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_ICM:
				cipher_mode = CCP_AES_MODE_CTR;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_NIST_GCM_16:
				cipher_mode = CCP_AES_MODE_GCTR;
				iv_len = AES_GCM_IV_LEN;
				break;
			case CRYPTO_AES_XTS:
				cipher_mode = CCP_AES_MODE_XTS;
				iv_len = AES_BLOCK_LEN;
				break;
			}
			if (c->cri_key != NULL) {
				error = ccp_aes_check_keylen(c->cri_alg,
				    c->cri_klen);
				if (error != 0)
					return (error);
			}
			break;
		default:
			return (EINVAL);
		}
	}
	if (gcm_hash != (cipher_mode == CCP_AES_MODE_GCTR))
		return (EINVAL);
	if (hash == NULL && cipher == NULL)
		return (EINVAL);
	if (hash != NULL && hash->cri_key == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	sess = -1;
	for (i = 0; i < sc->nsessions; i++) {
		if (!sc->sessions[i].active && sc->sessions[i].pending == 0) {
			sess = i;
			break;
		}
	}
	if (sess == -1) {
		s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCP,
		    M_NOWAIT | M_ZERO);
		if (s == NULL) {
			mtx_unlock(&sc->lock);
			return (ENOMEM);
		}
		if (sc->sessions != NULL)
			memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions);
		sess = sc->nsessions;
		free(sc->sessions, M_CCP);
		sc->sessions = s;
		sc->nsessions++;
	}

	s = &sc->sessions[sess];

	/* Just grab the first usable queue for now. */
	for (q = 0; q < nitems(sc->queues); q++)
		if ((sc->valid_queues & (1 << q)) != 0)
			break;
	if (q == nitems(sc->queues)) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	s->queue = q;

	if (gcm_hash)
		s->mode = GCM;
	else if (hash != NULL && cipher != NULL)
		s->mode = AUTHENC;
	else if (hash != NULL)
		s->mode = HMAC;
	else {
		MPASS(cipher != NULL);
		s->mode = BLKCIPHER;
	}
	if (gcm_hash) {
		if (hash->cri_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = hash->cri_mlen;
	} else if (hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.partial_digest_len = partial_digest_len;
		if (hash->cri_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = hash->cri_mlen;
		ccp_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
		    hash->cri_klen);
	}
	if (cipher != NULL) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = iv_len;
		if (cipher->cri_key != NULL)
			ccp_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
			    cipher->cri_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	*sidp = sess;
	return (0);
}

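/*
 * cryptodev_freesession method: mark the session slot inactive so it can
 * be reused.  A slot with requests still pending is only reclaimed once
 * those requests have drained.
 */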
static int
ccp_freesession(device_t dev, uint64_t tid)
{
	struct ccp_softc *sc;
	uint32_t sid;
	int error;

	sc = device_get_softc(dev);
	sid = CRYPTO_SESID2LID(tid);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active)
		error = EINVAL;
	else {
		if (sc->sessions[sid].pending != 0)
			device_printf(dev,
			    "session %d freed with %d pending requests\n", sid,
			    sc->sessions[sid].pending);
		sc->sessions[sid].active = false;
		error = 0;
	}
	mtx_unlock(&sc->lock);
	return (error);
}

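/*
 * cryptodev_process method: dispatch one crypto request.  Reserve ring
 * space on the session's queue, build a scatter/gather list covering the
 * operation buffer, and hand the request to the mode-specific handler.
 * Failures are reported through crp->crp_etype and crypto_done() rather
 * than the return value.
 */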
static int
ccp_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ccp_softc *sc;
	struct ccp_queue *qp;
	struct ccp_session *s;
	struct cryptodesc *crd, *crda, *crde;
	uint32_t sid;
	int error;
	bool qpheld;

	qpheld = false;
	qp = NULL;
	if (crp == NULL)
		return (EINVAL);

	crd = crp->crp_desc;
	sid = CRYPTO_SESID2LID(crp->crp_sid);
	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active) {
		mtx_unlock(&sc->lock);
		error = EINVAL;
		goto out;
	}

	s = &sc->sessions[sid];
	qp = &sc->queues[s->queue];
	mtx_unlock(&sc->lock);
	error = ccp_queue_acquire_reserve(qp, 1 /* placeholder */, M_NOWAIT);
	if (error != 0)
		goto out;
	qpheld = true;

	error = ccp_populate_sglist(qp->cq_sg_crp, crp);
	if (error != 0)
		goto out;

	switch (s->mode) {
	case HMAC:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
			ccp_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		error = ccp_hmac(qp, s, crp);
		break;
	case BLKCIPHER:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crd->crd_alg,
			    crd->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		}
		error = ccp_blkcipher(qp, s, crp);
		break;
	case AUTHENC:
		error = 0;
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			/* Only encrypt-then-authenticate supported. */
			crde = crd;
			crda = crd->crd_next;
			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
				error = EINVAL;
				break;
			}
			s->cipher_first = true;
			break;
		default:
			crda = crd;
			crde = crd->crd_next;
			if (crde->crd_flags & CRD_F_ENCRYPT) {
				error = EINVAL;
				break;
			}
			s->cipher_first = false;
			break;
		}
		if (error != 0)
			break;
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccp_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
			    crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		error = ccp_authenc(qp, s, crp, crda, crde);
		break;
	case GCM:
		error = 0;
		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
			crde = crd;
			crda = crd->crd_next;
			s->cipher_first = true;
		} else {
			crda = crd;
			crde = crd->crd_next;
			s->cipher_first = false;
		}
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccp_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error != 0)
				break;
			ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		if (crde->crd_len == 0) {
			mtx_unlock(&qp->cq_lock);
			ccp_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		error = ccp_gcm(qp, s, crp, crda, crde);
		break;
	}

	if (error == 0)
		s->pending++;

out:
	if (qpheld) {
		if (error != 0) {
			/*
			 * Squash EAGAIN so callers don't uselessly and
			 * expensively retry if the ring was full.
			 */
			if (error == EAGAIN)
				error = ENOMEM;
			ccp_queue_abort(qp);
		} else
			ccp_queue_release(qp);
	}

	if (error != 0) {
		DPRINTF(dev, "%s: early error:%d\n", __func__, error);
		crp->crp_etype = error;
		crypto_done(crp);
	}
	return (0);
}

static device_method_t ccp_methods[] = {
	DEVMETHOD(device_probe,		ccp_probe),
	DEVMETHOD(device_attach,	ccp_attach),
	DEVMETHOD(device_detach,	ccp_detach),

	DEVMETHOD(cryptodev_newsession,	ccp_newsession),
	DEVMETHOD(cryptodev_freesession, ccp_freesession),
	DEVMETHOD(cryptodev_process,	ccp_process),

	DEVMETHOD_END
};

static driver_t ccp_driver = {
	"ccp",
	ccp_methods,
	sizeof(struct ccp_softc)
};

static devclass_t ccp_devclass;
DRIVER_MODULE(ccp, pci, ccp_driver, ccp_devclass, NULL, NULL);
MODULE_VERSION(ccp, 1);
MODULE_DEPEND(ccp, crypto, 1, 1, 1);
MODULE_DEPEND(ccp, random_device, 1, 1, 1);

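/*
 * Wait, if M_WAITOK is set, until at least n descriptor slots are free
 * in the queue's ring; otherwise fail with EAGAIN.  Called with the
 * queue lock held.
 */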
static int
ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags)
{
	struct ccp_softc *sc;

	mtx_assert(&qp->cq_lock, MA_OWNED);
	sc = qp->cq_softc;

	if (n < 1 || n >= (1 << sc->ring_size_order))
		return (EINVAL);

	while (true) {
		if (ccp_queue_get_ring_space(qp) >= n)
			return (0);
		if ((mflags & M_WAITOK) == 0)
			return (EAGAIN);
		qp->cq_waiting = true;
		msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0);
	}
}

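/*
 * Lock the queue and reserve ring space for n descriptors.  On success
 * the caller holds the queue lock until ccp_queue_release() or
 * ccp_queue_abort(); on failure the lock is dropped before returning.
 */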
int
ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
{
	int error;

	mtx_lock(&qp->cq_lock);
	qp->cq_acq_tail = qp->cq_tail;
	error = ccp_queue_reserve_space(qp, n, mflags);
	if (error != 0)
		mtx_unlock(&qp->cq_lock);
	return (error);
}

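/*
 * Commit any descriptors written since ccp_queue_acquire_reserve() by
 * updating the hardware tail pointer, then drop the queue lock.
 */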
void
ccp_queue_release(struct ccp_queue *qp)
{

	mtx_assert(&qp->cq_lock, MA_OWNED);
	if (qp->cq_tail != qp->cq_acq_tail) {
		wmb();
		ccp_queue_write_tail(qp);
	}
	mtx_unlock(&qp->cq_lock);
}

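/*
 * Abandon a reserved transaction: zero any descriptors already written,
 * rewind the tail to where the reservation started, and drop the queue
 * lock.
 */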
void
ccp_queue_abort(struct ccp_queue *qp)
{
	unsigned i;

	mtx_assert(&qp->cq_lock, MA_OWNED);

	/* Wipe out any descriptors associated with this aborted txn. */
	for (i = qp->cq_acq_tail; i != qp->cq_tail;
	    i = (i + 1) % (1 << qp->cq_softc->ring_size_order)) {
		memset(&qp->desc_ring[i], 0, sizeof(qp->desc_ring[i]));
	}
	qp->cq_tail = qp->cq_acq_tail;

	mtx_unlock(&qp->cq_lock);
}

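/*
 * A minimal sketch of how the three helpers above compose, following the
 * caller in ccp_process() (the reservation of 1 descriptor mirrors the
 * placeholder used there):
 *
 *	error = ccp_queue_acquire_reserve(qp, 1, M_NOWAIT);
 *	if (error != 0)
 *		return (error);		(lock already dropped on failure)
 *	error = ccp_hmac(qp, s, crp);	(handler fills descriptors at tail)
 *	if (error == 0)
 *		ccp_queue_release(qp);	(ring doorbell, drop lock)
 *	else
 *		ccp_queue_abort(qp);	(rewind tail, drop lock)
 */
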
#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
static void
db_show_ccp_sc(struct ccp_softc *sc)
{

	db_printf("ccp softc at %p\n", sc);
	db_printf(" cid: %d\n", (int)sc->cid);
	db_printf(" nsessions: %d\n", sc->nsessions);

	db_printf(" lock: ");
	db_show_lock(&sc->lock);

	db_printf(" detaching: %d\n", (int)sc->detaching);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);

	db_printf(" hw_version: %d\n", (int)sc->hw_version);
	db_printf(" hw_features: %b\n", (int)sc->hw_features,
	    "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA"
	    "\11SHA\0103DES\07AES");

	db_printf(" hw status:\n");
	db_ccp_show_hw(sc);
}

static void
db_show_ccp_qp(struct ccp_queue *qp)
{

	db_printf(" lock: ");
	db_show_lock(&qp->cq_lock);

	db_printf(" cq_qindex: %u\n", qp->cq_qindex);
	db_printf(" cq_softc: %p\n", qp->cq_softc);

	db_printf(" head: %u\n", qp->cq_head);
	db_printf(" tail: %u\n", qp->cq_tail);
	db_printf(" acq_tail: %u\n", qp->cq_acq_tail);
	db_printf(" desc_ring: %p\n", qp->desc_ring);
	db_printf(" completions_ring: %p\n", qp->completions_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)qp->desc_ring_bus_addr);

	db_printf(" hw status:\n");
	db_ccp_show_queue_hw(qp);
}

DB_SHOW_COMMAND(ccp, db_show_ccp)
{
	struct ccp_softc *sc;
	unsigned unit, qindex;

	if (!have_addr)
		goto usage;

	unit = (unsigned)addr;

	sc = devclass_get_softc(ccp_devclass, unit);
	if (sc == NULL) {
		db_printf("No such device ccp%u\n", unit);
		goto usage;
	}

	if (count == -1) {
		db_show_ccp_sc(sc);
		return;
	}

	qindex = (unsigned)count;
	if (qindex >= nitems(sc->queues)) {
		db_printf("No such queue %u\n", qindex);
		goto usage;
	}
	db_show_ccp_qp(&sc->queues[qindex]);
	return;

usage:
	db_printf("usage: show ccp <unit>[,<qindex>]\n");
	return;
}
#endif /* DDB */
932