xref: /freebsd/sys/dev/cxgbe/crypto/t4_crypto.c (revision 0b57cec536236d46e3dba9bd041533462f33dbb7)
1 /*-
2  * Copyright (c) 2017 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: John Baldwin <jhb@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/types.h>
32 #include <sys/bus.h>
33 #include <sys/lock.h>
34 #include <sys/malloc.h>
35 #include <sys/mutex.h>
36 #include <sys/module.h>
37 #include <sys/sglist.h>
38 
39 #include <opencrypto/cryptodev.h>
40 #include <opencrypto/xform.h>
41 
42 #include "cryptodev_if.h"
43 
44 #include "common/common.h"
45 #include "crypto/t4_crypto.h"
46 
47 /*
48  * Requests consist of:
49  *
50  * +-------------------------------+
51  * | struct fw_crypto_lookaside_wr |
52  * +-------------------------------+
53  * | struct ulp_txpkt              |
54  * +-------------------------------+
55  * | struct ulptx_idata            |
56  * +-------------------------------+
57  * | struct cpl_tx_sec_pdu         |
58  * +-------------------------------+
59  * | struct cpl_tls_tx_scmd_fmt    |
60  * +-------------------------------+
61  * | key context header            |
62  * +-------------------------------+
63  * | AES key                       |  ----- For requests with AES
64  * +-------------------------------+
65  * | Hash state                    |  ----- For hash-only requests
66  * +-------------------------------+ -
67  * | IPAD (16-byte aligned)        |  \
68  * +-------------------------------+  +---- For requests with HMAC
69  * | OPAD (16-byte aligned)        |  /
70  * +-------------------------------+ -
71  * | GMAC H                        |  ----- For AES-GCM
72  * +-------------------------------+ -
73  * | struct cpl_rx_phys_dsgl       |  \
74  * +-------------------------------+  +---- Destination buffer for
75  * | PHYS_DSGL entries             |  /     non-hash-only requests
76  * +-------------------------------+ -
77  * | 16 dummy bytes                |  ----- Only for HMAC/hash-only requests
78  * +-------------------------------+
79  * | IV                            |  ----- If immediate IV
80  * +-------------------------------+
81  * | Payload                       |  ----- If immediate Payload
82  * +-------------------------------+ -
83  * | struct ulptx_sgl              |  \
84  * +-------------------------------+  +---- If payload via SGL
85  * | SGL entries                   |  /
86  * +-------------------------------+ -
87  *
88  * Note that the key context must be padded to ensure 16-byte alignment.
89  * For HMAC requests, the key consists of the partial hash of the IPAD
90  * followed by the partial hash of the OPAD.
91  *
92  * Replies consist of:
93  *
94  * +-------------------------------+
95  * | struct cpl_fw6_pld            |
96  * +-------------------------------+
97  * | hash digest                   |  ----- For HMAC request with
98  * +-------------------------------+        'hash_size' set in work request
99  *
100  * A 32-bit big-endian error status word is supplied in the last 4
101  * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
102  * "MAC" error and bit 1 indicates a "PAD" error.
103  *
104  * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
105  * in the request is returned in data[1] of the CPL_FW6_PLD message.
106  *
107  * For block cipher replies, the updated IV is supplied in data[2] and
108  * data[3] of the CPL_FW6_PLD message.
109  *
110  * For hash replies where the work request set 'hash_size' to request
111  * a copy of the hash in the reply, the hash digest is supplied
112  * immediately following the CPL_FW6_PLD message.
113  */
114 
115 /*
116  * The crypto engine supports a maximum AAD size of 511 bytes.
117  */
118 #define	MAX_AAD_LEN		511
119 
120 /*
121  * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
122  * entries.  While the CPL includes a 16-bit length field, the T6 can
123  * sometimes hang if an error occurs while processing a request with a
124  * single DSGL entry larger than 2k.
125  */
126 #define	MAX_RX_PHYS_DSGL_SGE	32
127 #define	DSGL_SGE_MAXLEN		2048
128 
129 /*
130  * The adapter only supports requests with a total input or output
131  * length of 64k-1 or smaller.  Longer requests result in either hung
132  * requests or incorrect results.
133  */
134 #define	MAX_REQUEST_SIZE	65535
135 
136 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");
137 
138 struct ccr_session_hmac {
139 	struct auth_hash *auth_hash;
140 	int hash_len;
141 	unsigned int partial_digest_len;
142 	unsigned int auth_mode;
143 	unsigned int mk_size;
144 	char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2];
145 };
146 
147 struct ccr_session_gmac {
148 	int hash_len;
149 	char ghash_h[GMAC_BLOCK_LEN];
150 };
151 
152 struct ccr_session_ccm_mac {
153 	int hash_len;
154 };
155 
156 struct ccr_session_blkcipher {
157 	unsigned int cipher_mode;
158 	unsigned int key_len;
159 	unsigned int iv_len;
160 	__be32 key_ctx_hdr;
161 	char enckey[CHCR_AES_MAX_KEY_LEN];
162 	char deckey[CHCR_AES_MAX_KEY_LEN];
163 };
164 
165 struct ccr_session {
166 	bool active;
167 	int pending;
168 	enum { HASH, HMAC, BLKCIPHER, AUTHENC, GCM, CCM } mode;
169 	union {
170 		struct ccr_session_hmac hmac;
171 		struct ccr_session_gmac gmac;
172 		struct ccr_session_ccm_mac ccm_mac;
173 	};
174 	struct ccr_session_blkcipher blkcipher;
175 };
176 
177 struct ccr_softc {
178 	struct adapter *adapter;
179 	device_t dev;
180 	uint32_t cid;
181 	int tx_channel_id;
182 	struct mtx lock;
183 	bool detaching;
184 	struct sge_wrq *txq;
185 	struct sge_rxq *rxq;
186 
187 	/*
188 	 * Pre-allocate S/G lists used when preparing a work request.
189 	 * 'sg_crp' contains an sglist describing the entire buffer
190 	 * for a 'struct cryptop'.  'sg_ulptx' is used to describe
191 	 * the data the engine should DMA as input via ULPTX_SGL.
192 	 * 'sg_dsgl' is used to describe the destination that cipher
193 	 * text and a tag should be written to.
194 	 */
195 	struct sglist *sg_crp;
196 	struct sglist *sg_ulptx;
197 	struct sglist *sg_dsgl;
198 
199 	/*
200 	 * Pre-allocate a dummy output buffer for the IV and AAD for
201 	 * AEAD requests.
202 	 */
203 	char *iv_aad_buf;
204 	struct sglist *sg_iv_aad;
205 
206 	/* Statistics. */
207 	uint64_t stats_blkcipher_encrypt;
208 	uint64_t stats_blkcipher_decrypt;
209 	uint64_t stats_hash;
210 	uint64_t stats_hmac;
211 	uint64_t stats_authenc_encrypt;
212 	uint64_t stats_authenc_decrypt;
213 	uint64_t stats_gcm_encrypt;
214 	uint64_t stats_gcm_decrypt;
215 	uint64_t stats_ccm_encrypt;
216 	uint64_t stats_ccm_decrypt;
217 	uint64_t stats_wr_nomem;
218 	uint64_t stats_inflight;
219 	uint64_t stats_mac_error;
220 	uint64_t stats_pad_error;
221 	uint64_t stats_bad_session;
222 	uint64_t stats_sglist_error;
223 	uint64_t stats_process_error;
224 	uint64_t stats_sw_fallback;
225 };
226 
227 /*
228  * Crypto requests involve two kinds of scatter/gather lists.
229  *
230  * Non-hash-only requests require a PHYS_DSGL that describes the
231  * location to store the results of the encryption or decryption
232  * operation.  This SGL uses a different format (PHYS_DSGL) and should
233  * exclude the crd_skip bytes at the start of the data as well as
234  * any AAD or IV.  For authenticated encryption requests it should
235  * also include the destination of the hash or tag.
236  *
237  * The input payload may either be supplied inline as immediate data,
238  * or via a standard ULP_TX SGL.  This SGL should include AAD,
239  * ciphertext, and the hash or tag for authenticated decryption
240  * requests.
241  *
242  * These scatter/gather lists can describe different subsets of the
243  * buffer described by the crypto operation.  ccr_populate_sglist()
244  * generates a scatter/gather list that covers the entire crypto
245  * operation buffer that is then used to construct the other
246  * scatter/gather lists.
247  */
248 static int
249 ccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
250 {
251 	int error;
252 
253 	sglist_reset(sg);
254 	if (crp->crp_flags & CRYPTO_F_IMBUF)
255 		error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
256 	else if (crp->crp_flags & CRYPTO_F_IOV)
257 		error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
258 	else
259 		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
260 	return (error);
261 }
262 
263 /*
264  * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
265  * segments.
266  */
267 static int
268 ccr_count_sgl(struct sglist *sg, int maxsegsize)
269 {
270 	int i, nsegs;
271 
272 	nsegs = 0;
273 	for (i = 0; i < sg->sg_nseg; i++)
274 		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
275 	return (nsegs);
276 }
277 
278 /* These functions deal with PHYS_DSGL for the reply buffer. */
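/*
 * Compute the space needed for the destination PHYS_DSGL entries that
 * follow the CPL_RX_PHYS_DSGL header.  Entries are emitted in groups
 * of 8 address/length pairs (struct phys_sge_pairs); a partial final
 * group still consumes all 8 16-bit length slots, with the 64-bit
 * addresses allocated in pairs.
 */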
279 static inline int
280 ccr_phys_dsgl_len(int nsegs)
281 {
282 	int len;
283 
284 	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
285 	if ((nsegs % 8) != 0) {
286 		len += sizeof(uint16_t) * 8;
287 		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
288 	}
289 	return (len);
290 }
291 
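/*
 * Write the CPL_RX_PHYS_DSGL header and the destination SGL entries.
 * Segments longer than DSGL_SGE_MAXLEN are split across multiple
 * entries to avoid the hang described above.
 */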
292 static void
293 ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs)
294 {
295 	struct sglist *sg;
296 	struct cpl_rx_phys_dsgl *cpl;
297 	struct phys_sge_pairs *sgl;
298 	vm_paddr_t paddr;
299 	size_t seglen;
300 	u_int i, j;
301 
302 	sg = sc->sg_dsgl;
303 	cpl = dst;
304 	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
305 	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
306 	cpl->pcirlxorder_to_noofsgentr = htobe32(
307 	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
308 	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
309 	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
310 	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
311 	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
312 	cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id);
313 	cpl->rss_hdr_int.hash_val = 0;
314 	sgl = (struct phys_sge_pairs *)(cpl + 1);
315 	j = 0;
316 	for (i = 0; i < sg->sg_nseg; i++) {
317 		seglen = sg->sg_segs[i].ss_len;
318 		paddr = sg->sg_segs[i].ss_paddr;
319 		do {
320 			sgl->addr[j] = htobe64(paddr);
321 			if (seglen > DSGL_SGE_MAXLEN) {
322 				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
323 				paddr += DSGL_SGE_MAXLEN;
324 				seglen -= DSGL_SGE_MAXLEN;
325 			} else {
326 				sgl->len[j] = htobe16(seglen);
327 				seglen = 0;
328 			}
329 			j++;
330 			if (j == 8) {
331 				sgl++;
332 				j = 0;
333 			}
334 		} while (seglen != 0);
335 	}
336 	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
337 }
338 
339 /* These functions deal with the ULPTX_SGL for input payload. */
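/*
 * Compute the length of a ULPTX_SGL describing 'nsegs' segments.  The
 * first segment is embedded in struct ulptx_sgl itself; the remaining
 * segments are stored two per ulptx_sge_pair, and the total is padded
 * to a multiple of 16 bytes.
 */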
340 static inline int
341 ccr_ulptx_sgl_len(int nsegs)
342 {
343 	u_int n;
344 
345 	nsegs--; /* first segment is part of ulptx_sgl */
346 	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
347 	return (roundup2(n, 16));
348 }
349 
350 static void
351 ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
352 {
353 	struct ulptx_sgl *usgl;
354 	struct sglist *sg;
355 	struct sglist_seg *ss;
356 	int i;
357 
358 	sg = sc->sg_ulptx;
359 	MPASS(nsegs == sg->sg_nseg);
360 	ss = &sg->sg_segs[0];
361 	usgl = dst;
362 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
363 	    V_ULPTX_NSGE(nsegs));
364 	usgl->len0 = htobe32(ss->ss_len);
365 	usgl->addr0 = htobe64(ss->ss_paddr);
366 	ss++;
367 	for (i = 0; i < sg->sg_nseg - 1; i++) {
368 		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
369 		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
370 		ss++;
371 	}
372 
373 }
374 
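/*
 * Decide whether the input payload can be passed as immediate data in
 * the work request itself.  It must fit within CRYPTO_MAX_IMM_TX_PKT_LEN,
 * and the resulting work request must not exceed SGE_MAX_WR_LEN.
 */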
375 static bool
376 ccr_use_imm_data(u_int transhdr_len, u_int input_len)
377 {
378 
379 	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
380 		return (false);
381 	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
382 	    SGE_MAX_WR_LEN)
383 		return (false);
384 	return (true);
385 }
386 
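/*
 * Fill in the fields common to all crypto work requests: the
 * FW_CRYPTO_LOOKASIDE_WR header, the ULP_TX_PKT and ULP_TX_SC_IMM
 * sub-commands, and the cookie used to match the eventual CPL_FW6_PLD
 * reply back to this 'struct cryptop'.
 */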
387 static void
388 ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len,
389     u_int wr_len, u_int imm_len, u_int sgl_len, u_int hash_size,
390     struct cryptop *crp)
391 {
392 	u_int cctx_size, idata_len;
393 
394 	cctx_size = sizeof(struct _key_ctx) + kctx_len;
395 	crwr->wreq.op_to_cctx_size = htobe32(
396 	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
397 	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
398 	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
399 	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
400 	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
401 	crwr->wreq.len16_pkd = htobe32(
402 	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
403 	crwr->wreq.session_id = 0;
404 	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
405 	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) |
406 	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
407 	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
408 	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
409 	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
410 	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
411 	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id));
412 	crwr->wreq.key_addr = 0;
413 	crwr->wreq.pld_size_hash_size = htobe32(
414 	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
415 	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
416 	crwr->wreq.cookie = htobe64((uintptr_t)crp);
417 
418 	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
419 	    V_ULP_TXPKT_DATAMODIFY(0) |
420 	    V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) |
421 	    V_ULP_TXPKT_FID(sc->rxq->iq.abs_id) | V_ULP_TXPKT_RO(1));
422 	crwr->ulptx.len = htobe32(
423 	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
424 
425 	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
426 	    V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0));
427 	idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len;
428 	if (imm_len % 16 != 0)
429 		idata_len -= 16 - imm_len % 16;
430 	crwr->sc_imm.len = htobe32(idata_len);
431 }
432 
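/*
 * Build and send a work request for a plain hash or HMAC operation.
 * A zero-length payload is handled by supplying a single padded block
 * constructed inline in the work request.
 */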
433 static int
434 ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
435 {
436 	struct chcr_wr *crwr;
437 	struct wrqe *wr;
438 	struct auth_hash *axf;
439 	struct cryptodesc *crd;
440 	char *dst;
441 	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
442 	u_int hmac_ctrl, imm_len, iopad_size;
443 	int error, sgl_nsegs, sgl_len, use_opad;
444 
445 	crd = crp->crp_desc;
446 
447 	/* Reject requests with too large of an input buffer. */
448 	if (crd->crd_len > MAX_REQUEST_SIZE)
449 		return (EFBIG);
450 
451 	axf = s->hmac.auth_hash;
452 
453 	if (s->mode == HMAC) {
454 		use_opad = 1;
455 		hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
456 	} else {
457 		use_opad = 0;
458 		hmac_ctrl = SCMD_HMAC_CTRL_NOP;
459 	}
460 
461 	/* PADs must be 128-bit aligned. */
462 	iopad_size = roundup2(s->hmac.partial_digest_len, 16);
463 
464 	/*
465 	 * The 'key' part of the context includes the aligned IPAD and
466 	 * OPAD.
467 	 */
468 	kctx_len = iopad_size;
469 	if (use_opad)
470 		kctx_len += iopad_size;
471 	hash_size_in_response = axf->hashsize;
472 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
473 
474 	if (crd->crd_len == 0) {
475 		imm_len = axf->blocksize;
476 		sgl_nsegs = 0;
477 		sgl_len = 0;
478 	} else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) {
479 		imm_len = crd->crd_len;
480 		sgl_nsegs = 0;
481 		sgl_len = 0;
482 	} else {
483 		imm_len = 0;
484 		sglist_reset(sc->sg_ulptx);
485 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
486 		    crd->crd_skip, crd->crd_len);
487 		if (error)
488 			return (error);
489 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
490 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
491 	}
492 
493 	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
494 	if (wr_len > SGE_MAX_WR_LEN)
495 		return (EFBIG);
496 	wr = alloc_wrqe(wr_len, sc->txq);
497 	if (wr == NULL) {
498 		sc->stats_wr_nomem++;
499 		return (ENOMEM);
500 	}
501 	crwr = wrtod(wr);
502 	memset(crwr, 0, wr_len);
503 
504 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
505 	    hash_size_in_response, crp);
506 
507 	/* XXX: Hardcodes SGE loopback channel of 0. */
508 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
509 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
510 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
511 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
512 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
513 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));
514 
515 	crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize :
516 	    crd->crd_len);
517 
518 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
519 	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));
520 
521 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
522 	crwr->sec_cpl.seqno_numivs = htobe32(
523 	    V_SCMD_SEQ_NO_CTRL(0) |
524 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
525 	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) |
526 	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
527 	    V_SCMD_HMAC_CTRL(hmac_ctrl));
528 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
529 	    V_SCMD_LAST_FRAG(0) |
530 	    V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1));
531 
532 	memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len);
533 
534 	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
535 	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
536 	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
537 	    V_KEY_CONTEXT_OPAD_PRESENT(use_opad) |
538 	    V_KEY_CONTEXT_SALT_PRESENT(1) |
539 	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
540 	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));
541 
542 	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
543 	if (crd->crd_len == 0) {
544 		dst[0] = 0x80;
545 		if (s->mode == HMAC)
546 			*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
547 			    htobe64(axf->blocksize << 3);
548 	} else if (imm_len != 0)
549 		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
550 		    crd->crd_len, dst);
551 	else
552 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
553 
554 	/* XXX: TODO backpressure */
555 	t4_wrq_tx(sc->adapter, wr);
556 
557 	return (0);
558 }
559 
560 static int
561 ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
562     const struct cpl_fw6_pld *cpl, int error)
563 {
564 	struct cryptodesc *crd;
565 
566 	crd = crp->crp_desc;
567 	if (error == 0) {
568 		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
569 		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
570 	}
571 
572 	return (error);
573 }
574 
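/*
 * Build and send a work request for a cipher-only operation (AES-CBC,
 * AES-ICM, or AES-XTS).  The IV is always written at the start of the
 * input buffer.
 */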
575 static int
576 ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
577 {
578 	char iv[CHCR_MAX_CRYPTO_IV_LEN];
579 	struct chcr_wr *crwr;
580 	struct wrqe *wr;
581 	struct cryptodesc *crd;
582 	char *dst;
583 	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
584 	u_int imm_len;
585 	int dsgl_nsegs, dsgl_len;
586 	int sgl_nsegs, sgl_len;
587 	int error;
588 
589 	crd = crp->crp_desc;
590 
591 	if (s->blkcipher.key_len == 0 || crd->crd_len == 0)
592 		return (EINVAL);
593 	if (crd->crd_alg == CRYPTO_AES_CBC &&
594 	    (crd->crd_len % AES_BLOCK_LEN) != 0)
595 		return (EINVAL);
596 
597 	/* Reject requests with too large of an input buffer. */
598 	if (crd->crd_len > MAX_REQUEST_SIZE)
599 		return (EFBIG);
600 
601 	if (crd->crd_flags & CRD_F_ENCRYPT)
602 		op_type = CHCR_ENCRYPT_OP;
603 	else
604 		op_type = CHCR_DECRYPT_OP;
605 
606 	sglist_reset(sc->sg_dsgl);
607 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
608 	    crd->crd_len);
609 	if (error)
610 		return (error);
611 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
612 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
613 		return (EFBIG);
614 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
615 
616 	/* The 'key' must be 128-bit aligned. */
617 	kctx_len = roundup2(s->blkcipher.key_len, 16);
618 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
619 
620 	if (ccr_use_imm_data(transhdr_len, crd->crd_len +
621 	    s->blkcipher.iv_len)) {
622 		imm_len = crd->crd_len;
623 		sgl_nsegs = 0;
624 		sgl_len = 0;
625 	} else {
626 		imm_len = 0;
627 		sglist_reset(sc->sg_ulptx);
628 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
629 		    crd->crd_skip, crd->crd_len);
630 		if (error)
631 			return (error);
632 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
633 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
634 	}
635 
636 	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
637 	    roundup2(imm_len, 16) + sgl_len;
638 	if (wr_len > SGE_MAX_WR_LEN)
639 		return (EFBIG);
640 	wr = alloc_wrqe(wr_len, sc->txq);
641 	if (wr == NULL) {
642 		sc->stats_wr_nomem++;
643 		return (ENOMEM);
644 	}
645 	crwr = wrtod(wr);
646 	memset(crwr, 0, wr_len);
647 
648 	/*
649 	 * Read the existing IV from the request or generate a random
650 	 * one if none is provided.  Optionally copy the generated IV
651 	 * into the output buffer if requested.
652 	 */
653 	if (op_type == CHCR_ENCRYPT_OP) {
654 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
655 			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
656 		else
657 			arc4rand(iv, s->blkcipher.iv_len, 0);
658 		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
659 			crypto_copyback(crp->crp_flags, crp->crp_buf,
660 			    crd->crd_inject, s->blkcipher.iv_len, iv);
661 	} else {
662 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
663 			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
664 		else
665 			crypto_copydata(crp->crp_flags, crp->crp_buf,
666 			    crd->crd_inject, s->blkcipher.iv_len, iv);
667 	}
668 
669 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
670 	    crp);
671 
672 	/* XXX: Hardcodes SGE loopback channel of 0. */
673 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
674 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
675 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
676 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
677 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
678 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
679 
680 	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len);
681 
682 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
683 	    V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) |
684 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
685 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
686 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));
687 
688 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
689 	crwr->sec_cpl.seqno_numivs = htobe32(
690 	    V_SCMD_SEQ_NO_CTRL(0) |
691 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
692 	    V_SCMD_ENC_DEC_CTRL(op_type) |
693 	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
694 	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
695 	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
696 	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
697 	    V_SCMD_NUM_IVS(0));
698 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
699 	    V_SCMD_IV_GEN_CTRL(0) |
700 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
701 	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
702 
703 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
704 	switch (crd->crd_alg) {
705 	case CRYPTO_AES_CBC:
706 		if (crd->crd_flags & CRD_F_ENCRYPT)
707 			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
708 			    s->blkcipher.key_len);
709 		else
710 			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
711 			    s->blkcipher.key_len);
712 		break;
713 	case CRYPTO_AES_ICM:
714 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
715 		    s->blkcipher.key_len);
716 		break;
717 	case CRYPTO_AES_XTS:
718 		key_half = s->blkcipher.key_len / 2;
719 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
720 		    key_half);
721 		if (crd->crd_flags & CRD_F_ENCRYPT)
722 			memcpy(crwr->key_ctx.key + key_half,
723 			    s->blkcipher.enckey, key_half);
724 		else
725 			memcpy(crwr->key_ctx.key + key_half,
726 			    s->blkcipher.deckey, key_half);
727 		break;
728 	}
729 
730 	dst = (char *)(crwr + 1) + kctx_len;
731 	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
732 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
733 	memcpy(dst, iv, s->blkcipher.iv_len);
734 	dst += s->blkcipher.iv_len;
735 	if (imm_len != 0)
736 		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
737 		    crd->crd_len, dst);
738 	else
739 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
740 
741 	/* XXX: TODO backpressure */
742 	t4_wrq_tx(sc->adapter, wr);
743 
744 	return (0);
745 }
746 
747 static int
748 ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
749     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
750 {
751 
752 	/*
753 	 * The updated IV to permit chained requests is at
754 	 * cpl->data[2], but OCF doesn't permit chained requests.
755 	 */
756 	return (error);
757 }
758 
759 /*
760  * 'hashsize' is the length of a full digest.  'authsize' is the
761  * requested digest length for this operation which may be less
762  * than 'hashsize'.
763  */
764 static int
765 ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
766 {
767 
768 	if (authsize == 10)
769 		return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
770 	if (authsize == 12)
771 		return (SCMD_HMAC_CTRL_IPSEC_96BIT);
772 	if (authsize == hashsize / 2)
773 		return (SCMD_HMAC_CTRL_DIV2);
774 	return (SCMD_HMAC_CTRL_NO_TRUNC);
775 }
776 
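/*
 * Build and send a work request for a combined cipher + HMAC (AUTHENC)
 * operation such as AES-CBC with HMAC-SHA.  Any auth-only data that
 * precedes the cipher region is treated as AAD.
 */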
777 static int
778 ccr_authenc(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
779     struct cryptodesc *crda, struct cryptodesc *crde)
780 {
781 	char iv[CHCR_MAX_CRYPTO_IV_LEN];
782 	struct chcr_wr *crwr;
783 	struct wrqe *wr;
784 	struct auth_hash *axf;
785 	char *dst;
786 	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
787 	u_int hash_size_in_response, imm_len, iopad_size;
788 	u_int aad_start, aad_len, aad_stop;
789 	u_int auth_start, auth_stop, auth_insert;
790 	u_int cipher_start, cipher_stop;
791 	u_int hmac_ctrl, input_len;
792 	int dsgl_nsegs, dsgl_len;
793 	int sgl_nsegs, sgl_len;
794 	int error;
795 
796 	/*
797 	 * If there is a need in the future, requests with an empty
798 	 * payload could be supported as HMAC-only requests.
799 	 */
800 	if (s->blkcipher.key_len == 0 || crde->crd_len == 0)
801 		return (EINVAL);
802 	if (crde->crd_alg == CRYPTO_AES_CBC &&
803 	    (crde->crd_len % AES_BLOCK_LEN) != 0)
804 		return (EINVAL);
805 
806 	/*
807 	 * Compute the length of the AAD (data covered by the
808 	 * authentication descriptor but not the encryption
809 	 * descriptor).  To simplify the logic, AAD is only permitted
810 	 * before the cipher/plain text, not after.  This is true of
811 	 * all currently-generated requests.
812 	 */
813 	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
814 		return (EINVAL);
815 	if (crda->crd_skip < crde->crd_skip) {
816 		if (crda->crd_skip + crda->crd_len > crde->crd_skip)
817 			aad_len = (crde->crd_skip - crda->crd_skip);
818 		else
819 			aad_len = crda->crd_len;
820 	} else
821 		aad_len = 0;
822 	if (aad_len + s->blkcipher.iv_len > MAX_AAD_LEN)
823 		return (EINVAL);
824 
825 	axf = s->hmac.auth_hash;
826 	hash_size_in_response = s->hmac.hash_len;
827 	if (crde->crd_flags & CRD_F_ENCRYPT)
828 		op_type = CHCR_ENCRYPT_OP;
829 	else
830 		op_type = CHCR_DECRYPT_OP;
831 
832 	/*
833 	 * The output buffer consists of the cipher text followed by
834 	 * the hash when encrypting.  For decryption it only contains
835 	 * the plain text.
836 	 *
837 	 * Due to a firmware bug, the output buffer must include a
838 	 * dummy output buffer for the IV and AAD prior to the real
839 	 * output buffer.
840 	 */
841 	if (op_type == CHCR_ENCRYPT_OP) {
842 		if (s->blkcipher.iv_len + aad_len + crde->crd_len +
843 		    hash_size_in_response > MAX_REQUEST_SIZE)
844 			return (EFBIG);
845 	} else {
846 		if (s->blkcipher.iv_len + aad_len + crde->crd_len >
847 		    MAX_REQUEST_SIZE)
848 			return (EFBIG);
849 	}
850 	sglist_reset(sc->sg_dsgl);
851 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
852 	    s->blkcipher.iv_len + aad_len);
853 	if (error)
854 		return (error);
855 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
856 	    crde->crd_len);
857 	if (error)
858 		return (error);
859 	if (op_type == CHCR_ENCRYPT_OP) {
860 		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
861 		    crda->crd_inject, hash_size_in_response);
862 		if (error)
863 			return (error);
864 	}
865 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
866 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
867 		return (EFBIG);
868 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
869 
870 	/* PADs must be 128-bit aligned. */
871 	iopad_size = roundup2(s->hmac.partial_digest_len, 16);
872 
873 	/*
874 	 * The 'key' part of the key context consists of the key followed
875 	 * by the IPAD and OPAD.
876 	 */
877 	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
878 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
879 
880 	/*
881 	 * The input buffer consists of the IV, any AAD, and then the
882 	 * cipher/plain text.  For decryption requests the hash is
883 	 * appended after the cipher text.
884 	 *
885 	 * The IV is always stored at the start of the input buffer
886 	 * even though it may be duplicated in the payload.  The
887 	 * crypto engine doesn't work properly if the IV offset points
888 	 * inside of the AAD region, so a second copy is always
889 	 * required.
890 	 */
891 	input_len = aad_len + crde->crd_len;
892 
893 	/*
894 	 * The firmware hangs if sent a request that is only a
895 	 * bit smaller than MAX_REQUEST_SIZE.  In particular, the
896 	 * firmware appears to require 512 - 16 bytes of spare room
897 	 * along with the size of the hash even if the hash isn't
898 	 * included in the input buffer.
899 	 */
900 	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
901 	    MAX_REQUEST_SIZE)
902 		return (EFBIG);
903 	if (op_type == CHCR_DECRYPT_OP)
904 		input_len += hash_size_in_response;
905 	if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) {
906 		imm_len = input_len;
907 		sgl_nsegs = 0;
908 		sgl_len = 0;
909 	} else {
910 		imm_len = 0;
911 		sglist_reset(sc->sg_ulptx);
912 		if (aad_len != 0) {
913 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
914 			    crda->crd_skip, aad_len);
915 			if (error)
916 				return (error);
917 		}
918 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
919 		    crde->crd_skip, crde->crd_len);
920 		if (error)
921 			return (error);
922 		if (op_type == CHCR_DECRYPT_OP) {
923 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
924 			    crda->crd_inject, hash_size_in_response);
925 			if (error)
926 				return (error);
927 		}
928 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
929 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
930 	}
931 
932 	/*
933 	 * Any auth-only data before the cipher region is marked as AAD.
934 	 * Auth-data that overlaps with the cipher region is placed in
935 	 * the auth section.
936 	 */
937 	if (aad_len != 0) {
938 		aad_start = s->blkcipher.iv_len + 1;
939 		aad_stop = aad_start + aad_len - 1;
940 	} else {
941 		aad_start = 0;
942 		aad_stop = 0;
943 	}
944 	cipher_start = s->blkcipher.iv_len + aad_len + 1;
945 	if (op_type == CHCR_DECRYPT_OP)
946 		cipher_stop = hash_size_in_response;
947 	else
948 		cipher_stop = 0;
949 	if (aad_len == crda->crd_len) {
950 		auth_start = 0;
951 		auth_stop = 0;
952 	} else {
953 		if (aad_len != 0)
954 			auth_start = cipher_start;
955 		else
956 			auth_start = s->blkcipher.iv_len + crda->crd_skip -
957 			    crde->crd_skip + 1;
958 		auth_stop = (crde->crd_skip + crde->crd_len) -
959 		    (crda->crd_skip + crda->crd_len) + cipher_stop;
960 	}
961 	if (op_type == CHCR_DECRYPT_OP)
962 		auth_insert = hash_size_in_response;
963 	else
964 		auth_insert = 0;
965 
966 	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
967 	    roundup2(imm_len, 16) + sgl_len;
968 	if (wr_len > SGE_MAX_WR_LEN)
969 		return (EFBIG);
970 	wr = alloc_wrqe(wr_len, sc->txq);
971 	if (wr == NULL) {
972 		sc->stats_wr_nomem++;
973 		return (ENOMEM);
974 	}
975 	crwr = wrtod(wr);
976 	memset(crwr, 0, wr_len);
977 
978 	/*
979 	 * Read the existing IV from the request or generate a random
980 	 * one if none is provided.  Optionally copy the generated IV
981 	 * into the output buffer if requested.
982 	 */
983 	if (op_type == CHCR_ENCRYPT_OP) {
984 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
985 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
986 		else
987 			arc4rand(iv, s->blkcipher.iv_len, 0);
988 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
989 			crypto_copyback(crp->crp_flags, crp->crp_buf,
990 			    crde->crd_inject, s->blkcipher.iv_len, iv);
991 	} else {
992 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
993 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
994 		else
995 			crypto_copydata(crp->crp_flags, crp->crp_buf,
996 			    crde->crd_inject, s->blkcipher.iv_len, iv);
997 	}
998 
999 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
1000 	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);
1001 
1002 	/* XXX: Hardcodes SGE loopback channel of 0. */
1003 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
1004 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1005 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
1006 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1007 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1008 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1009 
1010 	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len);
1011 
1012 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1013 	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1014 	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1015 	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1016 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
1017 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1018 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
1019 	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
1020 	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
1021 	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1022 
1023 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1024 	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
1025 	crwr->sec_cpl.seqno_numivs = htobe32(
1026 	    V_SCMD_SEQ_NO_CTRL(0) |
1027 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
1028 	    V_SCMD_ENC_DEC_CTRL(op_type) |
1029 	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1030 	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
1031 	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
1032 	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
1033 	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
1034 	    V_SCMD_NUM_IVS(0));
1035 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
1036 	    V_SCMD_IV_GEN_CTRL(0) |
1037 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1038 	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1039 
1040 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
1041 	switch (crde->crd_alg) {
1042 	case CRYPTO_AES_CBC:
1043 		if (crde->crd_flags & CRD_F_ENCRYPT)
1044 			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
1045 			    s->blkcipher.key_len);
1046 		else
1047 			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
1048 			    s->blkcipher.key_len);
1049 		break;
1050 	case CRYPTO_AES_ICM:
1051 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
1052 		    s->blkcipher.key_len);
1053 		break;
1054 	case CRYPTO_AES_XTS:
1055 		key_half = s->blkcipher.key_len / 2;
1056 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
1057 		    key_half);
1058 		if (crde->crd_flags & CRD_F_ENCRYPT)
1059 			memcpy(crwr->key_ctx.key + key_half,
1060 			    s->blkcipher.enckey, key_half);
1061 		else
1062 			memcpy(crwr->key_ctx.key + key_half,
1063 			    s->blkcipher.deckey, key_half);
1064 		break;
1065 	}
1066 
1067 	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
1068 	memcpy(dst, s->hmac.pads, iopad_size * 2);
1069 
1070 	dst = (char *)(crwr + 1) + kctx_len;
1071 	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
1072 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1073 	memcpy(dst, iv, s->blkcipher.iv_len);
1074 	dst += s->blkcipher.iv_len;
1075 	if (imm_len != 0) {
1076 		if (aad_len != 0) {
1077 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1078 			    crda->crd_skip, aad_len, dst);
1079 			dst += aad_len;
1080 		}
1081 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
1082 		    crde->crd_len, dst);
1083 		dst += crde->crd_len;
1084 		if (op_type == CHCR_DECRYPT_OP)
1085 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1086 			    crda->crd_inject, hash_size_in_response, dst);
1087 	} else
1088 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
1089 
1090 	/* XXX: TODO backpressure */
1091 	t4_wrq_tx(sc->adapter, wr);
1092 
1093 	return (0);
1094 }
1095 
1096 static int
1097 ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s,
1098     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1099 {
1100 	struct cryptodesc *crd;
1101 
1102 	/*
1103 	 * The updated IV to permit chained requests is at
1104 	 * cpl->data[2], but OCF doesn't permit chained requests.
1105 	 *
1106 	 * For a decryption request, the hardware may do a verification
1107 	 * of the HMAC which will fail if the existing HMAC isn't in the
1108 	 * buffer.  If that happens, clear the error and copy the HMAC
1109 	 * from the CPL reply into the buffer.
1110 	 *
1111 	 * For encryption requests, crd should be the cipher request
1112 	 * which will have CRD_F_ENCRYPT set.  For decryption
1113 	 * requests, crp_desc will be the HMAC request which should
1114 	 * not have this flag set.
1115 	 */
1116 	crd = crp->crp_desc;
1117 	if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) &&
1118 	    !(crd->crd_flags & CRD_F_ENCRYPT)) {
1119 		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
1120 		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
1121 		error = 0;
1122 	}
1123 	return (error);
1124 }
1125 
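/*
 * Build and send a work request for an AES-GCM operation.  Requests
 * the engine cannot handle (such as an empty payload) return EMSGSIZE
 * so that the caller can fall back to software via ccr_gcm_soft().
 */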
1126 static int
1127 ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
1128     struct cryptodesc *crda, struct cryptodesc *crde)
1129 {
1130 	char iv[CHCR_MAX_CRYPTO_IV_LEN];
1131 	struct chcr_wr *crwr;
1132 	struct wrqe *wr;
1133 	char *dst;
1134 	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
1135 	u_int hash_size_in_response, imm_len;
1136 	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
1137 	u_int hmac_ctrl, input_len;
1138 	int dsgl_nsegs, dsgl_len;
1139 	int sgl_nsegs, sgl_len;
1140 	int error;
1141 
1142 	if (s->blkcipher.key_len == 0)
1143 		return (EINVAL);
1144 
1145 	/*
1146 	 * The crypto engine doesn't handle GCM requests with an empty
1147 	 * payload, so handle those in software instead.
1148 	 */
1149 	if (crde->crd_len == 0)
1150 		return (EMSGSIZE);
1151 
1152 	/*
1153 	 * AAD is only permitted before the cipher/plain text, not
1154 	 * after.
1155 	 */
1156 	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
1157 		return (EMSGSIZE);
1158 
1159 	if (crda->crd_len + AES_BLOCK_LEN > MAX_AAD_LEN)
1160 		return (EMSGSIZE);
1161 
1162 	hash_size_in_response = s->gmac.hash_len;
1163 	if (crde->crd_flags & CRD_F_ENCRYPT)
1164 		op_type = CHCR_ENCRYPT_OP;
1165 	else
1166 		op_type = CHCR_DECRYPT_OP;
1167 
1168 	/*
1169 	 * The IV handling for GCM in OCF is a bit more complicated in
1170 	 * that IPSec provides a full 16-byte IV (including the
1171 	 * counter), whereas the /dev/crypto interface sometimes
1172 	 * provides a full 16-byte IV (if no IV is provided in the
1173 	 * ioctl) and sometimes a 12-byte IV (if the IV was explicit).
1174 	 *
1175 	 * When provided a 12-byte IV, assume the IV is really 16 bytes
1176 	 * with a counter in the last 4 bytes initialized to 1.
1177 	 *
1178 	 * While iv_len is checked below, the value is currently
1179 	 * always set to 12 when creating a GCM session in this driver
1180 	 * due to limitations in OCF (there is no way to know what the
1181 	 * IV length of a given request will be).  This means that the
1182 	 * driver always assumes as 12-byte IV for now.
1183 	 * driver always assumes a 12-byte IV for now.
1184 	if (s->blkcipher.iv_len == 12)
1185 		iv_len = AES_BLOCK_LEN;
1186 	else
1187 		iv_len = s->blkcipher.iv_len;
1188 
1189 	/*
1190 	 * The output buffer consists of the cipher text followed by
1191 	 * the tag when encrypting.  For decryption it only contains
1192 	 * the plain text.
1193 	 *
1194 	 * Due to a firmware bug, the output buffer must include a
1195 	 * dummy output buffer for the IV and AAD prior to the real
1196 	 * output buffer.
1197 	 */
1198 	if (op_type == CHCR_ENCRYPT_OP) {
1199 		if (iv_len + crda->crd_len + crde->crd_len +
1200 		    hash_size_in_response > MAX_REQUEST_SIZE)
1201 			return (EFBIG);
1202 	} else {
1203 		if (iv_len + crda->crd_len + crde->crd_len > MAX_REQUEST_SIZE)
1204 			return (EFBIG);
1205 	}
1206 	sglist_reset(sc->sg_dsgl);
1207 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1208 	    crda->crd_len);
1209 	if (error)
1210 		return (error);
1211 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
1212 	    crde->crd_len);
1213 	if (error)
1214 		return (error);
1215 	if (op_type == CHCR_ENCRYPT_OP) {
1216 		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
1217 		    crda->crd_inject, hash_size_in_response);
1218 		if (error)
1219 			return (error);
1220 	}
1221 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
1222 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1223 		return (EFBIG);
1224 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1225 
1226 	/*
1227 	 * The 'key' part of the key context consists of the key followed
1228 	 * by the Galois hash key.
1229 	 */
1230 	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
1231 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1232 
1233 	/*
1234 	 * The input buffer consists of the IV, any AAD, and then the
1235 	 * cipher/plain text.  For decryption requests the hash is
1236 	 * appended after the cipher text.
1237 	 *
1238 	 * The IV is always stored at the start of the input buffer
1239 	 * even though it may be duplicated in the payload.  The
1240 	 * crypto engine doesn't work properly if the IV offset points
1241 	 * inside of the AAD region, so a second copy is always
1242 	 * required.
1243 	 */
1244 	input_len = crda->crd_len + crde->crd_len;
1245 	if (op_type == CHCR_DECRYPT_OP)
1246 		input_len += hash_size_in_response;
1247 	if (input_len > MAX_REQUEST_SIZE)
1248 		return (EFBIG);
1249 	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1250 		imm_len = input_len;
1251 		sgl_nsegs = 0;
1252 		sgl_len = 0;
1253 	} else {
1254 		imm_len = 0;
1255 		sglist_reset(sc->sg_ulptx);
1256 		if (crda->crd_len != 0) {
1257 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1258 			    crda->crd_skip, crda->crd_len);
1259 			if (error)
1260 				return (error);
1261 		}
1262 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1263 		    crde->crd_skip, crde->crd_len);
1264 		if (error)
1265 			return (error);
1266 		if (op_type == CHCR_DECRYPT_OP) {
1267 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1268 			    crda->crd_inject, hash_size_in_response);
1269 			if (error)
1270 				return (error);
1271 		}
1272 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
1273 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1274 	}
1275 
1276 	if (crda->crd_len != 0) {
1277 		aad_start = iv_len + 1;
1278 		aad_stop = aad_start + crda->crd_len - 1;
1279 	} else {
1280 		aad_start = 0;
1281 		aad_stop = 0;
1282 	}
1283 	cipher_start = iv_len + crda->crd_len + 1;
1284 	if (op_type == CHCR_DECRYPT_OP)
1285 		cipher_stop = hash_size_in_response;
1286 	else
1287 		cipher_stop = 0;
1288 	if (op_type == CHCR_DECRYPT_OP)
1289 		auth_insert = hash_size_in_response;
1290 	else
1291 		auth_insert = 0;
1292 
1293 	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
1294 	    sgl_len;
1295 	if (wr_len > SGE_MAX_WR_LEN)
1296 		return (EFBIG);
1297 	wr = alloc_wrqe(wr_len, sc->txq);
1298 	if (wr == NULL) {
1299 		sc->stats_wr_nomem++;
1300 		return (ENOMEM);
1301 	}
1302 	crwr = wrtod(wr);
1303 	memset(crwr, 0, wr_len);
1304 
1305 	/*
1306 	 * Read the existing IV from the request or generate a random
1307 	 * one if none is provided.  Optionally copy the generated IV
1308 	 * into the output buffer if requested.
1309 	 *
1310 	 * If the input IV is 12 bytes, append an explicit 4-byte
1311 	 * counter of 1.
1312 	 */
1313 	if (op_type == CHCR_ENCRYPT_OP) {
1314 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1315 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
1316 		else
1317 			arc4rand(iv, s->blkcipher.iv_len, 0);
1318 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
1319 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1320 			    crde->crd_inject, s->blkcipher.iv_len, iv);
1321 	} else {
1322 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1323 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
1324 		else
1325 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1326 			    crde->crd_inject, s->blkcipher.iv_len, iv);
1327 	}
1328 	if (s->blkcipher.iv_len == 12)
1329 		*(uint32_t *)&iv[12] = htobe32(1);
1330 
1331 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
1332 	    crp);
1333 
1334 	/* XXX: Hardcodes SGE loopback channel of 0. */
1335 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
1336 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1337 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
1338 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1339 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1340 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1341 
1342 	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1343 
1344 	/*
1345 	 * NB: cipherstop is explicitly set to 0.  On encrypt it
1346 	 * should normally be set to 0 anyway (as the encrypt crd ends
1347 	 * at the end of the input).  However, for decrypt the cipher
1348 	 * ends before the tag in the AUTHENC case (and authstop is
1349 	 * set to stop before the tag), but for GCM the cipher still
1350 	 * runs to the end of the buffer.  Not sure if this is
1351 	 * intentional or a firmware quirk, but it is required for
1352 	 * working tag validation with GCM decryption.
1353 	 */
1354 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1355 	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1356 	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1357 	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1358 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
1359 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1360 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
1361 	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1362 	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1363 	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1364 
1365 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1366 	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
1367 	crwr->sec_cpl.seqno_numivs = htobe32(
1368 	    V_SCMD_SEQ_NO_CTRL(0) |
1369 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
1370 	    V_SCMD_ENC_DEC_CTRL(op_type) |
1371 	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1372 	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) |
1373 	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) |
1374 	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
1375 	    V_SCMD_IV_SIZE(iv_len / 2) |
1376 	    V_SCMD_NUM_IVS(0));
1377 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
1378 	    V_SCMD_IV_GEN_CTRL(0) |
1379 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1380 	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1381 
1382 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
1383 	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
1384 	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
1385 	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);
1386 
1387 	dst = (char *)(crwr + 1) + kctx_len;
1388 	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
1389 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1390 	memcpy(dst, iv, iv_len);
1391 	dst += iv_len;
1392 	if (imm_len != 0) {
1393 		if (crda->crd_len != 0) {
1394 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1395 			    crda->crd_skip, crda->crd_len, dst);
1396 			dst += crda->crd_len;
1397 		}
1398 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
1399 		    crde->crd_len, dst);
1400 		dst += crde->crd_len;
1401 		if (op_type == CHCR_DECRYPT_OP)
1402 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1403 			    crda->crd_inject, hash_size_in_response, dst);
1404 	} else
1405 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
1406 
1407 	/* XXX: TODO backpressure */
1408 	t4_wrq_tx(sc->adapter, wr);
1409 
1410 	return (0);
1411 }
1412 
1413 static int
1414 ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
1415     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1416 {
1417 
1418 	/*
1419 	 * The updated IV to permit chained requests is at
1420 	 * cpl->data[2], but OCF doesn't permit chained requests.
1421 	 *
1422 	 * Note that the hardware should always verify the GMAC hash.
1423 	 */
1424 	return (error);
1425 }
1426 
1427 /*
1428  * Handle a GCM request that is not supported by the crypto engine by
1429  * performing the operation in software.  Derived from swcr_authenc().
1430  */
1431 static void
1432 ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp,
1433     struct cryptodesc *crda, struct cryptodesc *crde)
1434 {
1435 	struct auth_hash *axf;
1436 	struct enc_xform *exf;
1437 	void *auth_ctx;
1438 	uint8_t *kschedule;
1439 	char block[GMAC_BLOCK_LEN];
1440 	char digest[GMAC_DIGEST_LEN];
1441 	char iv[AES_BLOCK_LEN];
1442 	int error, i, len;
1443 
1444 	auth_ctx = NULL;
1445 	kschedule = NULL;
1446 
1447 	/* Initialize the MAC. */
1448 	switch (s->blkcipher.key_len) {
1449 	case 16:
1450 		axf = &auth_hash_nist_gmac_aes_128;
1451 		break;
1452 	case 24:
1453 		axf = &auth_hash_nist_gmac_aes_192;
1454 		break;
1455 	case 32:
1456 		axf = &auth_hash_nist_gmac_aes_256;
1457 		break;
1458 	default:
1459 		error = EINVAL;
1460 		goto out;
1461 	}
1462 	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
1463 	if (auth_ctx == NULL) {
1464 		error = ENOMEM;
1465 		goto out;
1466 	}
1467 	axf->Init(auth_ctx);
1468 	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
1469 
1470 	/* Initialize the cipher. */
1471 	exf = &enc_xform_aes_nist_gcm;
1472 	error = exf->setkey(&kschedule, s->blkcipher.enckey,
1473 	    s->blkcipher.key_len);
1474 	if (error)
1475 		goto out;
1476 
1477 	/*
1478 	 * This assumes a 12-byte IV from the crp.  See longer comment
1479 	 * above in ccr_gcm() for more details.
1480 	 */
1481 	if (crde->crd_flags & CRD_F_ENCRYPT) {
1482 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1483 			memcpy(iv, crde->crd_iv, 12);
1484 		else
1485 			arc4rand(iv, 12, 0);
1486 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
1487 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1488 			    crde->crd_inject, 12, iv);
1489 	} else {
1490 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1491 			memcpy(iv, crde->crd_iv, 12);
1492 		else
1493 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1494 			    crde->crd_inject, 12, iv);
1495 	}
1496 	*(uint32_t *)&iv[12] = htobe32(1);
1497 
1498 	axf->Reinit(auth_ctx, iv, sizeof(iv));
1499 
1500 	/* MAC the AAD. */
1501 	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
1502 		len = imin(crda->crd_len - i, sizeof(block));
1503 		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
1504 		    i, len, block);
1505 		bzero(block + len, sizeof(block) - len);
1506 		axf->Update(auth_ctx, block, sizeof(block));
1507 	}
1508 
1509 	exf->reinit(kschedule, iv);
1510 
1511 	/* Do encryption with MAC */
1512 	for (i = 0; i < crde->crd_len; i += sizeof(block)) {
1513 		len = imin(crde->crd_len - i, sizeof(block));
1514 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip +
1515 		    i, len, block);
1516 		bzero(block + len, sizeof(block) - len);
1517 		if (crde->crd_flags & CRD_F_ENCRYPT) {
1518 			exf->encrypt(kschedule, block);
1519 			axf->Update(auth_ctx, block, len);
1520 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1521 			    crde->crd_skip + i, len, block);
1522 		} else {
1523 			axf->Update(auth_ctx, block, len);
1524 		}
1525 	}
1526 
1527 	/* Length block. */
1528 	bzero(block, sizeof(block));
1529 	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
1530 	((uint32_t *)block)[3] = htobe32(crde->crd_len * 8);
1531 	axf->Update(auth_ctx, block, sizeof(block));
1532 
1533 	/* Finalize MAC. */
1534 	axf->Final(digest, auth_ctx);
1535 
1536 	/* Inject or validate tag. */
1537 	if (crde->crd_flags & CRD_F_ENCRYPT) {
1538 		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
1539 		    sizeof(digest), digest);
1540 		error = 0;
1541 	} else {
1542 		char digest2[GMAC_DIGEST_LEN];
1543 
1544 		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
1545 		    sizeof(digest2), digest2);
1546 		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
1547 			error = 0;
1548 
1549 			/* Tag matches, decrypt data. */
1550 			for (i = 0; i < crde->crd_len; i += sizeof(block)) {
1551 				len = imin(crde->crd_len - i, sizeof(block));
1552 				crypto_copydata(crp->crp_flags, crp->crp_buf,
1553 				    crde->crd_skip + i, len, block);
1554 				bzero(block + len, sizeof(block) - len);
1555 				exf->decrypt(kschedule, block);
1556 				crypto_copyback(crp->crp_flags, crp->crp_buf,
1557 				    crde->crd_skip + i, len, block);
1558 			}
1559 		} else
1560 			error = EBADMSG;
1561 	}
1562 
1563 	exf->zerokey(&kschedule);
1564 out:
1565 	if (auth_ctx != NULL) {
1566 		memset(auth_ctx, 0, axf->ctxsize);
1567 		free(auth_ctx, M_CCR);
1568 	}
1569 	crp->crp_etype = error;
1570 	crypto_done(crp);
1571 }
1572 
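/*
 * Construct the CCM B0 block (and, when the request has AAD, the AAD
 * length field that begins block 1) which is passed to the engine
 * ahead of the request's own AAD.
 */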
1573 static void
1574 generate_ccm_b0(struct cryptodesc *crda, struct cryptodesc *crde,
1575     u_int hash_size_in_response, const char *iv, char *b0)
1576 {
1577 	u_int i, payload_len;
1578 
1579 	/* NB: L is already set in the first byte of the IV. */
1580 	memcpy(b0, iv, CCM_B0_SIZE);
1581 
1582 	/* Set length of hash in bits 3 - 5. */
1583 	b0[0] |= (((hash_size_in_response - 2) / 2) << 3);
1584 
1585 	/* Store the payload length as a big-endian value. */
1586 	payload_len = crde->crd_len;
1587 	for (i = 0; i < iv[0]; i++) {
1588 		b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
1589 		payload_len >>= 8;
1590 	}
1591 
1592 	/*
1593 	 * If there is AAD in the request, set bit 6 in the flags
1594 	 * field and store the AAD length as a big-endian value at the
1595 	 * start of block 1.  This only assumes a 16-bit AAD length
1596 	 * since T6 doesn't support large AAD sizes.
1597 	 */
1598 	if (crda->crd_len != 0) {
1599 		b0[0] |= (1 << 6);
1600 		*(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crda->crd_len);
1601 	}
1602 }
1603 
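/*
 * Build and send a work request for an AES-CCM operation.  Block 0
 * (plus the AAD length field) is always prepended to any AAD from the
 * request, and an empty payload is punted back to the caller for a
 * software fallback.
 */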
1604 static int
1605 ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
1606     struct cryptodesc *crda, struct cryptodesc *crde)
1607 {
1608 	char iv[CHCR_MAX_CRYPTO_IV_LEN];
1609 	struct ulptx_idata *idata;
1610 	struct chcr_wr *crwr;
1611 	struct wrqe *wr;
1612 	char *dst;
1613 	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
1614 	u_int aad_len, b0_len, hash_size_in_response, imm_len;
1615 	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
1616 	u_int hmac_ctrl, input_len;
1617 	int dsgl_nsegs, dsgl_len;
1618 	int sgl_nsegs, sgl_len;
1619 	int error;
1620 
1621 	if (s->blkcipher.key_len == 0)
1622 		return (EINVAL);
1623 
1624 	/*
1625 	 * The crypto engine doesn't handle CCM requests with an empty
1626 	 * payload, so handle those in software instead.
1627 	 */
1628 	if (crde->crd_len == 0)
1629 		return (EMSGSIZE);
1630 
1631 	/*
1632 	 * AAD is only permitted before the cipher/plain text, not
1633 	 * after.
1634 	 */
1635 	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
1636 		return (EMSGSIZE);
1637 
1638 	/*
1639 	 * CCM always includes block 0 in the AAD before AAD from the
1640 	 * request.
1641 	 */
1642 	b0_len = CCM_B0_SIZE;
1643 	if (crda->crd_len != 0)
1644 		b0_len += CCM_AAD_FIELD_SIZE;
1645 	aad_len = b0_len + crda->crd_len;
1646 
1647 	/*
1648 	 * Always assume a 12-byte input IV (nonce) for now since that
1649 	 * is what OCF always generates.  The full IV in the work
1650 	 * request is 16 bytes.
1651 	 */
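	/*
	 * The full IV written to the work request holds the L' byte
	 * (set further below), the 12-byte nonce, and zero bytes for
	 * the block counter.
	 */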
1652 	iv_len = AES_BLOCK_LEN;
1653 
1654 	if (iv_len + aad_len > MAX_AAD_LEN)
1655 		return (EMSGSIZE);
1656 
1657 	hash_size_in_response = s->ccm_mac.hash_len;
1658 	if (crde->crd_flags & CRD_F_ENCRYPT)
1659 		op_type = CHCR_ENCRYPT_OP;
1660 	else
1661 		op_type = CHCR_DECRYPT_OP;
1662 
1663 	/*
1664 	 * The output buffer consists of the cipher text followed by
1665 	 * the tag when encrypting.  For decryption it only contains
1666 	 * the plain text.
1667 	 *
1668 	 * Due to a firmware bug, the output buffer must include a
1669 	 * dummy region for the IV and AAD prior to the real output
1670 	 * data.
1671 	 */
1672 	if (op_type == CHCR_ENCRYPT_OP) {
1673 		if (iv_len + aad_len + crde->crd_len + hash_size_in_response >
1674 		    MAX_REQUEST_SIZE)
1675 			return (EFBIG);
1676 	} else {
1677 		if (iv_len + aad_len + crde->crd_len > MAX_REQUEST_SIZE)
1678 			return (EFBIG);
1679 	}
1680 	sglist_reset(sc->sg_dsgl);
1681 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1682 	    aad_len);
1683 	if (error)
1684 		return (error);
1685 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
1686 	    crde->crd_len);
1687 	if (error)
1688 		return (error);
1689 	if (op_type == CHCR_ENCRYPT_OP) {
1690 		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
1691 		    crda->crd_inject, hash_size_in_response);
1692 		if (error)
1693 			return (error);
1694 	}
1695 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
1696 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1697 		return (EFBIG);
1698 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1699 
1700 	/*
1701 	 * The 'key' part of the key context consists of two copies of
1702 	 * the AES key.
1703 	 */
1704 	kctx_len = roundup2(s->blkcipher.key_len, 16) * 2;
1705 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1706 
1707 	/*
1708 	 * The input buffer consists of the IV, AAD (including block
1709 	 * 0), and then the cipher/plain text.  For decryption
1710 	 * requests the hash is appended after the cipher text.
1711 	 *
1712 	 * The IV is always stored at the start of the input buffer
1713 	 * even though it may be duplicated in the payload.  The
1714 	 * crypto engine doesn't work properly if the IV offset points
1715 	 * inside of the AAD region, so a second copy is always
1716 	 * required.
1717 	 */
1718 	input_len = aad_len + crde->crd_len;
1719 	if (op_type == CHCR_DECRYPT_OP)
1720 		input_len += hash_size_in_response;
1721 	if (input_len > MAX_REQUEST_SIZE)
1722 		return (EFBIG);
1723 	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1724 		imm_len = input_len;
1725 		sgl_nsegs = 0;
1726 		sgl_len = 0;
1727 	} else {
1728 		/* Block 0 is passed as immediate data. */
1729 		imm_len = b0_len;
1730 
1731 		sglist_reset(sc->sg_ulptx);
1732 		if (crda->crd_len != 0) {
1733 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1734 			    crda->crd_skip, crda->crd_len);
1735 			if (error)
1736 				return (error);
1737 		}
1738 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1739 		    crde->crd_skip, crde->crd_len);
1740 		if (error)
1741 			return (error);
1742 		if (op_type == CHCR_DECRYPT_OP) {
1743 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1744 			    crda->crd_inject, hash_size_in_response);
1745 			if (error)
1746 				return (error);
1747 		}
1748 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
1749 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1750 	}
1751 
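	/*
	 * The start/stop offsets in the SEC PDU are 1-based and are
	 * measured from the beginning of the input payload, which
	 * starts with the IV; hence the AAD begins at iv_len + 1.
	 */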
1752 	aad_start = iv_len + 1;
1753 	aad_stop = aad_start + aad_len - 1;
1754 	cipher_start = aad_stop + 1;
1755 	if (op_type == CHCR_DECRYPT_OP)
1756 		cipher_stop = hash_size_in_response;
1757 	else
1758 		cipher_stop = 0;
1759 	if (op_type == CHCR_DECRYPT_OP)
1760 		auth_insert = hash_size_in_response;
1761 	else
1762 		auth_insert = 0;
1763 
1764 	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
1765 	    sgl_len;
1766 	if (wr_len > SGE_MAX_WR_LEN)
1767 		return (EFBIG);
1768 	wr = alloc_wrqe(wr_len, sc->txq);
1769 	if (wr == NULL) {
1770 		sc->stats_wr_nomem++;
1771 		return (ENOMEM);
1772 	}
1773 	crwr = wrtod(wr);
1774 	memset(crwr, 0, wr_len);
1775 
1776 	/*
1777 	 * Read the nonce from the request or generate a random one if
1778 	 * none is provided.  Use the nonce to generate the full IV
1779 	 * with the counter set to 0.
1780 	 */
1781 	memset(iv, 0, iv_len);
1782 	iv[0] = (15 - AES_CCM_IV_LEN) - 1;	/* L' = L - 1 */
1783 	if (op_type == CHCR_ENCRYPT_OP) {
1784 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1785 			memcpy(iv + 1, crde->crd_iv, AES_CCM_IV_LEN);
1786 		else
1787 			arc4rand(iv + 1, AES_CCM_IV_LEN, 0);
1788 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
1789 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1790 			    crde->crd_inject, AES_CCM_IV_LEN, iv + 1);
1791 	} else {
1792 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1793 			memcpy(iv + 1, crde->crd_iv, AES_CCM_IV_LEN);
1794 		else
1795 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1796 			    crde->crd_inject, AES_CCM_IV_LEN, iv + 1);
1797 	}
1798 
1799 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
1800 	    crp);
1801 
1802 	/* XXX: Hardcodes SGE loopback channel of 0. */
1803 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
1804 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1805 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
1806 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1807 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1808 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1809 
1810 	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1811 
1812 	/*
1813 	 * NB: cipherstop is explicitly set to 0.  See comments above
1814 	 * in ccr_gcm().
1815 	 */
1816 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1817 	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1818 	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1819 	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1820 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
1821 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1822 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
1823 	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1824 	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1825 	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1826 
1827 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1828 	hmac_ctrl = ccr_hmac_ctrl(AES_CBC_MAC_HASH_LEN, hash_size_in_response);
1829 	crwr->sec_cpl.seqno_numivs = htobe32(
1830 	    V_SCMD_SEQ_NO_CTRL(0) |
1831 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
1832 	    V_SCMD_ENC_DEC_CTRL(op_type) |
1833 	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 0 : 1) |
1834 	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) |
1835 	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) |
1836 	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
1837 	    V_SCMD_IV_SIZE(iv_len / 2) |
1838 	    V_SCMD_NUM_IVS(0));
1839 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
1840 	    V_SCMD_IV_GEN_CTRL(0) |
1841 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1842 	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1843 
1844 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
1845 	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
1846 	memcpy(crwr->key_ctx.key + roundup(s->blkcipher.key_len, 16),
1847 	    s->blkcipher.enckey, s->blkcipher.key_len);
1848 
1849 	dst = (char *)(crwr + 1) + kctx_len;
1850 	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
1851 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1852 	memcpy(dst, iv, iv_len);
1853 	dst += iv_len;
1854 	generate_ccm_b0(crda, crde, hash_size_in_response, iv, dst);
1855 	if (sgl_nsegs == 0) {
1856 		dst += b0_len;
1857 		if (crda->crd_len != 0) {
1858 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1859 			    crda->crd_skip, crda->crd_len, dst);
1860 			dst += crda->crd_len;
1861 		}
1862 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
1863 		    crde->crd_len, dst);
1864 		dst += crde->crd_len;
1865 		if (op_type == CHCR_DECRYPT_OP)
1866 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1867 			    crda->crd_inject, hash_size_in_response, dst);
1868 	} else {
1869 		dst += CCM_B0_SIZE;
1870 		if (b0_len > CCM_B0_SIZE) {
1871 			/*
1872 			 * If there is AAD, insert padding including a
1873 			 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL
1874 			 * is 16-byte aligned.
1875 			 */
1876 			KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE,
1877 			    ("b0_len mismatch"));
1878 			memset(dst + CCM_AAD_FIELD_SIZE, 0,
1879 			    8 - CCM_AAD_FIELD_SIZE);
1880 			idata = (void *)(dst + 8);
1881 			idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1882 			idata->len = htobe32(0);
1883 			dst = (void *)(idata + 1);
1884 		}
1885 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
1886 	}
1887 
1888 	/* XXX: TODO backpressure */
1889 	t4_wrq_tx(sc->adapter, wr);
1890 
1891 	return (0);
1892 }
1893 
1894 static int
1895 ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s,
1896     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1897 {
1898 
1899 	/*
1900 	 * The updated IV to permit chained requests is at
1901 	 * cpl->data[2], but OCF doesn't permit chained requests.
1902 	 *
1903 	 * Note that the hardware should always verify the CBC MAC
1904 	 * hash.
1905 	 */
1906 	return (error);
1907 }
1908 
1909 /*
1910  * Handle a CCM request that is not supported by the crypto engine by
1911  * performing the operation in software.  Derived from swcr_authenc().
1912  */
1913 static void
1914 ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp,
1915     struct cryptodesc *crda, struct cryptodesc *crde)
1916 {
1917 	struct auth_hash *axf;
1918 	struct enc_xform *exf;
1919 	union authctx *auth_ctx;
1920 	uint8_t *kschedule;
1921 	char block[CCM_CBC_BLOCK_LEN];
1922 	char digest[AES_CBC_MAC_HASH_LEN];
1923 	char iv[AES_CCM_IV_LEN];
1924 	int error, i, len;
1925 
1926 	auth_ctx = NULL;
1927 	kschedule = NULL;
1928 
1929 	/* Initialize the MAC. */
1930 	switch (s->blkcipher.key_len) {
1931 	case 16:
1932 		axf = &auth_hash_ccm_cbc_mac_128;
1933 		break;
1934 	case 24:
1935 		axf = &auth_hash_ccm_cbc_mac_192;
1936 		break;
1937 	case 32:
1938 		axf = &auth_hash_ccm_cbc_mac_256;
1939 		break;
1940 	default:
1941 		error = EINVAL;
1942 		goto out;
1943 	}
1944 	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
1945 	if (auth_ctx == NULL) {
1946 		error = ENOMEM;
1947 		goto out;
1948 	}
1949 	axf->Init(auth_ctx);
1950 	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
1951 
1952 	/* Initialize the cipher. */
1953 	exf = &enc_xform_ccm;
1954 	error = exf->setkey(&kschedule, s->blkcipher.enckey,
1955 	    s->blkcipher.key_len);
1956 	if (error)
1957 		goto out;
1958 
1959 	if (crde->crd_flags & CRD_F_ENCRYPT) {
1960 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1961 			memcpy(iv, crde->crd_iv, AES_CCM_IV_LEN);
1962 		else
1963 			arc4rand(iv, AES_CCM_IV_LEN, 0);
1964 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
1965 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1966 			    crde->crd_inject, AES_CCM_IV_LEN, iv);
1967 	} else {
1968 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1969 			memcpy(iv, crde->crd_iv, AES_CCM_IV_LEN);
1970 		else
1971 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1972 			    crde->crd_inject, AES_CCM_IV_LEN, iv);
1973 	}
1974 
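	/*
	 * The CBC-MAC transform needs the AAD and payload lengths
	 * before Reinit() so that it can construct block 0 up front.
	 */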
1975 	auth_ctx->aes_cbc_mac_ctx.authDataLength = crda->crd_len;
1976 	auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crde->crd_len;
1977 	axf->Reinit(auth_ctx, iv, sizeof(iv));
1978 
1979 	/* MAC the AAD. */
1980 	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
1981 		len = imin(crda->crd_len - i, sizeof(block));
1982 		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
1983 		    i, len, block);
1984 		bzero(block + len, sizeof(block) - len);
1985 		axf->Update(auth_ctx, block, sizeof(block));
1986 	}
1987 
1988 	exf->reinit(kschedule, iv);
1989 
1990 	/* Do encryption/decryption with MAC */
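	/*
	 * When encrypting, each block is MACed before it is encrypted
	 * and the ciphertext is written back immediately.  When
	 * decrypting, each block is decrypted only to feed the MAC;
	 * the plaintext is written back further below once the tag
	 * has been verified.
	 */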
1991 	for (i = 0; i < crde->crd_len; i += sizeof(block)) {
1992 		len = imin(crde->crd_len - i, sizeof(block));
1993 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip +
1994 		    i, len, block);
1995 		bzero(block + len, sizeof(block) - len);
1996 		if (crde->crd_flags & CRD_F_ENCRYPT) {
1997 			axf->Update(auth_ctx, block, len);
1998 			exf->encrypt(kschedule, block);
1999 			crypto_copyback(crp->crp_flags, crp->crp_buf,
2000 			    crde->crd_skip + i, len, block);
2001 		} else {
2002 			exf->decrypt(kschedule, block);
2003 			axf->Update(auth_ctx, block, len);
2004 		}
2005 	}
2006 
2007 	/* Finalize MAC. */
2008 	axf->Final(digest, auth_ctx);
2009 
2010 	/* Inject or validate tag. */
2011 	if (crde->crd_flags & CRD_F_ENCRYPT) {
2012 		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
2013 		    sizeof(digest), digest);
2014 		error = 0;
2015 	} else {
2016 		char digest2[AES_CBC_MAC_HASH_LEN];
2017 
2018 		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
2019 		    sizeof(digest2), digest2);
2020 		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
2021 			error = 0;
2022 
2023 			/* Tag matches, decrypt data. */
2024 			exf->reinit(kschedule, iv);
2025 			for (i = 0; i < crde->crd_len; i += sizeof(block)) {
2026 				len = imin(crde->crd_len - i, sizeof(block));
2027 				crypto_copydata(crp->crp_flags, crp->crp_buf,
2028 				    crde->crd_skip + i, len, block);
2029 				bzero(block + len, sizeof(block) - len);
2030 				exf->decrypt(kschedule, block);
2031 				crypto_copyback(crp->crp_flags, crp->crp_buf,
2032 				    crde->crd_skip + i, len, block);
2033 			}
2034 		} else
2035 			error = EBADMSG;
2036 	}
2037 
2038 	exf->zerokey(&kschedule);
2039 out:
2040 	if (auth_ctx != NULL) {
2041 		memset(auth_ctx, 0, axf->ctxsize);
2042 		free(auth_ctx, M_CCR);
2043 	}
2044 	crp->crp_etype = error;
2045 	crypto_done(crp);
2046 }
2047 
2048 static void
2049 ccr_identify(driver_t *driver, device_t parent)
2050 {
2051 	struct adapter *sc;
2052 
2053 	sc = device_get_softc(parent);
2054 	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
2055 	    device_find_child(parent, "ccr", -1) == NULL)
2056 		device_add_child(parent, "ccr", -1);
2057 }
2058 
2059 static int
2060 ccr_probe(device_t dev)
2061 {
2062 
2063 	device_set_desc(dev, "Chelsio Crypto Accelerator");
2064 	return (BUS_PROBE_DEFAULT);
2065 }
2066 
2067 static void
2068 ccr_sysctls(struct ccr_softc *sc)
2069 {
2070 	struct sysctl_ctx_list *ctx;
2071 	struct sysctl_oid *oid;
2072 	struct sysctl_oid_list *children;
2073 
2074 	ctx = device_get_sysctl_ctx(sc->dev);
2075 
2076 	/*
2077 	 * dev.ccr.X.
2078 	 */
2079 	oid = device_get_sysctl_tree(sc->dev);
2080 	children = SYSCTL_CHILDREN(oid);
2081 
2082 	/*
2083 	 * dev.ccr.X.stats.
2084 	 */
2085 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
2086 	    NULL, "statistics");
2087 	children = SYSCTL_CHILDREN(oid);
2088 
2089 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD,
2090 	    &sc->stats_hash, 0, "Hash requests submitted");
2091 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
2092 	    &sc->stats_hmac, 0, "HMAC requests submitted");
2093 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
2094 	    &sc->stats_blkcipher_encrypt, 0,
2095 	    "Cipher encryption requests submitted");
2096 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
2097 	    &sc->stats_blkcipher_decrypt, 0,
2098 	    "Cipher decryption requests submitted");
2099 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
2100 	    &sc->stats_authenc_encrypt, 0,
2101 	    "Combined AES+HMAC encryption requests submitted");
2102 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
2103 	    &sc->stats_authenc_decrypt, 0,
2104 	    "Combined AES+HMAC decryption requests submitted");
2105 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
2106 	    &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
2107 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
2108 	    &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
2109 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_encrypt", CTLFLAG_RD,
2110 	    &sc->stats_ccm_encrypt, 0, "AES-CCM encryption requests submitted");
2111 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_decrypt", CTLFLAG_RD,
2112 	    &sc->stats_ccm_decrypt, 0, "AES-CCM decryption requests submitted");
2113 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
2114 	    &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
2115 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
2116 	    &sc->stats_inflight, 0, "Requests currently pending");
2117 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
2118 	    &sc->stats_mac_error, 0, "MAC errors");
2119 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
2120 	    &sc->stats_pad_error, 0, "Padding errors");
2121 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
2122 	    &sc->stats_bad_session, 0, "Requests with invalid session ID");
2123 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
2124 	    &sc->stats_sglist_error, 0,
2125 	    "Requests for which DMA mapping failed");
2126 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
2127 	    &sc->stats_process_error, 0, "Requests failed during queueing");
2128 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD,
2129 	    &sc->stats_sw_fallback, 0,
2130 	    "Requests processed by falling back to software");
2131 }
2132 
2133 static int
2134 ccr_attach(device_t dev)
2135 {
2136 	struct ccr_softc *sc;
2137 	int32_t cid;
2138 
2139 	sc = device_get_softc(dev);
2140 	sc->dev = dev;
2141 	sc->adapter = device_get_softc(device_get_parent(dev));
2142 	sc->txq = &sc->adapter->sge.ctrlq[0];
2143 	sc->rxq = &sc->adapter->sge.rxq[0];
2144 	cid = crypto_get_driverid(dev, sizeof(struct ccr_session),
2145 	    CRYPTOCAP_F_HARDWARE);
2146 	if (cid < 0) {
2147 		device_printf(dev, "could not get crypto driver id\n");
2148 		return (ENXIO);
2149 	}
2150 	sc->cid = cid;
2151 	sc->adapter->ccr_softc = sc;
2152 
2153 	/* XXX: TODO? */
2154 	sc->tx_channel_id = 0;
2155 
2156 	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
2157 	sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
2158 	sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
2159 	sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
2160 	sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
2161 	sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
2162 	ccr_sysctls(sc);
2163 
2164 	crypto_register(cid, CRYPTO_SHA1, 0, 0);
2165 	crypto_register(cid, CRYPTO_SHA2_224, 0, 0);
2166 	crypto_register(cid, CRYPTO_SHA2_256, 0, 0);
2167 	crypto_register(cid, CRYPTO_SHA2_384, 0, 0);
2168 	crypto_register(cid, CRYPTO_SHA2_512, 0, 0);
2169 	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
2170 	crypto_register(cid, CRYPTO_SHA2_224_HMAC, 0, 0);
2171 	crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
2172 	crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
2173 	crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
2174 	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
2175 	crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
2176 	crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
2177 	crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
2178 	crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
2179 	crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
2180 	crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
2181 	crypto_register(cid, CRYPTO_AES_CCM_16, 0, 0);
2182 	crypto_register(cid, CRYPTO_AES_CCM_CBC_MAC, 0, 0);
2183 	return (0);
2184 }
2185 
2186 static int
2187 ccr_detach(device_t dev)
2188 {
2189 	struct ccr_softc *sc;
2190 
2191 	sc = device_get_softc(dev);
2192 
2193 	mtx_lock(&sc->lock);
2194 	sc->detaching = true;
2195 	mtx_unlock(&sc->lock);
2196 
2197 	crypto_unregister_all(sc->cid);
2198 
2199 	mtx_destroy(&sc->lock);
2200 	sglist_free(sc->sg_iv_aad);
2201 	free(sc->iv_aad_buf, M_CCR);
2202 	sglist_free(sc->sg_dsgl);
2203 	sglist_free(sc->sg_ulptx);
2204 	sglist_free(sc->sg_crp);
2205 	sc->adapter->ccr_softc = NULL;
2206 	return (0);
2207 }
2208 
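/*
 * For plain (unkeyed) hash sessions, store the algorithm's initial
 * hash state in the session's pad buffer.  HMAC sessions use
 * t4_init_hmac_digest() instead.
 */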
2209 static void
2210 ccr_init_hash_digest(struct ccr_session *s, int cri_alg)
2211 {
2212 	union authctx auth_ctx;
2213 	struct auth_hash *axf;
2214 
2215 	axf = s->hmac.auth_hash;
2216 	axf->Init(&auth_ctx);
2217 	t4_copy_partial_hash(cri_alg, &auth_ctx, s->hmac.pads);
2218 }
2219 
2220 static int
2221 ccr_aes_check_keylen(int alg, int klen)
2222 {
2223 
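	/*
	 * Key lengths are in bits.  AES-XTS keys are twice the AES
	 * key length since they concatenate the data and tweak keys.
	 */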
2224 	switch (klen) {
2225 	case 128:
2226 	case 192:
2227 		if (alg == CRYPTO_AES_XTS)
2228 			return (EINVAL);
2229 		break;
2230 	case 256:
2231 		break;
2232 	case 512:
2233 		if (alg != CRYPTO_AES_XTS)
2234 			return (EINVAL);
2235 		break;
2236 	default:
2237 		return (EINVAL);
2238 	}
2239 	return (0);
2240 }
2241 
2242 static void
2243 ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen)
2244 {
2245 	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
2246 	unsigned int opad_present;
2247 
2248 	if (alg == CRYPTO_AES_XTS)
2249 		kbits = klen / 2;
2250 	else
2251 		kbits = klen;
2252 	switch (kbits) {
2253 	case 128:
2254 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2255 		break;
2256 	case 192:
2257 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2258 		break;
2259 	case 256:
2260 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2261 		break;
2262 	default:
2263 		panic("should not get here");
2264 	}
2265 
2266 	s->blkcipher.key_len = klen / 8;
2267 	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
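	/*
	 * Only CBC and XTS need a decryption key schedule; the
	 * counter-based modes use the forward AES transform for both
	 * encryption and decryption.
	 */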
2268 	switch (alg) {
2269 	case CRYPTO_AES_CBC:
2270 	case CRYPTO_AES_XTS:
2271 		t4_aes_getdeckey(s->blkcipher.deckey, key, kbits);
2272 		break;
2273 	}
2274 
2275 	kctx_len = roundup2(s->blkcipher.key_len, 16);
2276 	switch (s->mode) {
2277 	case AUTHENC:
2278 		mk_size = s->hmac.mk_size;
2279 		opad_present = 1;
2280 		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
2281 		kctx_len += iopad_size * 2;
2282 		break;
2283 	case GCM:
2284 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2285 		opad_present = 0;
2286 		kctx_len += GMAC_BLOCK_LEN;
2287 		break;
2288 	case CCM:
2289 		switch (kbits) {
2290 		case 128:
2291 			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2292 			break;
2293 		case 192:
2294 			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2295 			break;
2296 		case 256:
2297 			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2298 			break;
2299 		default:
2300 			panic("should not get here");
2301 		}
2302 		opad_present = 0;
2303 		kctx_len *= 2;
2304 		break;
2305 	default:
2306 		mk_size = CHCR_KEYCTX_NO_KEY;
2307 		opad_present = 0;
2308 		break;
2309 	}
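	/*
	 * The context length in the key context header counts 16-byte
	 * units and includes the header itself.
	 */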
2310 	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
2311 	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
2312 	    V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
2313 	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
2314 	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
2315 	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
2316 }
2317 
2318 static int
2319 ccr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
2320 {
2321 	struct ccr_softc *sc;
2322 	struct ccr_session *s;
2323 	struct auth_hash *auth_hash;
2324 	struct cryptoini *c, *hash, *cipher;
2325 	unsigned int auth_mode, cipher_mode, iv_len, mk_size;
2326 	unsigned int partial_digest_len;
2327 	int error;
2328 	bool gcm_hash, hmac;
2329 
2330 	if (cri == NULL)
2331 		return (EINVAL);
2332 
2333 	gcm_hash = false;
2334 	hmac = false;
2335 	cipher = NULL;
2336 	hash = NULL;
2337 	auth_hash = NULL;
2338 	auth_mode = SCMD_AUTH_MODE_NOP;
2339 	cipher_mode = SCMD_CIPH_MODE_NOP;
2340 	iv_len = 0;
2341 	mk_size = 0;
2342 	partial_digest_len = 0;
2343 	for (c = cri; c != NULL; c = c->cri_next) {
2344 		switch (c->cri_alg) {
2345 		case CRYPTO_SHA1:
2346 		case CRYPTO_SHA2_224:
2347 		case CRYPTO_SHA2_256:
2348 		case CRYPTO_SHA2_384:
2349 		case CRYPTO_SHA2_512:
2350 		case CRYPTO_SHA1_HMAC:
2351 		case CRYPTO_SHA2_224_HMAC:
2352 		case CRYPTO_SHA2_256_HMAC:
2353 		case CRYPTO_SHA2_384_HMAC:
2354 		case CRYPTO_SHA2_512_HMAC:
2355 		case CRYPTO_AES_128_NIST_GMAC:
2356 		case CRYPTO_AES_192_NIST_GMAC:
2357 		case CRYPTO_AES_256_NIST_GMAC:
2358 		case CRYPTO_AES_CCM_CBC_MAC:
2359 			if (hash)
2360 				return (EINVAL);
2361 			hash = c;
2362 			switch (c->cri_alg) {
2363 			case CRYPTO_SHA1:
2364 			case CRYPTO_SHA1_HMAC:
2365 				auth_hash = &auth_hash_hmac_sha1;
2366 				auth_mode = SCMD_AUTH_MODE_SHA1;
2367 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
2368 				partial_digest_len = SHA1_HASH_LEN;
2369 				break;
2370 			case CRYPTO_SHA2_224:
2371 			case CRYPTO_SHA2_224_HMAC:
2372 				auth_hash = &auth_hash_hmac_sha2_224;
2373 				auth_mode = SCMD_AUTH_MODE_SHA224;
2374 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2375 				partial_digest_len = SHA2_256_HASH_LEN;
2376 				break;
2377 			case CRYPTO_SHA2_256:
2378 			case CRYPTO_SHA2_256_HMAC:
2379 				auth_hash = &auth_hash_hmac_sha2_256;
2380 				auth_mode = SCMD_AUTH_MODE_SHA256;
2381 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2382 				partial_digest_len = SHA2_256_HASH_LEN;
2383 				break;
2384 			case CRYPTO_SHA2_384:
2385 			case CRYPTO_SHA2_384_HMAC:
2386 				auth_hash = &auth_hash_hmac_sha2_384;
2387 				auth_mode = SCMD_AUTH_MODE_SHA512_384;
2388 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
2389 				partial_digest_len = SHA2_512_HASH_LEN;
2390 				break;
2391 			case CRYPTO_SHA2_512:
2392 			case CRYPTO_SHA2_512_HMAC:
2393 				auth_hash = &auth_hash_hmac_sha2_512;
2394 				auth_mode = SCMD_AUTH_MODE_SHA512_512;
2395 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
2396 				partial_digest_len = SHA2_512_HASH_LEN;
2397 				break;
2398 			case CRYPTO_AES_128_NIST_GMAC:
2399 			case CRYPTO_AES_192_NIST_GMAC:
2400 			case CRYPTO_AES_256_NIST_GMAC:
2401 				gcm_hash = true;
2402 				auth_mode = SCMD_AUTH_MODE_GHASH;
2403 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2404 				break;
2405 			case CRYPTO_AES_CCM_CBC_MAC:
2406 				auth_mode = SCMD_AUTH_MODE_CBCMAC;
2407 				break;
2408 			}
2409 			switch (c->cri_alg) {
2410 			case CRYPTO_SHA1_HMAC:
2411 			case CRYPTO_SHA2_224_HMAC:
2412 			case CRYPTO_SHA2_256_HMAC:
2413 			case CRYPTO_SHA2_384_HMAC:
2414 			case CRYPTO_SHA2_512_HMAC:
2415 				hmac = true;
2416 				break;
2417 			}
2418 			break;
2419 		case CRYPTO_AES_CBC:
2420 		case CRYPTO_AES_ICM:
2421 		case CRYPTO_AES_NIST_GCM_16:
2422 		case CRYPTO_AES_XTS:
2423 		case CRYPTO_AES_CCM_16:
2424 			if (cipher)
2425 				return (EINVAL);
2426 			cipher = c;
2427 			switch (c->cri_alg) {
2428 			case CRYPTO_AES_CBC:
2429 				cipher_mode = SCMD_CIPH_MODE_AES_CBC;
2430 				iv_len = AES_BLOCK_LEN;
2431 				break;
2432 			case CRYPTO_AES_ICM:
2433 				cipher_mode = SCMD_CIPH_MODE_AES_CTR;
2434 				iv_len = AES_BLOCK_LEN;
2435 				break;
2436 			case CRYPTO_AES_NIST_GCM_16:
2437 				cipher_mode = SCMD_CIPH_MODE_AES_GCM;
2438 				iv_len = AES_GCM_IV_LEN;
2439 				break;
2440 			case CRYPTO_AES_XTS:
2441 				cipher_mode = SCMD_CIPH_MODE_AES_XTS;
2442 				iv_len = AES_BLOCK_LEN;
2443 				break;
2444 			case CRYPTO_AES_CCM_16:
2445 				cipher_mode = SCMD_CIPH_MODE_AES_CCM;
2446 				iv_len = AES_CCM_IV_LEN;
2447 				break;
2448 			}
2449 			if (c->cri_key != NULL) {
2450 				error = ccr_aes_check_keylen(c->cri_alg,
2451 				    c->cri_klen);
2452 				if (error)
2453 					return (error);
2454 			}
2455 			break;
2456 		default:
2457 			return (EINVAL);
2458 		}
2459 	}
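	/*
	 * GHASH is only valid together with AES-GCM, CBC-MAC is only
	 * valid together with AES-CCM, and a session must request at
	 * least one of a hash or a cipher.
	 */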
2460 	if (gcm_hash != (cipher_mode == SCMD_CIPH_MODE_AES_GCM))
2461 		return (EINVAL);
2462 	if ((auth_mode == SCMD_AUTH_MODE_CBCMAC) !=
2463 	    (cipher_mode == SCMD_CIPH_MODE_AES_CCM))
2464 		return (EINVAL);
2465 	if (hash == NULL && cipher == NULL)
2466 		return (EINVAL);
2467 	if (hash != NULL) {
2468 		if (hmac || gcm_hash || auth_mode == SCMD_AUTH_MODE_CBCMAC) {
2469 			if (hash->cri_key == NULL)
2470 				return (EINVAL);
2471 		} else {
2472 			if (hash->cri_key != NULL)
2473 				return (EINVAL);
2474 		}
2475 	}
2476 
2477 	sc = device_get_softc(dev);
2478 
2479 	/*
2480 	 * XXX: Don't create a session if the queues aren't
2481 	 * initialized.  This is racy as the rxq can be destroyed by
2482 	 * the associated VI detaching.  Eventually ccr should use
2483 	 * dedicated queues.
2484 	 */
2485 	if (sc->rxq->iq.adapter == NULL || sc->txq->adapter == NULL)
2486 		return (ENXIO);
2487 
2488 	mtx_lock(&sc->lock);
2489 	if (sc->detaching) {
2490 		mtx_unlock(&sc->lock);
2491 		return (ENXIO);
2492 	}
2493 
2494 	s = crypto_get_driver_session(cses);
2495 
2496 	if (gcm_hash)
2497 		s->mode = GCM;
2498 	else if (cipher_mode == SCMD_CIPH_MODE_AES_CCM)
2499 		s->mode = CCM;
2500 	else if (hash != NULL && cipher != NULL)
2501 		s->mode = AUTHENC;
2502 	else if (hash != NULL) {
2503 		if (hmac)
2504 			s->mode = HMAC;
2505 		else
2506 			s->mode = HASH;
2507 	} else {
2508 		MPASS(cipher != NULL);
2509 		s->mode = BLKCIPHER;
2510 	}
2511 	if (gcm_hash) {
2512 		if (hash->cri_mlen == 0)
2513 			s->gmac.hash_len = AES_GMAC_HASH_LEN;
2514 		else
2515 			s->gmac.hash_len = hash->cri_mlen;
2516 		t4_init_gmac_hash(hash->cri_key, hash->cri_klen,
2517 		    s->gmac.ghash_h);
2518 	} else if (auth_mode == SCMD_AUTH_MODE_CBCMAC) {
2519 		if (hash->cri_mlen == 0)
2520 			s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN;
2521 		else
2522 			s->ccm_mac.hash_len = hash->cri_mlen;
2523 	} else if (hash != NULL) {
2524 		s->hmac.auth_hash = auth_hash;
2525 		s->hmac.auth_mode = auth_mode;
2526 		s->hmac.mk_size = mk_size;
2527 		s->hmac.partial_digest_len = partial_digest_len;
2528 		if (hash->cri_mlen == 0)
2529 			s->hmac.hash_len = auth_hash->hashsize;
2530 		else
2531 			s->hmac.hash_len = hash->cri_mlen;
2532 		if (hmac)
2533 			t4_init_hmac_digest(auth_hash, partial_digest_len,
2534 			    hash->cri_key, hash->cri_klen, s->hmac.pads);
2535 		else
2536 			ccr_init_hash_digest(s, hash->cri_alg);
2537 	}
2538 	if (cipher != NULL) {
2539 		s->blkcipher.cipher_mode = cipher_mode;
2540 		s->blkcipher.iv_len = iv_len;
2541 		if (cipher->cri_key != NULL)
2542 			ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
2543 			    cipher->cri_klen);
2544 	}
2545 
2546 	s->active = true;
2547 	mtx_unlock(&sc->lock);
2548 	return (0);
2549 }
2550 
2551 static void
2552 ccr_freesession(device_t dev, crypto_session_t cses)
2553 {
2554 	struct ccr_softc *sc;
2555 	struct ccr_session *s;
2556 
2557 	sc = device_get_softc(dev);
2558 	s = crypto_get_driver_session(cses);
2559 	mtx_lock(&sc->lock);
2560 	if (s->pending != 0)
2561 		device_printf(dev,
2562 		    "session %p freed with %d pending requests\n", s,
2563 		    s->pending);
2564 	s->active = false;
2565 	mtx_unlock(&sc->lock);
2566 }
2567 
2568 static int
2569 ccr_process(device_t dev, struct cryptop *crp, int hint)
2570 {
2571 	struct ccr_softc *sc;
2572 	struct ccr_session *s;
2573 	struct cryptodesc *crd, *crda, *crde;
2574 	int error;
2575 
2576 	if (crp == NULL)
2577 		return (EINVAL);
2578 
2579 	crd = crp->crp_desc;
2580 	s = crypto_get_driver_session(crp->crp_session);
2581 	sc = device_get_softc(dev);
2582 
2583 	mtx_lock(&sc->lock);
2584 	error = ccr_populate_sglist(sc->sg_crp, crp);
2585 	if (error) {
2586 		sc->stats_sglist_error++;
2587 		goto out;
2588 	}
2589 
2590 	switch (s->mode) {
2591 	case HASH:
2592 		error = ccr_hash(sc, s, crp);
2593 		if (error == 0)
2594 			sc->stats_hash++;
2595 		break;
2596 	case HMAC:
2597 		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
2598 			t4_init_hmac_digest(s->hmac.auth_hash,
2599 			    s->hmac.partial_digest_len, crd->crd_key,
2600 			    crd->crd_klen, s->hmac.pads);
2601 		error = ccr_hash(sc, s, crp);
2602 		if (error == 0)
2603 			sc->stats_hmac++;
2604 		break;
2605 	case BLKCIPHER:
2606 		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
2607 			error = ccr_aes_check_keylen(crd->crd_alg,
2608 			    crd->crd_klen);
2609 			if (error)
2610 				break;
2611 			ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
2612 			    crd->crd_klen);
2613 		}
2614 		error = ccr_blkcipher(sc, s, crp);
2615 		if (error == 0) {
2616 			if (crd->crd_flags & CRD_F_ENCRYPT)
2617 				sc->stats_blkcipher_encrypt++;
2618 			else
2619 				sc->stats_blkcipher_decrypt++;
2620 		}
2621 		break;
2622 	case AUTHENC:
2623 		error = 0;
2624 		switch (crd->crd_alg) {
2625 		case CRYPTO_AES_CBC:
2626 		case CRYPTO_AES_ICM:
2627 		case CRYPTO_AES_XTS:
2628 			/* Only encrypt-then-authenticate supported. */
2629 			crde = crd;
2630 			crda = crd->crd_next;
2631 			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
2632 				error = EINVAL;
2633 				break;
2634 			}
2635 			break;
2636 		default:
2637 			crda = crd;
2638 			crde = crd->crd_next;
2639 			if (crde->crd_flags & CRD_F_ENCRYPT) {
2640 				error = EINVAL;
2641 				break;
2642 			}
2643 			break;
2644 		}
2645 		if (error)
2646 			break;
2647 		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
2648 			t4_init_hmac_digest(s->hmac.auth_hash,
2649 			    s->hmac.partial_digest_len, crda->crd_key,
2650 			    crda->crd_klen, s->hmac.pads);
2651 		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
2652 			error = ccr_aes_check_keylen(crde->crd_alg,
2653 			    crde->crd_klen);
2654 			if (error)
2655 				break;
2656 			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
2657 			    crde->crd_klen);
2658 		}
2659 		error = ccr_authenc(sc, s, crp, crda, crde);
2660 		if (error == 0) {
2661 			if (crde->crd_flags & CRD_F_ENCRYPT)
2662 				sc->stats_authenc_encrypt++;
2663 			else
2664 				sc->stats_authenc_decrypt++;
2665 		}
2666 		break;
2667 	case GCM:
2668 		error = 0;
2669 		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
2670 			crde = crd;
2671 			crda = crd->crd_next;
2672 		} else {
2673 			crda = crd;
2674 			crde = crd->crd_next;
2675 		}
2676 		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
2677 			t4_init_gmac_hash(crda->crd_key, crda->crd_klen,
2678 			    s->gmac.ghash_h);
2679 		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
2680 			error = ccr_aes_check_keylen(crde->crd_alg,
2681 			    crde->crd_klen);
2682 			if (error)
2683 				break;
2684 			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
2685 			    crde->crd_klen);
2686 		}
2687 		if (crde->crd_len == 0) {
2688 			mtx_unlock(&sc->lock);
2689 			ccr_gcm_soft(s, crp, crda, crde);
2690 			return (0);
2691 		}
2692 		error = ccr_gcm(sc, s, crp, crda, crde);
2693 		if (error == EMSGSIZE) {
2694 			sc->stats_sw_fallback++;
2695 			mtx_unlock(&sc->lock);
2696 			ccr_gcm_soft(s, crp, crda, crde);
2697 			return (0);
2698 		}
2699 		if (error == 0) {
2700 			if (crde->crd_flags & CRD_F_ENCRYPT)
2701 				sc->stats_gcm_encrypt++;
2702 			else
2703 				sc->stats_gcm_decrypt++;
2704 		}
2705 		break;
2706 	case CCM:
2707 		error = 0;
2708 		if (crd->crd_alg == CRYPTO_AES_CCM_16) {
2709 			crde = crd;
2710 			crda = crd->crd_next;
2711 		} else {
2712 			crda = crd;
2713 			crde = crd->crd_next;
2714 		}
2715 		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
2716 			error = ccr_aes_check_keylen(crde->crd_alg,
2717 			    crde->crd_klen);
2718 			if (error)
2719 				break;
2720 			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
2721 			    crde->crd_klen);
2722 		}
2723 		error = ccr_ccm(sc, s, crp, crda, crde);
2724 		if (error == EMSGSIZE) {
2725 			sc->stats_sw_fallback++;
2726 			mtx_unlock(&sc->lock);
2727 			ccr_ccm_soft(s, crp, crda, crde);
2728 			return (0);
2729 		}
2730 		if (error == 0) {
2731 			if (crde->crd_flags & CRD_F_ENCRYPT)
2732 				sc->stats_ccm_encrypt++;
2733 			else
2734 				sc->stats_ccm_decrypt++;
2735 		}
2736 		break;
2737 	}
2738 
2739 	if (error == 0) {
2740 		s->pending++;
2741 		sc->stats_inflight++;
2742 	} else
2743 		sc->stats_process_error++;
2744 
2745 out:
2746 	mtx_unlock(&sc->lock);
2747 
2748 	if (error) {
2749 		crp->crp_etype = error;
2750 		crypto_done(crp);
2751 	}
2752 
2753 	return (0);
2754 }
2755 
2756 static int
2757 do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
2758     struct mbuf *m)
2759 {
2760 	struct ccr_softc *sc = iq->adapter->ccr_softc;
2761 	struct ccr_session *s;
2762 	const struct cpl_fw6_pld *cpl;
2763 	struct cryptop *crp;
2764 	uint32_t status;
2765 	int error;
2766 
2767 	if (m != NULL)
2768 		cpl = mtod(m, const void *);
2769 	else
2770 		cpl = (const void *)(rss + 1);
2771 
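	/*
	 * data[1] echoes back the cryptop pointer that was passed
	 * down in the work request, and data[0] carries the
	 * completion status, including the MAC and pad error bits.
	 */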
2772 	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
2773 	s = crypto_get_driver_session(crp->crp_session);
2774 	status = be64toh(cpl->data[0]);
2775 	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
2776 		error = EBADMSG;
2777 	else
2778 		error = 0;
2779 
2780 	mtx_lock(&sc->lock);
2781 	s->pending--;
2782 	sc->stats_inflight--;
2783 
2784 	switch (s->mode) {
2785 	case HASH:
2786 	case HMAC:
2787 		error = ccr_hash_done(sc, s, crp, cpl, error);
2788 		break;
2789 	case BLKCIPHER:
2790 		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
2791 		break;
2792 	case AUTHENC:
2793 		error = ccr_authenc_done(sc, s, crp, cpl, error);
2794 		break;
2795 	case GCM:
2796 		error = ccr_gcm_done(sc, s, crp, cpl, error);
2797 		break;
2798 	case CCM:
2799 		error = ccr_ccm_done(sc, s, crp, cpl, error);
2800 		break;
2801 	}
2802 
2803 	if (error == EBADMSG) {
2804 		if (CHK_MAC_ERR_BIT(status))
2805 			sc->stats_mac_error++;
2806 		if (CHK_PAD_ERR_BIT(status))
2807 			sc->stats_pad_error++;
2808 	}
2809 	mtx_unlock(&sc->lock);
2810 	crp->crp_etype = error;
2811 	crypto_done(crp);
2812 	m_freem(m);
2813 	return (0);
2814 }
2815 
2816 static int
2817 ccr_modevent(module_t mod, int cmd, void *arg)
2818 {
2819 
2820 	switch (cmd) {
2821 	case MOD_LOAD:
2822 		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
2823 		return (0);
2824 	case MOD_UNLOAD:
2825 		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
2826 		return (0);
2827 	default:
2828 		return (EOPNOTSUPP);
2829 	}
2830 }
2831 
2832 static device_method_t ccr_methods[] = {
2833 	DEVMETHOD(device_identify,	ccr_identify),
2834 	DEVMETHOD(device_probe,		ccr_probe),
2835 	DEVMETHOD(device_attach,	ccr_attach),
2836 	DEVMETHOD(device_detach,	ccr_detach),
2837 
2838 	DEVMETHOD(cryptodev_newsession,	ccr_newsession),
2839 	DEVMETHOD(cryptodev_freesession, ccr_freesession),
2840 	DEVMETHOD(cryptodev_process,	ccr_process),
2841 
2842 	DEVMETHOD_END
2843 };
2844 
2845 static driver_t ccr_driver = {
2846 	"ccr",
2847 	ccr_methods,
2848 	sizeof(struct ccr_softc)
2849 };
2850 
2851 static devclass_t ccr_devclass;
2852 
2853 DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
2854 MODULE_VERSION(ccr, 1);
2855 MODULE_DEPEND(ccr, crypto, 1, 1, 1);
2856 MODULE_DEPEND(ccr, t6nex, 1, 1, 1);
2857