xref: /freebsd/sys/dev/cxgbe/crypto/t4_crypto.c (revision e9b1dc32c9bd2ebae5f9e140bfa0e0321bc366b5)
1 /*-
2  * Copyright (c) 2017 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: John Baldwin <jhb@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/types.h>
32 #include <sys/bus.h>
33 #include <sys/lock.h>
34 #include <sys/malloc.h>
35 #include <sys/mutex.h>
36 #include <sys/module.h>
37 #include <sys/sglist.h>
38 
39 #include <opencrypto/cryptodev.h>
40 #include <opencrypto/xform.h>
41 
42 #include "cryptodev_if.h"
43 
44 #include "common/common.h"
45 #include "crypto/t4_crypto.h"
46 
47 /*
48  * Requests consist of:
49  *
50  * +-------------------------------+
51  * | struct fw_crypto_lookaside_wr |
52  * +-------------------------------+
53  * | struct ulp_txpkt              |
54  * +-------------------------------+
55  * | struct ulptx_idata            |
56  * +-------------------------------+
57  * | struct cpl_tx_sec_pdu         |
58  * +-------------------------------+
59  * | struct cpl_tls_tx_scmd_fmt    |
60  * +-------------------------------+
61  * | key context header            |
62  * +-------------------------------+
63  * | AES key                       |  ----- For requests with AES
64  * +-------------------------------+ -
65  * | IPAD (16-byte aligned)        |  \
66  * +-------------------------------+  +---- For requests with HMAC
67  * | OPAD (16-byte aligned)        |  /
68  * +-------------------------------+ -
69  * | GMAC H                        |  ----- For AES-GCM
70  * +-------------------------------+ -
71  * | struct cpl_rx_phys_dsgl       |  \
72  * +-------------------------------+  +---- Destination buffer for
73  * | PHYS_DSGL entries             |  /     non-hash-only requests
74  * +-------------------------------+ -
75  * | 16 dummy bytes                |  ----- Only for hash-only requests
76  * +-------------------------------+
77  * | IV                            |  ----- If immediate IV
78  * +-------------------------------+
79  * | Payload                       |  ----- If immediate Payload
80  * +-------------------------------+ -
81  * | struct ulptx_sgl              |  \
82  * +-------------------------------+  +---- If payload via SGL
83  * | SGL entries                   |  /
84  * +-------------------------------+ -
85  *
86  * Note that the key context must be padded to ensure 16-byte alignment.
87  * For HMAC requests, the key consists of the partial hash of the IPAD
88  * followed by the partial hash of the OPAD.
89  *
90  * Replies consist of:
91  *
92  * +-------------------------------+
93  * | struct cpl_fw6_pld            |
94  * +-------------------------------+
95  * | hash digest                   |  ----- For HMAC request with
96  * +-------------------------------+        'hash_size' set in work request
97  *
98  * A 32-bit big-endian error status word is supplied in the last 4
99  * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
100  * "MAC" error and bit 1 indicates a "PAD" error.
101  *
102  * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
103  * in the request is returned in data[1] of the CPL_FW6_PLD message.
104  *
105  * For block cipher replies, the updated IV is supplied in data[2] and
106  * data[3] of the CPL_FW6_PLD message.
107  *
108  * For hash replies where the work request set 'hash_size' to request
109  * a copy of the hash in the reply, the hash digest is supplied
110  * immediately following the CPL_FW6_PLD message.
111  */
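
/*
 * As a concrete illustration of the padding rule above (an assumed but
 * representative example): an HMAC-SHA1 session has a 20-byte partial
 * hash, so the IPAD and OPAD regions are each rounded up to 32 bytes
 * and the key context carries 64 bytes of pad data; see the iopad_size
 * and kctx_len computations in ccr_hmac() below.
 */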
112 
113 /*
114  * The crypto engine supports a maximum AAD size of 511 bytes.
115  */
116 #define	MAX_AAD_LEN		511
117 
118 /*
119  * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
120  * entries.  While the CPL includes a 16-bit length field, the T6 can
121  * sometimes hang if an error occurs while processing a request with a
122  * single DSGL entry larger than 2k.
123  */
124 #define	MAX_RX_PHYS_DSGL_SGE	32
125 #define	DSGL_SGE_MAXLEN		2048
126 
127 /*
128  * The adapter only supports requests with a total input or output
129  * length of 64k-1 or smaller.  Longer requests either hang or
130  * produce incorrect results.
131  */
132 #define	MAX_REQUEST_SIZE	65535
133 
134 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");
135 
136 struct ccr_session_hmac {
137 	struct auth_hash *auth_hash;
138 	int hash_len;
139 	unsigned int partial_digest_len;
140 	unsigned int auth_mode;
141 	unsigned int mk_size;
142 	char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
143 	char opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
144 };
145 
146 struct ccr_session_gmac {
147 	int hash_len;
148 	char ghash_h[GMAC_BLOCK_LEN];
149 };
150 
151 struct ccr_session_blkcipher {
152 	unsigned int cipher_mode;
153 	unsigned int key_len;
154 	unsigned int iv_len;
155 	__be32 key_ctx_hdr;
156 	char enckey[CHCR_AES_MAX_KEY_LEN];
157 	char deckey[CHCR_AES_MAX_KEY_LEN];
158 };
159 
160 struct ccr_session {
161 	bool active;
162 	int pending;
163 	enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
164 	union {
165 		struct ccr_session_hmac hmac;
166 		struct ccr_session_gmac gmac;
167 	};
168 	struct ccr_session_blkcipher blkcipher;
169 };
170 
171 struct ccr_softc {
172 	struct adapter *adapter;
173 	device_t dev;
174 	uint32_t cid;
175 	int tx_channel_id;
176 	struct mtx lock;
177 	bool detaching;
178 	struct sge_wrq *txq;
179 	struct sge_rxq *rxq;
180 
181 	/*
182 	 * Pre-allocate S/G lists used when preparing a work request.
183 	 * 'sg_crp' contains an sglist describing the entire buffer
184 	 * for a 'struct cryptop'.  'sg_ulptx' is used to describe
185 	 * the data the engine should DMA as input via ULPTX_SGL.
186 	 * 'sg_dsgl' is used to describe the destination that cipher
187 	 * text and a tag should be written to.
188 	 */
189 	struct sglist *sg_crp;
190 	struct sglist *sg_ulptx;
191 	struct sglist *sg_dsgl;
192 
193 	/*
194 	 * Pre-allocate a dummy output buffer for the IV and AAD for
195 	 * AEAD requests.
196 	 */
197 	char *iv_aad_buf;
198 	struct sglist *sg_iv_aad;
199 
200 	/* Statistics. */
201 	uint64_t stats_blkcipher_encrypt;
202 	uint64_t stats_blkcipher_decrypt;
203 	uint64_t stats_hmac;
204 	uint64_t stats_authenc_encrypt;
205 	uint64_t stats_authenc_decrypt;
206 	uint64_t stats_gcm_encrypt;
207 	uint64_t stats_gcm_decrypt;
208 	uint64_t stats_wr_nomem;
209 	uint64_t stats_inflight;
210 	uint64_t stats_mac_error;
211 	uint64_t stats_pad_error;
212 	uint64_t stats_bad_session;
213 	uint64_t stats_sglist_error;
214 	uint64_t stats_process_error;
215 	uint64_t stats_sw_fallback;
216 };
217 
218 /*
219  * Crypto requests involve two kinds of scatter/gather lists.
220  *
221  * Non-hash-only requests require a PHYS_DSGL that describes the
222  * location to store the results of the encryption or decryption
223  * operation.  This SGL uses a different format (PHYS_DSGL) and should
224  * exclude the crd_skip bytes at the start of the data as well as
225  * any AAD or IV.  For authenticated encryption requests it should
226  * also include the destination of the hash or tag.
227  *
228  * The input payload may either be supplied inline as immediate data,
229  * or via a standard ULP_TX SGL.  This SGL should include AAD,
230  * ciphertext, and the hash or tag for authenticated decryption
231  * requests.
232  *
233  * These scatter/gather lists can describe different subsets of the
234  * buffer described by the crypto operation.  ccr_populate_sglist()
235  * generates a scatter/gather list that covers the entire crypto
236  * operation buffer that is then used to construct the other
237  * scatter/gather lists.
238  */
239 static int
240 ccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
241 {
242 	int error;
243 
244 	sglist_reset(sg);
245 	if (crp->crp_flags & CRYPTO_F_IMBUF)
246 		error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
247 	else if (crp->crp_flags & CRYPTO_F_IOV)
248 		error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
249 	else
250 		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
251 	return (error);
252 }
253 
254 /*
255  * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
256  * segments.
257  */
258 static int
259 ccr_count_sgl(struct sglist *sg, int maxsegsize)
260 {
261 	int i, nsegs;
262 
263 	nsegs = 0;
264 	for (i = 0; i < sg->sg_nseg; i++)
265 		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
266 	return (nsegs);
267 }
268 
269 /* These functions deal with PHYS_DSGL for the reply buffer. */
270 static inline int
271 ccr_phys_dsgl_len(int nsegs)
272 {
273 	int len;
274 
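	/*
	 * Each struct phys_sge_pairs packs eight entries: eight 16-bit
	 * lengths followed by eight 64-bit addresses.  A trailing
	 * partial group still consumes all eight length slots but only
	 * needs its address slots rounded up to an even count.
	 */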
275 	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
276 	if ((nsegs % 8) != 0) {
277 		len += sizeof(uint16_t) * 8;
278 		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
279 	}
280 	return (len);
281 }
282 
283 static void
284 ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs)
285 {
286 	struct sglist *sg;
287 	struct cpl_rx_phys_dsgl *cpl;
288 	struct phys_sge_pairs *sgl;
289 	vm_paddr_t paddr;
290 	size_t seglen;
291 	u_int i, j;
292 
293 	sg = sc->sg_dsgl;
294 	cpl = dst;
295 	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
296 	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
297 	cpl->pcirlxorder_to_noofsgentr = htobe32(
298 	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
299 	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
300 	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
301 	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
302 	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
303 	cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id);
304 	cpl->rss_hdr_int.hash_val = 0;
305 	sgl = (struct phys_sge_pairs *)(cpl + 1);
306 	j = 0;
307 	for (i = 0; i < sg->sg_nseg; i++) {
308 		seglen = sg->sg_segs[i].ss_len;
309 		paddr = sg->sg_segs[i].ss_paddr;
310 		do {
311 			sgl->addr[j] = htobe64(paddr);
312 			if (seglen > DSGL_SGE_MAXLEN) {
313 				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
314 				paddr += DSGL_SGE_MAXLEN;
315 				seglen -= DSGL_SGE_MAXLEN;
316 			} else {
317 				sgl->len[j] = htobe16(seglen);
318 				seglen = 0;
319 			}
320 			j++;
321 			if (j == 8) {
322 				sgl++;
323 				j = 0;
324 			}
325 		} while (seglen != 0);
326 	}
327 	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
328 }
329 
330 /* These functions deal with the ULPTX_SGL for input payload. */
331 static inline int
332 ccr_ulptx_sgl_len(int nsegs)
333 {
334 	u_int n;
335 
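	/*
	 * The first segment is stored in the ulptx_sgl header itself
	 * (len0/addr0); the remaining segments are packed two per
	 * ulptx_sge_pair (24 bytes each), with a lone final segment
	 * taking 16 bytes, and the total is padded to a 16-byte
	 * boundary.
	 */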
336 	nsegs--; /* first segment is part of ulptx_sgl */
337 	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
338 	return (roundup2(n, 16));
339 }
340 
341 static void
342 ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
343 {
344 	struct ulptx_sgl *usgl;
345 	struct sglist *sg;
346 	struct sglist_seg *ss;
347 	int i;
348 
349 	sg = sc->sg_ulptx;
350 	MPASS(nsegs == sg->sg_nseg);
351 	ss = &sg->sg_segs[0];
352 	usgl = dst;
353 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
354 	    V_ULPTX_NSGE(nsegs));
355 	usgl->len0 = htobe32(ss->ss_len);
356 	usgl->addr0 = htobe64(ss->ss_paddr);
357 	ss++;
358 	for (i = 0; i < sg->sg_nseg - 1; i++) {
359 		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
360 		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
361 		ss++;
362 	}
363 
365 
366 static bool
367 ccr_use_imm_data(u_int transhdr_len, u_int input_len)
368 {
369 
370 	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
371 		return (false);
372 	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
373 	    SGE_MAX_WR_LEN)
374 		return (false);
375 	return (true);
376 }
377 
378 static void
379 ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len,
380     u_int wr_len, u_int imm_len, u_int sgl_len, u_int hash_size,
381     struct cryptop *crp)
382 {
383 	u_int cctx_size;
384 
385 	cctx_size = sizeof(struct _key_ctx) + kctx_len;
386 	crwr->wreq.op_to_cctx_size = htobe32(
387 	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
388 	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
389 	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
390 	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
391 	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
392 	crwr->wreq.len16_pkd = htobe32(
393 	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
394 	crwr->wreq.session_id = 0;
395 	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
396 	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) |
397 	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
398 	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
399 	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
400 	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
401 	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
402 	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id));
403 	crwr->wreq.key_addr = 0;
404 	crwr->wreq.pld_size_hash_size = htobe32(
405 	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
406 	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
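	/*
	 * Stash the request pointer in the 64-bit cookie; the firmware
	 * echoes it back in data[1] of the CPL_FW6_PLD reply so the
	 * completion path can recover the original cryptop.
	 */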
407 	crwr->wreq.cookie = htobe64((uintptr_t)crp);
408 
409 	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
410 	    V_ULP_TXPKT_DATAMODIFY(0) |
411 	    V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) |
412 	    V_ULP_TXPKT_FID(0) | V_ULP_TXPKT_RO(1));
413 	crwr->ulptx.len = htobe32(
414 	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
415 
416 	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
417 	    V_ULP_TX_SC_MORE(imm_len != 0 ? 0 : 1));
418 	crwr->sc_imm.len = htobe32(wr_len - offsetof(struct chcr_wr, sec_cpl) -
419 	    sgl_len);
420 }
421 
422 static int
423 ccr_hmac(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
424 {
425 	struct chcr_wr *crwr;
426 	struct wrqe *wr;
427 	struct auth_hash *axf;
428 	struct cryptodesc *crd;
429 	char *dst;
430 	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
431 	u_int imm_len, iopad_size;
432 	int error, sgl_nsegs, sgl_len;
433 
434 	crd = crp->crp_desc;
435 
436 	/* Reject requests with too large of an input buffer. */
437 	if (crd->crd_len > MAX_REQUEST_SIZE)
438 		return (EFBIG);
439 
440 	axf = s->hmac.auth_hash;
441 
442 	/* PADs must be 128-bit aligned. */
443 	iopad_size = roundup2(s->hmac.partial_digest_len, 16);
444 
445 	/*
446 	 * The 'key' part of the context includes the aligned IPAD and
447 	 * OPAD.
448 	 */
449 	kctx_len = iopad_size * 2;
450 	hash_size_in_response = axf->hashsize;
451 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
452 
453 	if (crd->crd_len == 0) {
454 		imm_len = axf->blocksize;
455 		sgl_nsegs = 0;
456 		sgl_len = 0;
457 	} else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) {
458 		imm_len = crd->crd_len;
459 		sgl_nsegs = 0;
460 		sgl_len = 0;
461 	} else {
462 		imm_len = 0;
463 		sglist_reset(sc->sg_ulptx);
464 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
465 		    crd->crd_skip, crd->crd_len);
466 		if (error)
467 			return (error);
468 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
469 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
470 	}
471 
472 	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
473 	if (wr_len > SGE_MAX_WR_LEN)
474 		return (EFBIG);
475 	wr = alloc_wrqe(wr_len, sc->txq);
476 	if (wr == NULL) {
477 		sc->stats_wr_nomem++;
478 		return (ENOMEM);
479 	}
480 	crwr = wrtod(wr);
481 	memset(crwr, 0, wr_len);
482 
483 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
484 	    hash_size_in_response, crp);
485 
486 	/* XXX: Hardcodes SGE loopback channel of 0. */
487 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
488 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
489 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
490 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
491 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
492 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));
493 
494 	crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize :
495 	    crd->crd_len);
496 
497 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
498 	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));
499 
500 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
501 	crwr->sec_cpl.seqno_numivs = htobe32(
502 	    V_SCMD_SEQ_NO_CTRL(0) |
503 	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
504 	    V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_NOP) |
505 	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
506 	    V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NO_TRUNC));
507 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
508 	    V_SCMD_LAST_FRAG(0) |
509 	    V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1));
510 
511 	memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len);
512 	memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad,
513 	    s->hmac.partial_digest_len);
514 
515 	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
516 	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
517 	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
518 	    V_KEY_CONTEXT_OPAD_PRESENT(1) | V_KEY_CONTEXT_SALT_PRESENT(1) |
519 	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
520 	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));
521 
522 	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
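	/*
	 * Zero-length payload: feed the engine one hand-built final
	 * padding block consisting of the 0x80 terminator and a 64-bit
	 * big-endian bit count of axf->blocksize << 3 (accounting for
	 * the IPAD block already absorbed into the partial hash).
	 */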
523 	if (crd->crd_len == 0) {
524 		dst[0] = 0x80;
525 		*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
526 		    htobe64(axf->blocksize << 3);
527 	} else if (imm_len != 0)
528 		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
529 		    crd->crd_len, dst);
530 	else
531 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
532 
533 	/* XXX: TODO backpressure */
534 	t4_wrq_tx(sc->adapter, wr);
535 
536 	return (0);
537 }
538 
539 static int
540 ccr_hmac_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
541     const struct cpl_fw6_pld *cpl, int error)
542 {
543 	struct cryptodesc *crd;
544 
545 	crd = crp->crp_desc;
546 	if (error == 0) {
547 		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
548 		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
549 	}
550 
551 	return (error);
552 }
553 
554 static int
555 ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
556 {
557 	char iv[CHCR_MAX_CRYPTO_IV_LEN];
558 	struct chcr_wr *crwr;
559 	struct wrqe *wr;
560 	struct cryptodesc *crd;
561 	char *dst;
562 	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
563 	u_int imm_len;
564 	int dsgl_nsegs, dsgl_len;
565 	int sgl_nsegs, sgl_len;
566 	int error;
567 
568 	crd = crp->crp_desc;
569 
570 	if (s->blkcipher.key_len == 0 || crd->crd_len == 0)
571 		return (EINVAL);
572 	if (crd->crd_alg == CRYPTO_AES_CBC &&
573 	    (crd->crd_len % AES_BLOCK_LEN) != 0)
574 		return (EINVAL);
575 
576 	/* Reject requests with too large of an input buffer. */
577 	if (crd->crd_len > MAX_REQUEST_SIZE)
578 		return (EFBIG);
579 
580 	if (crd->crd_flags & CRD_F_ENCRYPT)
581 		op_type = CHCR_ENCRYPT_OP;
582 	else
583 		op_type = CHCR_DECRYPT_OP;
584 
585 	sglist_reset(sc->sg_dsgl);
586 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
587 	    crd->crd_len);
588 	if (error)
589 		return (error);
590 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
591 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
592 		return (EFBIG);
593 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
594 
595 	/* The 'key' must be 128-bit aligned. */
596 	kctx_len = roundup2(s->blkcipher.key_len, 16);
597 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
598 
599 	if (ccr_use_imm_data(transhdr_len, crd->crd_len +
600 	    s->blkcipher.iv_len)) {
601 		imm_len = crd->crd_len;
602 		sgl_nsegs = 0;
603 		sgl_len = 0;
604 	} else {
605 		imm_len = 0;
606 		sglist_reset(sc->sg_ulptx);
607 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
608 		    crd->crd_skip, crd->crd_len);
609 		if (error)
610 			return (error);
611 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
612 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
613 	}
614 
615 	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
616 	    roundup2(imm_len, 16) + sgl_len;
617 	if (wr_len > SGE_MAX_WR_LEN)
618 		return (EFBIG);
619 	wr = alloc_wrqe(wr_len, sc->txq);
620 	if (wr == NULL) {
621 		sc->stats_wr_nomem++;
622 		return (ENOMEM);
623 	}
624 	crwr = wrtod(wr);
625 	memset(crwr, 0, wr_len);
626 
627 	/*
628 	 * Read the existing IV from the request or generate a random
629 	 * one if none is provided.  Optionally copy the generated IV
630 	 * into the output buffer if requested.
631 	 */
632 	if (op_type == CHCR_ENCRYPT_OP) {
633 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
634 			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
635 		else
636 			arc4rand(iv, s->blkcipher.iv_len, 0);
637 		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
638 			crypto_copyback(crp->crp_flags, crp->crp_buf,
639 			    crd->crd_inject, s->blkcipher.iv_len, iv);
640 	} else {
641 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
642 			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
643 		else
644 			crypto_copydata(crp->crp_flags, crp->crp_buf,
645 			    crd->crd_inject, s->blkcipher.iv_len, iv);
646 	}
647 
648 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
649 	    crp);
650 
651 	/* XXX: Hardcodes SGE loopback channel of 0. */
652 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
653 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
654 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
655 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
656 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
657 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
658 
659 	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len);
660 
661 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
662 	    V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) |
663 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
664 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
665 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));
666 
667 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
668 	crwr->sec_cpl.seqno_numivs = htobe32(
669 	    V_SCMD_SEQ_NO_CTRL(0) |
670 	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
671 	    V_SCMD_ENC_DEC_CTRL(op_type) |
672 	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
673 	    V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_NOP) |
674 	    V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NOP) |
675 	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
676 	    V_SCMD_NUM_IVS(0));
677 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
678 	    V_SCMD_IV_GEN_CTRL(0) |
679 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
680 	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
681 
682 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
683 	switch (crd->crd_alg) {
684 	case CRYPTO_AES_CBC:
685 		if (crd->crd_flags & CRD_F_ENCRYPT)
686 			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
687 			    s->blkcipher.key_len);
688 		else
689 			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
690 			    s->blkcipher.key_len);
691 		break;
692 	case CRYPTO_AES_ICM:
693 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
694 		    s->blkcipher.key_len);
695 		break;
696 	case CRYPTO_AES_XTS:
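		/*
		 * XTS: the hardware key context appears to want the
		 * tweak key (the second half of the supplied key
		 * material) first, followed by the data key.
		 */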
697 		key_half = s->blkcipher.key_len / 2;
698 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
699 		    key_half);
700 		if (crd->crd_flags & CRD_F_ENCRYPT)
701 			memcpy(crwr->key_ctx.key + key_half,
702 			    s->blkcipher.enckey, key_half);
703 		else
704 			memcpy(crwr->key_ctx.key + key_half,
705 			    s->blkcipher.deckey, key_half);
706 		break;
707 	}
708 
709 	dst = (char *)(crwr + 1) + kctx_len;
710 	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
711 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
712 	memcpy(dst, iv, s->blkcipher.iv_len);
713 	dst += s->blkcipher.iv_len;
714 	if (imm_len != 0)
715 		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
716 		    crd->crd_len, dst);
717 	else
718 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
719 
720 	/* XXX: TODO backpressure */
721 	t4_wrq_tx(sc->adapter, wr);
722 
723 	return (0);
724 }
725 
726 static int
727 ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
728     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
729 {
730 
731 	/*
732 	 * The updated IV to permit chained requests is at
733 	 * cpl->data[2], but OCF doesn't permit chained requests.
734 	 */
735 	return (error);
736 }
737 
738 /*
739  * 'hashsize' is the length of a full digest.  'authsize' is the
740  * requested digest length for this operation which may be less
741  * than 'hashsize'.
742  */
743 static int
744 ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
745 {
746 
747 	if (authsize == 10)
748 		return (CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366);
749 	if (authsize == 12)
750 		return (CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT);
751 	if (authsize == hashsize / 2)
752 		return (CHCR_SCMD_HMAC_CTRL_DIV2);
753 	return (CHCR_SCMD_HMAC_CTRL_NO_TRUNC);
754 }
755 
756 static int
757 ccr_authenc(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
758     struct cryptodesc *crda, struct cryptodesc *crde)
759 {
760 	char iv[CHCR_MAX_CRYPTO_IV_LEN];
761 	struct chcr_wr *crwr;
762 	struct wrqe *wr;
763 	struct auth_hash *axf;
764 	char *dst;
765 	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
766 	u_int hash_size_in_response, imm_len, iopad_size;
767 	u_int aad_start, aad_len, aad_stop;
768 	u_int auth_start, auth_stop, auth_insert;
769 	u_int cipher_start, cipher_stop;
770 	u_int hmac_ctrl, input_len;
771 	int dsgl_nsegs, dsgl_len;
772 	int sgl_nsegs, sgl_len;
773 	int error;
774 
775 	/*
776 	 * If there is a need in the future, requests with an empty
777 	 * payload could be supported as HMAC-only requests.
778 	 */
779 	if (s->blkcipher.key_len == 0 || crde->crd_len == 0)
780 		return (EINVAL);
781 	if (crde->crd_alg == CRYPTO_AES_CBC &&
782 	    (crde->crd_len % AES_BLOCK_LEN) != 0)
783 		return (EINVAL);
784 
785 	/*
786 	 * Compute the length of the AAD (data covered by the
787 	 * authentication descriptor but not the encryption
788 	 * descriptor).  To simplify the logic, AAD is only permitted
789 	 * before the cipher/plain text, not after.  This is true of
790 	 * all currently-generated requests.
791 	 */
792 	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
793 		return (EINVAL);
794 	if (crda->crd_skip < crde->crd_skip) {
795 		if (crda->crd_skip + crda->crd_len > crde->crd_skip)
796 			aad_len = (crde->crd_skip - crda->crd_skip);
797 		else
798 			aad_len = crda->crd_len;
799 	} else
800 		aad_len = 0;
801 	if (aad_len + s->blkcipher.iv_len > MAX_AAD_LEN)
802 		return (EINVAL);
803 
804 	axf = s->hmac.auth_hash;
805 	hash_size_in_response = s->hmac.hash_len;
806 	if (crde->crd_flags & CRD_F_ENCRYPT)
807 		op_type = CHCR_ENCRYPT_OP;
808 	else
809 		op_type = CHCR_DECRYPT_OP;
810 
811 	/*
812 	 * The output buffer consists of the cipher text followed by
813 	 * the hash when encrypting.  For decryption it only contains
814 	 * the plain text.
815 	 *
816 	 * Due to a firmware bug, the output buffer must include a
817 	 * dummy output buffer for the IV and AAD prior to the real
818 	 * output buffer.
819 	 */
820 	if (op_type == CHCR_ENCRYPT_OP) {
821 		if (s->blkcipher.iv_len + aad_len + crde->crd_len +
822 		    hash_size_in_response > MAX_REQUEST_SIZE)
823 			return (EFBIG);
824 	} else {
825 		if (s->blkcipher.iv_len + aad_len + crde->crd_len >
826 		    MAX_REQUEST_SIZE)
827 			return (EFBIG);
828 	}
829 	sglist_reset(sc->sg_dsgl);
830 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
831 	    s->blkcipher.iv_len + aad_len);
832 	if (error)
833 		return (error);
834 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
835 	    crde->crd_len);
836 	if (error)
837 		return (error);
838 	if (op_type == CHCR_ENCRYPT_OP) {
839 		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
840 		    crda->crd_inject, hash_size_in_response);
841 		if (error)
842 			return (error);
843 	}
844 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
845 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
846 		return (EFBIG);
847 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
848 
849 	/* PADs must be 128-bit aligned. */
850 	iopad_size = roundup2(s->hmac.partial_digest_len, 16);
851 
852 	/*
853 	 * The 'key' part of the key context consists of the key followed
854 	 * by the IPAD and OPAD.
855 	 */
856 	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
857 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
858 
859 	/*
860 	 * The input buffer consists of the IV, any AAD, and then the
861 	 * cipher/plain text.  For decryption requests the hash is
862 	 * appended after the cipher text.
863 	 *
864 	 * The IV is always stored at the start of the input buffer
865 	 * even though it may be duplicated in the payload.  The
866 	 * crypto engine doesn't work properly if the IV offset points
867 	 * inside of the AAD region, so a second copy is always
868 	 * required.
869 	 */
870 	input_len = aad_len + crde->crd_len;
871 
872 	/*
873 	 * The firmware hangs if sent a request that is only slightly
874 	 * smaller than MAX_REQUEST_SIZE.  In particular, the
875 	 * firmware appears to require 512 - 16 bytes of spare room
876 	 * along with the size of the hash even if the hash isn't
877 	 * included in the input buffer.
878 	 */
879 	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
880 	    MAX_REQUEST_SIZE)
881 		return (EFBIG);
882 	if (op_type == CHCR_DECRYPT_OP)
883 		input_len += hash_size_in_response;
884 	if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) {
885 		imm_len = input_len;
886 		sgl_nsegs = 0;
887 		sgl_len = 0;
888 	} else {
889 		imm_len = 0;
890 		sglist_reset(sc->sg_ulptx);
891 		if (aad_len != 0) {
892 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
893 			    crda->crd_skip, aad_len);
894 			if (error)
895 				return (error);
896 		}
897 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
898 		    crde->crd_skip, crde->crd_len);
899 		if (error)
900 			return (error);
901 		if (op_type == CHCR_DECRYPT_OP) {
902 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
903 			    crda->crd_inject, hash_size_in_response);
904 			if (error)
905 				return (error);
906 		}
907 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
908 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
909 	}
910 
911 	/*
912 	 * Any auth-only data before the cipher region is marked as AAD.
913 	 * Auth-data that overlaps with the cipher region is placed in
914 	 * the auth section.
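	 * The aad/auth/cipher start values and aad_stop are 1-based byte
	 * offsets into the input payload handed to the engine (which
	 * begins with the IV), while cipher_stop, auth_stop, and
	 * auth_insert count bytes back from the end of it; zero means
	 * the region is not present.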
915 	 */
916 	if (aad_len != 0) {
917 		aad_start = s->blkcipher.iv_len + 1;
918 		aad_stop = aad_start + aad_len - 1;
919 	} else {
920 		aad_start = 0;
921 		aad_stop = 0;
922 	}
923 	cipher_start = s->blkcipher.iv_len + aad_len + 1;
924 	if (op_type == CHCR_DECRYPT_OP)
925 		cipher_stop = hash_size_in_response;
926 	else
927 		cipher_stop = 0;
928 	if (aad_len == crda->crd_len) {
929 		auth_start = 0;
930 		auth_stop = 0;
931 	} else {
932 		if (aad_len != 0)
933 			auth_start = cipher_start;
934 		else
935 			auth_start = s->blkcipher.iv_len + crda->crd_skip -
936 			    crde->crd_skip + 1;
937 		auth_stop = (crde->crd_skip + crde->crd_len) -
938 		    (crda->crd_skip + crda->crd_len) + cipher_stop;
939 	}
940 	if (op_type == CHCR_DECRYPT_OP)
941 		auth_insert = hash_size_in_response;
942 	else
943 		auth_insert = 0;
944 
945 	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
946 	    roundup2(imm_len, 16) + sgl_len;
947 	if (wr_len > SGE_MAX_WR_LEN)
948 		return (EFBIG);
949 	wr = alloc_wrqe(wr_len, sc->txq);
950 	if (wr == NULL) {
951 		sc->stats_wr_nomem++;
952 		return (ENOMEM);
953 	}
954 	crwr = wrtod(wr);
955 	memset(crwr, 0, wr_len);
956 
957 	/*
958 	 * Read the existing IV from the request or generate a random
959 	 * one if none is provided.  Optionally copy the generated IV
960 	 * into the output buffer if requested.
961 	 */
962 	if (op_type == CHCR_ENCRYPT_OP) {
963 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
964 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
965 		else
966 			arc4rand(iv, s->blkcipher.iv_len, 0);
967 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
968 			crypto_copyback(crp->crp_flags, crp->crp_buf,
969 			    crde->crd_inject, s->blkcipher.iv_len, iv);
970 	} else {
971 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
972 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
973 		else
974 			crypto_copydata(crp->crp_flags, crp->crp_buf,
975 			    crde->crd_inject, s->blkcipher.iv_len, iv);
976 	}
977 
978 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
979 	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);
980 
981 	/* XXX: Hardcodes SGE loopback channel of 0. */
982 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
983 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
984 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
985 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
986 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
987 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
988 
989 	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len);
990 
991 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
992 	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
993 	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
994 	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
995 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
996 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
997 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
998 	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
999 	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
1000 	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1001 
1002 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1003 	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
1004 	crwr->sec_cpl.seqno_numivs = htobe32(
1005 	    V_SCMD_SEQ_NO_CTRL(0) |
1006 	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
1007 	    V_SCMD_ENC_DEC_CTRL(op_type) |
1008 	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1009 	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
1010 	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
1011 	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
1012 	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
1013 	    V_SCMD_NUM_IVS(0));
1014 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
1015 	    V_SCMD_IV_GEN_CTRL(0) |
1016 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1017 	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1018 
1019 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
1020 	switch (crde->crd_alg) {
1021 	case CRYPTO_AES_CBC:
1022 		if (crde->crd_flags & CRD_F_ENCRYPT)
1023 			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
1024 			    s->blkcipher.key_len);
1025 		else
1026 			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
1027 			    s->blkcipher.key_len);
1028 		break;
1029 	case CRYPTO_AES_ICM:
1030 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
1031 		    s->blkcipher.key_len);
1032 		break;
1033 	case CRYPTO_AES_XTS:
1034 		key_half = s->blkcipher.key_len / 2;
1035 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
1036 		    key_half);
1037 		if (crde->crd_flags & CRD_F_ENCRYPT)
1038 			memcpy(crwr->key_ctx.key + key_half,
1039 			    s->blkcipher.enckey, key_half);
1040 		else
1041 			memcpy(crwr->key_ctx.key + key_half,
1042 			    s->blkcipher.deckey, key_half);
1043 		break;
1044 	}
1045 
1046 	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
1047 	memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len);
1048 	memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len);
1049 
1050 	dst = (char *)(crwr + 1) + kctx_len;
1051 	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
1052 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1053 	memcpy(dst, iv, s->blkcipher.iv_len);
1054 	dst += s->blkcipher.iv_len;
1055 	if (imm_len != 0) {
1056 		if (aad_len != 0) {
1057 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1058 			    crda->crd_skip, aad_len, dst);
1059 			dst += aad_len;
1060 		}
1061 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
1062 		    crde->crd_len, dst);
1063 		dst += crde->crd_len;
1064 		if (op_type == CHCR_DECRYPT_OP)
1065 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1066 			    crda->crd_inject, hash_size_in_response, dst);
1067 	} else
1068 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
1069 
1070 	/* XXX: TODO backpressure */
1071 	t4_wrq_tx(sc->adapter, wr);
1072 
1073 	return (0);
1074 }
1075 
1076 static int
1077 ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s,
1078     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1079 {
1080 	struct cryptodesc *crd;
1081 
1082 	/*
1083 	 * The updated IV to permit chained requests is at
1084 	 * cpl->data[2], but OCF doesn't permit chained requests.
1085 	 *
1086 	 * For a decryption request, the hardware may do a verification
1087 	 * of the HMAC which will fail if the existing HMAC isn't in the
1088 	 * buffer.  If that happens, clear the error and copy the HMAC
1089 	 * from the CPL reply into the buffer.
1090 	 *
1091 	 * For encryption requests, crd should be the cipher request
1092 	 * which will have CRD_F_ENCRYPT set.  For decryption
1093 	 * requests, crp_desc will be the HMAC request which should
1094 	 * not have this flag set.
1095 	 */
1096 	crd = crp->crp_desc;
1097 	if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) &&
1098 	    !(crd->crd_flags & CRD_F_ENCRYPT)) {
1099 		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
1100 		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
1101 		error = 0;
1102 	}
1103 	return (error);
1104 }
1105 
1106 static int
1107 ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
1108     struct cryptodesc *crda, struct cryptodesc *crde)
1109 {
1110 	char iv[CHCR_MAX_CRYPTO_IV_LEN];
1111 	struct chcr_wr *crwr;
1112 	struct wrqe *wr;
1113 	char *dst;
1114 	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
1115 	u_int hash_size_in_response, imm_len;
1116 	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
1117 	u_int hmac_ctrl, input_len;
1118 	int dsgl_nsegs, dsgl_len;
1119 	int sgl_nsegs, sgl_len;
1120 	int error;
1121 
1122 	if (s->blkcipher.key_len == 0)
1123 		return (EINVAL);
1124 
1125 	/*
1126 	 * The crypto engine doesn't handle GCM requests with an empty
1127 	 * payload, so handle those in software instead.
1128 	 */
1129 	if (crde->crd_len == 0)
1130 		return (EMSGSIZE);
1131 
1132 	/*
1133 	 * AAD is only permitted before the cipher/plain text, not
1134 	 * after.
1135 	 */
1136 	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
1137 		return (EMSGSIZE);
1138 
1139 	if (crda->crd_len + AES_BLOCK_LEN > MAX_AAD_LEN)
1140 		return (EMSGSIZE);
1141 
1142 	hash_size_in_response = s->gmac.hash_len;
1143 	if (crde->crd_flags & CRD_F_ENCRYPT)
1144 		op_type = CHCR_ENCRYPT_OP;
1145 	else
1146 		op_type = CHCR_DECRYPT_OP;
1147 
1148 	/*
1149 	 * The IV handling for GCM in OCF is a bit more complicated in
1150 	 * that IPSec provides a full 16-byte IV (including the
1151 	 * counter), whereas the /dev/crypto interface sometimes
1152 	 * provides a full 16-byte IV (if no IV is provided in the
1153 	 * ioctl) and sometimes a 12-byte IV (if the IV was explicit).
1154 	 *
1155 	 * When provided a 12-byte IV, assume the IV is really 16 bytes
1156 	 * with a counter in the last 4 bytes initialized to 1.
1157 	 *
1158 	 * While iv_len is checked below, the value is currently
1159 	 * always set to 12 when creating a GCM session in this driver
1160 	 * due to limitations in OCF (there is no way to know what the
1161 	 * IV length of a given request will be).  This means that the
1162 	 * driver always assumes a 12-byte IV for now.
1163 	 */
1164 	if (s->blkcipher.iv_len == 12)
1165 		iv_len = AES_BLOCK_LEN;
1166 	else
1167 		iv_len = s->blkcipher.iv_len;
1168 
1169 	/*
1170 	 * The output buffer consists of the cipher text followed by
1171 	 * the tag when encrypting.  For decryption it only contains
1172 	 * the plain text.
1173 	 *
1174 	 * Due to a firmware bug, the output buffer must include a
1175 	 * dummy output buffer for the IV and AAD prior to the real
1176 	 * output buffer.
1177 	 */
1178 	if (op_type == CHCR_ENCRYPT_OP) {
1179 		if (iv_len + crda->crd_len + crde->crd_len +
1180 		    hash_size_in_response > MAX_REQUEST_SIZE)
1181 			return (EFBIG);
1182 	} else {
1183 		if (iv_len + crda->crd_len + crde->crd_len > MAX_REQUEST_SIZE)
1184 			return (EFBIG);
1185 	}
1186 	sglist_reset(sc->sg_dsgl);
1187 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1188 	    crda->crd_len);
1189 	if (error)
1190 		return (error);
1191 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
1192 	    crde->crd_len);
1193 	if (error)
1194 		return (error);
1195 	if (op_type == CHCR_ENCRYPT_OP) {
1196 		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
1197 		    crda->crd_inject, hash_size_in_response);
1198 		if (error)
1199 			return (error);
1200 	}
1201 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
1202 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1203 		return (EFBIG);
1204 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1205 
1206 	/*
1207 	 * The 'key' part of the key context consists of the key followed
1208 	 * by the Galois hash key.
1209 	 */
1210 	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
1211 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1212 
1213 	/*
1214 	 * The input buffer consists of the IV, any AAD, and then the
1215 	 * cipher/plain text.  For decryption requests the hash is
1216 	 * appended after the cipher text.
1217 	 *
1218 	 * The IV is always stored at the start of the input buffer
1219 	 * even though it may be duplicated in the payload.  The
1220 	 * crypto engine doesn't work properly if the IV offset points
1221 	 * inside of the AAD region, so a second copy is always
1222 	 * required.
1223 	 */
1224 	input_len = crda->crd_len + crde->crd_len;
1225 	if (op_type == CHCR_DECRYPT_OP)
1226 		input_len += hash_size_in_response;
1227 	if (input_len > MAX_REQUEST_SIZE)
1228 		return (EFBIG);
1229 	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1230 		imm_len = input_len;
1231 		sgl_nsegs = 0;
1232 		sgl_len = 0;
1233 	} else {
1234 		imm_len = 0;
1235 		sglist_reset(sc->sg_ulptx);
1236 		if (crda->crd_len != 0) {
1237 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1238 			    crda->crd_skip, crda->crd_len);
1239 			if (error)
1240 				return (error);
1241 		}
1242 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1243 		    crde->crd_skip, crde->crd_len);
1244 		if (error)
1245 			return (error);
1246 		if (op_type == CHCR_DECRYPT_OP) {
1247 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1248 			    crda->crd_inject, hash_size_in_response);
1249 			if (error)
1250 				return (error);
1251 		}
1252 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
1253 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1254 	}
1255 
1256 	if (crda->crd_len != 0) {
1257 		aad_start = iv_len + 1;
1258 		aad_stop = aad_start + crda->crd_len - 1;
1259 	} else {
1260 		aad_start = 0;
1261 		aad_stop = 0;
1262 	}
1263 	cipher_start = iv_len + crda->crd_len + 1;
1264 	if (op_type == CHCR_DECRYPT_OP)
1265 		cipher_stop = hash_size_in_response;
1266 	else
1267 		cipher_stop = 0;
1268 	if (op_type == CHCR_DECRYPT_OP)
1269 		auth_insert = hash_size_in_response;
1270 	else
1271 		auth_insert = 0;
1272 
1273 	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
1274 	    sgl_len;
1275 	if (wr_len > SGE_MAX_WR_LEN)
1276 		return (EFBIG);
1277 	wr = alloc_wrqe(wr_len, sc->txq);
1278 	if (wr == NULL) {
1279 		sc->stats_wr_nomem++;
1280 		return (ENOMEM);
1281 	}
1282 	crwr = wrtod(wr);
1283 	memset(crwr, 0, wr_len);
1284 
1285 	/*
1286 	 * Read the existing IV from the request or generate a random
1287 	 * one if none is provided.  Optionally copy the generated IV
1288 	 * into the output buffer if requested.
1289 	 *
1290 	 * If the input IV is 12 bytes, append an explicit 4-byte
1291 	 * counter of 1.
1292 	 */
1293 	if (op_type == CHCR_ENCRYPT_OP) {
1294 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1295 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
1296 		else
1297 			arc4rand(iv, s->blkcipher.iv_len, 0);
1298 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
1299 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1300 			    crde->crd_inject, s->blkcipher.iv_len, iv);
1301 	} else {
1302 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1303 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
1304 		else
1305 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1306 			    crde->crd_inject, s->blkcipher.iv_len, iv);
1307 	}
1308 	if (s->blkcipher.iv_len == 12)
1309 		*(uint32_t *)&iv[12] = htobe32(1);
1310 
1311 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
1312 	    crp);
1313 
1314 	/* XXX: Hardcodes SGE loopback channel of 0. */
1315 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
1316 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1317 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
1318 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1319 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1320 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1321 
1322 	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1323 
1324 	/*
1325 	 * NB: cipherstop is explicitly set to 0.  On encrypt it
1326 	 * should normally be set to 0 anyway (as the encrypt crd ends
1327 	 * at the end of the input).  However, for decrypt the cipher
1328 	 * ends before the tag in the AUTHENC case (and authstop is
1329 	 * set to stop before the tag), but for GCM the cipher still
1330 	 * runs to the end of the buffer.  Not sure if this is
1331 	 * intentional or a firmware quirk, but it is required for
1332 	 * working tag validation with GCM decryption.
1333 	 */
1334 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1335 	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1336 	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1337 	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1338 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
1339 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1340 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
1341 	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1342 	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1343 	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1344 
1345 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1346 	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
1347 	crwr->sec_cpl.seqno_numivs = htobe32(
1348 	    V_SCMD_SEQ_NO_CTRL(0) |
1349 	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
1350 	    V_SCMD_ENC_DEC_CTRL(op_type) |
1351 	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1352 	    V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
1353 	    V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_GHASH) |
1354 	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
1355 	    V_SCMD_IV_SIZE(iv_len / 2) |
1356 	    V_SCMD_NUM_IVS(0));
1357 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
1358 	    V_SCMD_IV_GEN_CTRL(0) |
1359 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1360 	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1361 
1362 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
1363 	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
1364 	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
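	/*
	 * Append the GHASH subkey H (per the GCM spec, the AES
	 * encryption of the all-zero block) that was derived when the
	 * session key was installed.
	 */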
1365 	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);
1366 
1367 	dst = (char *)(crwr + 1) + kctx_len;
1368 	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
1369 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1370 	memcpy(dst, iv, iv_len);
1371 	dst += iv_len;
1372 	if (imm_len != 0) {
1373 		if (crda->crd_len != 0) {
1374 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1375 			    crda->crd_skip, crda->crd_len, dst);
1376 			dst += crda->crd_len;
1377 		}
1378 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
1379 		    crde->crd_len, dst);
1380 		dst += crde->crd_len;
1381 		if (op_type == CHCR_DECRYPT_OP)
1382 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1383 			    crda->crd_inject, hash_size_in_response, dst);
1384 	} else
1385 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
1386 
1387 	/* XXX: TODO backpressure */
1388 	t4_wrq_tx(sc->adapter, wr);
1389 
1390 	return (0);
1391 }
1392 
1393 static int
1394 ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
1395     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1396 {
1397 
1398 	/*
1399 	 * The updated IV to permit chained requests is at
1400 	 * cpl->data[2], but OCF doesn't permit chained requests.
1401 	 *
1402 	 * Note that the hardware should always verify the GMAC hash.
1403 	 */
1404 	return (error);
1405 }
1406 
1407 /*
1408  * Handle a GCM request that is not supported by the crypto engine by
1409  * performing the operation in software.  Derived from swcr_authenc().
1410  */
1411 static void
1412 ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp,
1413     struct cryptodesc *crda, struct cryptodesc *crde)
1414 {
1415 	struct auth_hash *axf;
1416 	struct enc_xform *exf;
1417 	void *auth_ctx;
1418 	uint8_t *kschedule;
1419 	char block[GMAC_BLOCK_LEN];
1420 	char digest[GMAC_DIGEST_LEN];
1421 	char iv[AES_BLOCK_LEN];
1422 	int error, i, len;
1423 
1424 	auth_ctx = NULL;
1425 	kschedule = NULL;
1426 
1427 	/* Initialize the MAC. */
1428 	switch (s->blkcipher.key_len) {
1429 	case 16:
1430 		axf = &auth_hash_nist_gmac_aes_128;
1431 		break;
1432 	case 24:
1433 		axf = &auth_hash_nist_gmac_aes_192;
1434 		break;
1435 	case 32:
1436 		axf = &auth_hash_nist_gmac_aes_256;
1437 		break;
1438 	default:
1439 		error = EINVAL;
1440 		goto out;
1441 	}
1442 	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
1443 	if (auth_ctx == NULL) {
1444 		error = ENOMEM;
1445 		goto out;
1446 	}
1447 	axf->Init(auth_ctx);
1448 	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
1449 
1450 	/* Initialize the cipher. */
1451 	exf = &enc_xform_aes_nist_gcm;
1452 	error = exf->setkey(&kschedule, s->blkcipher.enckey,
1453 	    s->blkcipher.key_len);
1454 	if (error)
1455 		goto out;
1456 
1457 	/*
1458 	 * This assumes a 12-byte IV from the crp.  See longer comment
1459 	 * above in ccr_gcm() for more details.
1460 	 */
1461 	if (crde->crd_flags & CRD_F_ENCRYPT) {
1462 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1463 			memcpy(iv, crde->crd_iv, 12);
1464 		else
1465 			arc4rand(iv, 12, 0);
1466 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
1467 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1468 			    crde->crd_inject, 12, iv);
1469 	} else {
1470 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1471 			memcpy(iv, crde->crd_iv, 12);
1472 		else
1473 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1474 			    crde->crd_inject, 12, iv);
1475 	}
1476 	*(uint32_t *)&iv[12] = htobe32(1);
1477 
1478 	axf->Reinit(auth_ctx, iv, sizeof(iv));
1479 
1480 	/* MAC the AAD. */
1481 	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
1482 		len = imin(crda->crd_len - i, sizeof(block));
1483 		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
1484 		    i, len, block);
1485 		bzero(block + len, sizeof(block) - len);
1486 		axf->Update(auth_ctx, block, sizeof(block));
1487 	}
1488 
1489 	exf->reinit(kschedule, iv);
1490 
1491 	/* Do encryption with MAC */
1492 	for (i = 0; i < crde->crd_len; i += sizeof(block)) {
1493 		len = imin(crde->crd_len - i, sizeof(block));
1494 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip +
1495 		    i, len, block);
1496 		bzero(block + len, sizeof(block) - len);
1497 		if (crde->crd_flags & CRD_F_ENCRYPT) {
1498 			exf->encrypt(kschedule, block);
1499 			axf->Update(auth_ctx, block, len);
1500 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1501 			    crde->crd_skip + i, len, block);
1502 		} else {
1503 			axf->Update(auth_ctx, block, len);
1504 		}
1505 	}
1506 
1507 	/* Length block. */
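	/* (64-bit big-endian AAD bit count, then 64-bit big-endian
	 * payload bit count; only the low 32 bits of each can be
	 * non-zero here.) */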
1508 	bzero(block, sizeof(block));
1509 	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
1510 	((uint32_t *)block)[3] = htobe32(crde->crd_len * 8);
1511 	axf->Update(auth_ctx, block, sizeof(block));
1512 
1513 	/* Finalize MAC. */
1514 	axf->Final(digest, auth_ctx);
1515 
1516 	/* Inject or validate tag. */
1517 	if (crde->crd_flags & CRD_F_ENCRYPT) {
1518 		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
1519 		    sizeof(digest), digest);
1520 		error = 0;
1521 	} else {
1522 		char digest2[GMAC_DIGEST_LEN];
1523 
1524 		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
1525 		    sizeof(digest2), digest2);
1526 		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
1527 			error = 0;
1528 
1529 			/* Tag matches, decrypt data. */
1530 			for (i = 0; i < crde->crd_len; i += sizeof(block)) {
1531 				len = imin(crde->crd_len - i, sizeof(block));
1532 				crypto_copydata(crp->crp_flags, crp->crp_buf,
1533 				    crde->crd_skip + i, len, block);
1534 				bzero(block + len, sizeof(block) - len);
1535 				exf->decrypt(kschedule, block);
1536 				crypto_copyback(crp->crp_flags, crp->crp_buf,
1537 				    crde->crd_skip + i, len, block);
1538 			}
1539 		} else
1540 			error = EBADMSG;
1541 	}
1542 
1543 	exf->zerokey(&kschedule);
1544 out:
1545 	if (auth_ctx != NULL) {
1546 		memset(auth_ctx, 0, axf->ctxsize);
1547 		free(auth_ctx, M_CCR);
1548 	}
1549 	crp->crp_etype = error;
1550 	crypto_done(crp);
1551 }
1552 
1553 static void
1554 ccr_identify(driver_t *driver, device_t parent)
1555 {
1556 	struct adapter *sc;
1557 
1558 	sc = device_get_softc(parent);
1559 	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
1560 	    device_find_child(parent, "ccr", -1) == NULL)
1561 		device_add_child(parent, "ccr", -1);
1562 }
1563 
1564 static int
1565 ccr_probe(device_t dev)
1566 {
1567 
1568 	device_set_desc(dev, "Chelsio Crypto Accelerator");
1569 	return (BUS_PROBE_DEFAULT);
1570 }
1571 
1572 static void
1573 ccr_sysctls(struct ccr_softc *sc)
1574 {
1575 	struct sysctl_ctx_list *ctx;
1576 	struct sysctl_oid *oid;
1577 	struct sysctl_oid_list *children;
1578 
1579 	ctx = device_get_sysctl_ctx(sc->dev);
1580 
1581 	/*
1582 	 * dev.ccr.X.
1583 	 */
1584 	oid = device_get_sysctl_tree(sc->dev);
1585 	children = SYSCTL_CHILDREN(oid);
1586 
1587 	/*
1588 	 * dev.ccr.X.stats.
1589 	 */
1590 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
1591 	    NULL, "statistics");
1592 	children = SYSCTL_CHILDREN(oid);
1593 
1594 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
1595 	    &sc->stats_hmac, 0, "HMAC requests submitted");
1596 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
1597 	    &sc->stats_blkcipher_encrypt, 0,
1598 	    "Cipher encryption requests submitted");
1599 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
1600 	    &sc->stats_blkcipher_decrypt, 0,
1601 	    "Cipher decryption requests submitted");
1602 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
1603 	    &sc->stats_authenc_encrypt, 0,
1604 	    "Combined AES+HMAC encryption requests submitted");
1605 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
1606 	    &sc->stats_authenc_decrypt, 0,
1607 	    "Combined AES+HMAC decryption requests submitted");
1608 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
1609 	    &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
1610 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
1611 	    &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
1612 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
1613 	    &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
1614 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
1615 	    &sc->stats_inflight, 0, "Requests currently pending");
1616 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
1617 	    &sc->stats_mac_error, 0, "MAC errors");
1618 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
1619 	    &sc->stats_pad_error, 0, "Padding errors");
1620 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
1621 	    &sc->stats_bad_session, 0, "Requests with invalid session ID");
1622 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
1623 	    &sc->stats_sglist_error, 0,
1624 	    "Requests for which DMA mapping failed");
1625 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
1626 	    &sc->stats_process_error, 0, "Requests failed during queueing");
1627 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD,
1628 	    &sc->stats_sw_fallback, 0,
1629 	    "Requests processed by falling back to software");
1630 }
1631 
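/*
 * Attach: register with the opencrypto framework, allocate the
 * scatter/gather lists used to build work requests, and advertise the
 * supported algorithms.
 */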
1632 static int
1633 ccr_attach(device_t dev)
1634 {
1635 	struct ccr_softc *sc;
1636 	int32_t cid;
1637 
1638 	/*
1639 	 * TODO: Crypto requests will panic if the parent adapter has not
1640 	 * been fully initialized (i.e. its queues are not yet up and
1641 	 * running).  Figure out how to handle that correctly; perhaps
1642 	 * just reject requests until the adapter is fully initialized?
1643 	 */
1644 	sc = device_get_softc(dev);
1645 	sc->dev = dev;
1646 	sc->adapter = device_get_softc(device_get_parent(dev));
1647 	sc->txq = &sc->adapter->sge.ctrlq[0];
1648 	sc->rxq = &sc->adapter->sge.rxq[0];
1649 	cid = crypto_get_driverid(dev, sizeof(struct ccr_session),
1650 	    CRYPTOCAP_F_HARDWARE);
1651 	if (cid < 0) {
1652 		device_printf(dev, "could not get crypto driver id\n");
1653 		return (ENXIO);
1654 	}
1655 	sc->cid = cid;
1656 	sc->adapter->ccr_softc = sc;
1657 
1658 	/* XXX: TODO? */
1659 	sc->tx_channel_id = 0;
1660 
1661 	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
1662 	sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
1663 	sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
1664 	sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
1665 	sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
1666 	sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
1667 	ccr_sysctls(sc);
1668 
1669 	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
1670 	crypto_register(cid, CRYPTO_SHA2_224_HMAC, 0, 0);
1671 	crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
1672 	crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
1673 	crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
1674 	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
1675 	crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
1676 	crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
1677 	crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
1678 	crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
1679 	crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
1680 	crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
1681 	return (0);
1682 }
1683 
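/*
 * Detach: mark the softc as detaching so that new sessions are
 * refused, unregister from opencrypto, and release driver resources.
 */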
1684 static int
1685 ccr_detach(device_t dev)
1686 {
1687 	struct ccr_softc *sc;
1688 
1689 	sc = device_get_softc(dev);
1690 
1691 	mtx_lock(&sc->lock);
1692 	sc->detaching = true;
1693 	mtx_unlock(&sc->lock);
1694 
1695 	crypto_unregister_all(sc->cid);
1696 
1697 	mtx_destroy(&sc->lock);
1698 	sglist_free(sc->sg_iv_aad);
1699 	free(sc->iv_aad_buf, M_CCR);
1700 	sglist_free(sc->sg_dsgl);
1701 	sglist_free(sc->sg_ulptx);
1702 	sglist_free(sc->sg_crp);
1703 	sc->adapter->ccr_softc = NULL;
1704 	return (0);
1705 }
1706 
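/*
 * Copy the partial hash state out of the software auth context in the
 * big-endian layout expected by the hardware.  SHA-224 and SHA-384
 * copy the full SHA-256/SHA-512 state since the hardware resumes from
 * the untruncated partial digest.
 */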
1707 static void
1708 ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx)
1709 {
1710 	uint32_t *u32;
1711 	uint64_t *u64;
1712 	u_int i;
1713 
1714 	u32 = (uint32_t *)dst;
1715 	u64 = (uint64_t *)dst;
1716 	switch (cri_alg) {
1717 	case CRYPTO_SHA1_HMAC:
1718 		for (i = 0; i < SHA1_HASH_LEN / 4; i++)
1719 			u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]);
1720 		break;
1721 	case CRYPTO_SHA2_224_HMAC:
1722 		for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
1723 			u32[i] = htobe32(auth_ctx->sha224ctx.state[i]);
1724 		break;
1725 	case CRYPTO_SHA2_256_HMAC:
1726 		for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
1727 			u32[i] = htobe32(auth_ctx->sha256ctx.state[i]);
1728 		break;
1729 	case CRYPTO_SHA2_384_HMAC:
1730 		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
1731 			u64[i] = htobe64(auth_ctx->sha384ctx.state[i]);
1732 		break;
1733 	case CRYPTO_SHA2_512_HMAC:
1734 		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
1735 			u64[i] = htobe64(auth_ctx->sha512ctx.state[i]);
1736 		break;
1737 	}
1738 }
1739 
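/*
 * Precompute the partial hashes of the HMAC ipad and opad blocks so
 * that the hardware can resume the inner and outer hashes from this
 * state instead of rehashing the key for every request.
 */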
1740 static void
1741 ccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key,
1742     int klen)
1743 {
1744 	union authctx auth_ctx;
1745 	struct auth_hash *axf;
1746 	u_int i;
1747 
1748 	/*
1749 	 * If the key is larger than the block size, use the digest of
1750 	 * the key as the key instead.
1751 	 */
1752 	axf = s->hmac.auth_hash;
1753 	klen /= 8;
1754 	if (klen > axf->blocksize) {
1755 		axf->Init(&auth_ctx);
1756 		axf->Update(&auth_ctx, key, klen);
1757 		axf->Final(s->hmac.ipad, &auth_ctx);
1758 		klen = axf->hashsize;
1759 	} else
1760 		memcpy(s->hmac.ipad, key, klen);
1761 
1762 	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
1763 	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);
1764 
1765 	for (i = 0; i < axf->blocksize; i++) {
1766 		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
1767 		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
1768 	}
1769 
1770 	/*
1771 	 * Hash the raw ipad and opad and store the partial result in
1772 	 * the same buffer.
1773 	 */
1774 	axf->Init(&auth_ctx);
1775 	axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize);
1776 	ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx);
1777 
1778 	axf->Init(&auth_ctx);
1779 	axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
1780 	ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx);
1781 }
1782 
1783 /*
1784  * Borrowed from AES_GMAC_Setkey(): the GHASH subkey H is AES_K(0^128).
1785  */
1786 static void
1787 ccr_init_gmac_hash(struct ccr_session *s, char *key, int klen)
1788 {
1789 	static char zeroes[GMAC_BLOCK_LEN];
1790 	uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
1791 	int rounds;
1792 
1793 	rounds = rijndaelKeySetupEnc(keysched, key, klen);
1794 	rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h);
1795 }
1796 
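/*
 * Validate an AES key length given in bits.  AES-XTS keys are twice
 * the nominal cipher key size, so only 256- and 512-bit keys are
 * accepted for XTS.
 */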
1797 static int
1798 ccr_aes_check_keylen(int alg, int klen)
1799 {
1800 
1801 	switch (klen) {
1802 	case 128:
1803 	case 192:
1804 		if (alg == CRYPTO_AES_XTS)
1805 			return (EINVAL);
1806 		break;
1807 	case 256:
1808 		break;
1809 	case 512:
1810 		if (alg != CRYPTO_AES_XTS)
1811 			return (EINVAL);
1812 		break;
1813 	default:
1814 		return (EINVAL);
1815 	}
1816 	return (0);
1817 }
1818 
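/*
 * Store the cipher key (and, for CBC and XTS, the derived decryption
 * key) in the session and build the key context header describing the
 * key material copied into each work request.
 */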
1819 static void
1820 ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen)
1821 {
1822 	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
1823 	unsigned int opad_present;
1824 
1825 	if (alg == CRYPTO_AES_XTS)
1826 		kbits = klen / 2;
1827 	else
1828 		kbits = klen;
1829 	switch (kbits) {
1830 	case 128:
1831 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
1832 		break;
1833 	case 192:
1834 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
1835 		break;
1836 	case 256:
1837 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
1838 		break;
1839 	default:
1840 		panic("should not get here");
1841 	}
1842 
1843 	s->blkcipher.key_len = klen / 8;
1844 	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
1845 	switch (alg) {
1846 	case CRYPTO_AES_CBC:
1847 	case CRYPTO_AES_XTS:
1848 		t4_aes_getdeckey(s->blkcipher.deckey, key, kbits);
1849 		break;
1850 	}
1851 
1852 	kctx_len = roundup2(s->blkcipher.key_len, 16);
1853 	switch (s->mode) {
1854 	case AUTHENC:
1855 		mk_size = s->hmac.mk_size;
1856 		opad_present = 1;
1857 		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
1858 		kctx_len += iopad_size * 2;
1859 		break;
1860 	case GCM:
1861 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
1862 		opad_present = 0;
1863 		kctx_len += GMAC_BLOCK_LEN;
1864 		break;
1865 	default:
1866 		mk_size = CHCR_KEYCTX_NO_KEY;
1867 		opad_present = 0;
1868 		break;
1869 	}
1870 	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
1871 	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
1872 	    V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
1873 	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
1874 	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
1875 	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
1876 }
1877 
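/*
 * Validate the requested algorithm combination and initialize the
 * per-session state: HMAC partial hashes, the GHASH subkey, and/or
 * the AES key context.
 */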
1878 static int
1879 ccr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
1880 {
1881 	struct ccr_softc *sc;
1882 	struct ccr_session *s;
1883 	struct auth_hash *auth_hash;
1884 	struct cryptoini *c, *hash, *cipher;
1885 	unsigned int auth_mode, cipher_mode, iv_len, mk_size;
1886 	unsigned int partial_digest_len;
1887 	int error;
1888 	bool gcm_hash;
1889 
1890 	if (cri == NULL)
1891 		return (EINVAL);
1892 
1893 	gcm_hash = false;
1894 	cipher = NULL;
1895 	hash = NULL;
1896 	auth_hash = NULL;
1897 	auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
1898 	cipher_mode = CHCR_SCMD_CIPHER_MODE_NOP;
1899 	iv_len = 0;
1900 	mk_size = 0;
1901 	partial_digest_len = 0;
1902 	for (c = cri; c != NULL; c = c->cri_next) {
1903 		switch (c->cri_alg) {
1904 		case CRYPTO_SHA1_HMAC:
1905 		case CRYPTO_SHA2_224_HMAC:
1906 		case CRYPTO_SHA2_256_HMAC:
1907 		case CRYPTO_SHA2_384_HMAC:
1908 		case CRYPTO_SHA2_512_HMAC:
1909 		case CRYPTO_AES_128_NIST_GMAC:
1910 		case CRYPTO_AES_192_NIST_GMAC:
1911 		case CRYPTO_AES_256_NIST_GMAC:
1912 			if (hash)
1913 				return (EINVAL);
1914 			hash = c;
1915 			switch (c->cri_alg) {
1916 			case CRYPTO_SHA1_HMAC:
1917 				auth_hash = &auth_hash_hmac_sha1;
1918 				auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1919 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1920 				partial_digest_len = SHA1_HASH_LEN;
1921 				break;
1922 			case CRYPTO_SHA2_224_HMAC:
1923 				auth_hash = &auth_hash_hmac_sha2_224;
1924 				auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1925 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1926 				partial_digest_len = SHA2_256_HASH_LEN;
1927 				break;
1928 			case CRYPTO_SHA2_256_HMAC:
1929 				auth_hash = &auth_hash_hmac_sha2_256;
1930 				auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1931 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1932 				partial_digest_len = SHA2_256_HASH_LEN;
1933 				break;
1934 			case CRYPTO_SHA2_384_HMAC:
1935 				auth_hash = &auth_hash_hmac_sha2_384;
1936 				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1937 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1938 				partial_digest_len = SHA2_512_HASH_LEN;
1939 				break;
1940 			case CRYPTO_SHA2_512_HMAC:
1941 				auth_hash = &auth_hash_hmac_sha2_512;
1942 				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1943 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1944 				partial_digest_len = SHA2_512_HASH_LEN;
1945 				break;
1946 			case CRYPTO_AES_128_NIST_GMAC:
1947 			case CRYPTO_AES_192_NIST_GMAC:
1948 			case CRYPTO_AES_256_NIST_GMAC:
1949 				gcm_hash = true;
1950 				auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
1951 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
1952 				break;
1953 			}
1954 			break;
1955 		case CRYPTO_AES_CBC:
1956 		case CRYPTO_AES_ICM:
1957 		case CRYPTO_AES_NIST_GCM_16:
1958 		case CRYPTO_AES_XTS:
1959 			if (cipher)
1960 				return (EINVAL);
1961 			cipher = c;
1962 			switch (c->cri_alg) {
1963 			case CRYPTO_AES_CBC:
1964 				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
1965 				iv_len = AES_BLOCK_LEN;
1966 				break;
1967 			case CRYPTO_AES_ICM:
1968 				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1969 				iv_len = AES_BLOCK_LEN;
1970 				break;
1971 			case CRYPTO_AES_NIST_GCM_16:
1972 				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_GCM;
1973 				iv_len = AES_GCM_IV_LEN;
1974 				break;
1975 			case CRYPTO_AES_XTS:
1976 				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1977 				iv_len = AES_BLOCK_LEN;
1978 				break;
1979 			}
1980 			if (c->cri_key != NULL) {
1981 				error = ccr_aes_check_keylen(c->cri_alg,
1982 				    c->cri_klen);
1983 				if (error)
1984 					return (error);
1985 			}
1986 			break;
1987 		default:
1988 			return (EINVAL);
1989 		}
1990 	}
1991 	if (gcm_hash != (cipher_mode == CHCR_SCMD_CIPHER_MODE_AES_GCM))
1992 		return (EINVAL);
1993 	if (hash == NULL && cipher == NULL)
1994 		return (EINVAL);
1995 	if (hash != NULL && hash->cri_key == NULL)
1996 		return (EINVAL);
1997 
1998 	sc = device_get_softc(dev);
1999 	mtx_lock(&sc->lock);
2000 	if (sc->detaching) {
2001 		mtx_unlock(&sc->lock);
2002 		return (ENXIO);
2003 	}
2004 
2005 	s = crypto_get_driver_session(cses);
2006 
2007 	if (gcm_hash)
2008 		s->mode = GCM;
2009 	else if (hash != NULL && cipher != NULL)
2010 		s->mode = AUTHENC;
2011 	else if (hash != NULL)
2012 		s->mode = HMAC;
2013 	else {
2014 		MPASS(cipher != NULL);
2015 		s->mode = BLKCIPHER;
2016 	}
2017 	if (gcm_hash) {
2018 		if (hash->cri_mlen == 0)
2019 			s->gmac.hash_len = AES_GMAC_HASH_LEN;
2020 		else
2021 			s->gmac.hash_len = hash->cri_mlen;
2022 		ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen);
2023 	} else if (hash != NULL) {
2024 		s->hmac.auth_hash = auth_hash;
2025 		s->hmac.auth_mode = auth_mode;
2026 		s->hmac.mk_size = mk_size;
2027 		s->hmac.partial_digest_len = partial_digest_len;
2028 		if (hash->cri_mlen == 0)
2029 			s->hmac.hash_len = auth_hash->hashsize;
2030 		else
2031 			s->hmac.hash_len = hash->cri_mlen;
2032 		ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
2033 		    hash->cri_klen);
2034 	}
2035 	if (cipher != NULL) {
2036 		s->blkcipher.cipher_mode = cipher_mode;
2037 		s->blkcipher.iv_len = iv_len;
2038 		if (cipher->cri_key != NULL)
2039 			ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
2040 			    cipher->cri_klen);
2041 	}
2042 
2043 	s->active = true;
2044 	mtx_unlock(&sc->lock);
2045 	return (0);
2046 }
2047 
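/*
 * Mark a session inactive.  Requests still in flight are reported but
 * not cancelled.
 */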
2048 static void
2049 ccr_freesession(device_t dev, crypto_session_t cses)
2050 {
2051 	struct ccr_softc *sc;
2052 	struct ccr_session *s;
2053 
2054 	sc = device_get_softc(dev);
2055 	s = crypto_get_driver_session(cses);
2056 	mtx_lock(&sc->lock);
2057 	if (s->pending != 0)
2058 		device_printf(dev,
2059 		    "session %p freed with %d pending requests\n", s,
2060 		    s->pending);
2061 	s->active = false;
2062 	mtx_unlock(&sc->lock);
2063 }
2064 
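/*
 * Dispatch a crypto request to the hardware.  Per-request explicit
 * keys are applied to the session first; a submission failure
 * completes the request with an error instead of returning one to
 * the framework.
 */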
2065 static int
2066 ccr_process(device_t dev, struct cryptop *crp, int hint)
2067 {
2068 	struct ccr_softc *sc;
2069 	struct ccr_session *s;
2070 	struct cryptodesc *crd, *crda, *crde;
2071 	int error;
2072 
2073 	if (crp == NULL)
2074 		return (EINVAL);
2075 
2076 	crd = crp->crp_desc;
2077 	s = crypto_get_driver_session(crp->crp_session);
2078 	sc = device_get_softc(dev);
2079 
2080 	mtx_lock(&sc->lock);
2081 	error = ccr_populate_sglist(sc->sg_crp, crp);
2082 	if (error) {
2083 		sc->stats_sglist_error++;
2084 		goto out;
2085 	}
2086 
2087 	switch (s->mode) {
2088 	case HMAC:
2089 		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
2090 			ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
2091 			    crd->crd_klen);
2092 		error = ccr_hmac(sc, s, crp);
2093 		if (error == 0)
2094 			sc->stats_hmac++;
2095 		break;
2096 	case BLKCIPHER:
2097 		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
2098 			error = ccr_aes_check_keylen(crd->crd_alg,
2099 			    crd->crd_klen);
2100 			if (error)
2101 				break;
2102 			ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
2103 			    crd->crd_klen);
2104 		}
2105 		error = ccr_blkcipher(sc, s, crp);
2106 		if (error == 0) {
2107 			if (crd->crd_flags & CRD_F_ENCRYPT)
2108 				sc->stats_blkcipher_encrypt++;
2109 			else
2110 				sc->stats_blkcipher_decrypt++;
2111 		}
2112 		break;
2113 	case AUTHENC:
2114 		error = 0;
2115 		switch (crd->crd_alg) {
2116 		case CRYPTO_AES_CBC:
2117 		case CRYPTO_AES_ICM:
2118 		case CRYPTO_AES_XTS:
2119 			/* Cipher first: must be encrypt-then-authenticate. */
2120 			crde = crd;
2121 			crda = crd->crd_next;
2122 			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
2123 				error = EINVAL;
2124 				break;
2125 			}
2126 			break;
2127 		default:
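			/* Hash first: this must be a decrypt request. */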
2128 			crda = crd;
2129 			crde = crd->crd_next;
2130 			if (crde->crd_flags & CRD_F_ENCRYPT) {
2131 				error = EINVAL;
2132 				break;
2133 			}
2134 			break;
2135 		}
2136 		if (error)
2137 			break;
2138 		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
2139 			ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
2140 			    crda->crd_klen);
2141 		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
2142 			error = ccr_aes_check_keylen(crde->crd_alg,
2143 			    crde->crd_klen);
2144 			if (error)
2145 				break;
2146 			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
2147 			    crde->crd_klen);
2148 		}
2149 		error = ccr_authenc(sc, s, crp, crda, crde);
2150 		if (error == 0) {
2151 			if (crde->crd_flags & CRD_F_ENCRYPT)
2152 				sc->stats_authenc_encrypt++;
2153 			else
2154 				sc->stats_authenc_decrypt++;
2155 		}
2156 		break;
2157 	case GCM:
2158 		error = 0;
2159 		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
2160 			crde = crd;
2161 			crda = crd->crd_next;
2162 		} else {
2163 			crda = crd;
2164 			crde = crd->crd_next;
2165 		}
2166 		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
2167 			ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen);
2168 		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
2169 			error = ccr_aes_check_keylen(crde->crd_alg,
2170 			    crde->crd_klen);
2171 			if (error)
2172 				break;
2173 			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
2174 			    crde->crd_klen);
2175 		}
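		/*
		 * The hardware cannot handle GCM requests with an empty
		 * payload, so handle those in software instead.
		 */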
2176 		if (crde->crd_len == 0) {
2177 			mtx_unlock(&sc->lock);
2178 			ccr_gcm_soft(s, crp, crda, crde);
2179 			return (0);
2180 		}
2181 		error = ccr_gcm(sc, s, crp, crda, crde);
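		/*
		 * EMSGSIZE means the request could not fit in a single
		 * work request, so fall back to software.
		 */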
2182 		if (error == EMSGSIZE) {
2183 			sc->stats_sw_fallback++;
2184 			mtx_unlock(&sc->lock);
2185 			ccr_gcm_soft(s, crp, crda, crde);
2186 			return (0);
2187 		}
2188 		if (error == 0) {
2189 			if (crde->crd_flags & CRD_F_ENCRYPT)
2190 				sc->stats_gcm_encrypt++;
2191 			else
2192 				sc->stats_gcm_decrypt++;
2193 		}
2194 		break;
2195 	}
2196 
2197 	if (error == 0) {
2198 		s->pending++;
2199 		sc->stats_inflight++;
2200 	} else
2201 		sc->stats_process_error++;
2202 
2203 out:
2204 	mtx_unlock(&sc->lock);
2205 
2206 	if (error) {
2207 		crp->crp_etype = error;
2208 		crypto_done(crp);
2209 	}
2210 
2211 	return (0);
2212 }
2213 
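/*
 * Handle a CPL_FW6_PLD completion from the hardware.  The cookie in
 * the message recovers the original cryptop, and the status word
 * reports MAC and padding errors.
 */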
2214 static int
2215 do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
2216     struct mbuf *m)
2217 {
2218 	struct ccr_softc *sc = iq->adapter->ccr_softc;
2219 	struct ccr_session *s;
2220 	const struct cpl_fw6_pld *cpl;
2221 	struct cryptop *crp;
2222 	uint32_t status;
2223 	int error;
2224 
2225 	if (m != NULL)
2226 		cpl = mtod(m, const void *);
2227 	else
2228 		cpl = (const void *)(rss + 1);
2229 
2230 	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
2231 	s = crypto_get_driver_session(crp->crp_session);
2232 	status = be64toh(cpl->data[0]);
2233 	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
2234 		error = EBADMSG;
2235 	else
2236 		error = 0;
2237 
2238 	mtx_lock(&sc->lock);
2239 	s->pending--;
2240 	sc->stats_inflight--;
2241 
2242 	switch (s->mode) {
2243 	case HMAC:
2244 		error = ccr_hmac_done(sc, s, crp, cpl, error);
2245 		break;
2246 	case BLKCIPHER:
2247 		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
2248 		break;
2249 	case AUTHENC:
2250 		error = ccr_authenc_done(sc, s, crp, cpl, error);
2251 		break;
2252 	case GCM:
2253 		error = ccr_gcm_done(sc, s, crp, cpl, error);
2254 		break;
2255 	}
2256 
2257 	if (error == EBADMSG) {
2258 		if (CHK_MAC_ERR_BIT(status))
2259 			sc->stats_mac_error++;
2260 		if (CHK_PAD_ERR_BIT(status))
2261 			sc->stats_pad_error++;
2262 	}
2263 	mtx_unlock(&sc->lock);
2264 	crp->crp_etype = error;
2265 	crypto_done(crp);
2266 	m_freem(m);
2267 	return (0);
2268 }
2269 
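/*
 * Install the CPL_FW6_PLD reply handler on module load and remove it
 * on unload.
 */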
2270 static int
2271 ccr_modevent(module_t mod, int cmd, void *arg)
2272 {
2273 
2274 	switch (cmd) {
2275 	case MOD_LOAD:
2276 		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
2277 		return (0);
2278 	case MOD_UNLOAD:
2279 		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
2280 		return (0);
2281 	default:
2282 		return (EOPNOTSUPP);
2283 	}
2284 }
2285 
2286 static device_method_t ccr_methods[] = {
2287 	DEVMETHOD(device_identify,	ccr_identify),
2288 	DEVMETHOD(device_probe,		ccr_probe),
2289 	DEVMETHOD(device_attach,	ccr_attach),
2290 	DEVMETHOD(device_detach,	ccr_detach),
2291 
2292 	DEVMETHOD(cryptodev_newsession,	ccr_newsession),
2293 	DEVMETHOD(cryptodev_freesession, ccr_freesession),
2294 	DEVMETHOD(cryptodev_process,	ccr_process),
2295 
2296 	DEVMETHOD_END
2297 };
2298 
2299 static driver_t ccr_driver = {
2300 	"ccr",
2301 	ccr_methods,
2302 	sizeof(struct ccr_softc)
2303 };
2304 
2305 static devclass_t ccr_devclass;
2306 
2307 DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
2308 MODULE_VERSION(ccr, 1);
2309 MODULE_DEPEND(ccr, crypto, 1, 1, 1);
2310 MODULE_DEPEND(ccr, t6nex, 1, 1, 1);
2311