xref: /freebsd/sys/dev/cxgbe/crypto/t4_crypto.c (revision fa50a3552d1e759e1bb65e54cb0b7e863bcf54d5)
1 /*-
2  * Copyright (c) 2017 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: John Baldwin <jhb@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/types.h>
32 #include <sys/bus.h>
33 #include <sys/lock.h>
34 #include <sys/malloc.h>
35 #include <sys/mutex.h>
36 #include <sys/module.h>
37 #include <sys/sglist.h>
38 
39 #include <opencrypto/cryptodev.h>
40 #include <opencrypto/xform.h>
41 
42 #include "cryptodev_if.h"
43 
44 #include "common/common.h"
45 #include "crypto/t4_crypto.h"
46 
47 /*
48  * Requests consist of:
49  *
50  * +-------------------------------+
51  * | struct fw_crypto_lookaside_wr |
52  * +-------------------------------+
53  * | struct ulp_txpkt              |
54  * +-------------------------------+
55  * | struct ulptx_idata            |
56  * +-------------------------------+
57  * | struct cpl_tx_sec_pdu         |
58  * +-------------------------------+
59  * | struct cpl_tls_tx_scmd_fmt    |
60  * +-------------------------------+
61  * | key context header            |
62  * +-------------------------------+
63  * | AES key                       |  ----- For requests with AES
64  * +-------------------------------+
65  * | Hash state                    |  ----- For hash-only requests
66  * +-------------------------------+ -
67  * | IPAD (16-byte aligned)        |  \
68  * +-------------------------------+  +---- For requests with HMAC
69  * | OPAD (16-byte aligned)        |  /
70  * +-------------------------------+ -
71  * | GMAC H                        |  ----- For AES-GCM
72  * +-------------------------------+ -
73  * | struct cpl_rx_phys_dsgl       |  \
74  * +-------------------------------+  +---- Destination buffer for
75  * | PHYS_DSGL entries             |  /     non-hash-only requests
76  * +-------------------------------+ -
77  * | 16 dummy bytes                |  ----- Only for HMAC/hash-only requests
78  * +-------------------------------+
79  * | IV                            |  ----- If immediate IV
80  * +-------------------------------+
81  * | Payload                       |  ----- If immediate Payload
82  * +-------------------------------+ -
83  * | struct ulptx_sgl              |  \
84  * +-------------------------------+  +---- If payload via SGL
85  * | SGL entries                   |  /
86  * +-------------------------------+ -
87  *
88  * Note that the key context must be padded to ensure 16-byte alignment.
89  * For HMAC requests, the key consists of the partial hash of the IPAD
90  * followed by the partial hash of the OPAD.
91  *
92  * Replies consist of:
93  *
94  * +-------------------------------+
95  * | struct cpl_fw6_pld            |
96  * +-------------------------------+
97  * | hash digest                   |  ----- For HMAC request with
98  * +-------------------------------+        'hash_size' set in work request
99  *
100  * A 32-bit big-endian error status word is supplied in the last 4
101  * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
102  * "MAC" error and bit 1 indicates a "PAD" error.
103  *
104  * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
105  * in the request is returned in data[1] of the CPL_FW6_PLD message.
106  *
107  * For block cipher replies, the updated IV is supplied in data[2] and
108  * data[3] of the CPL_FW6_PLD message.
109  *
110  * For hash replies where the work request set 'hash_size' to request
111  * a copy of the hash in the reply, the hash digest is supplied
112  * immediately following the CPL_FW6_PLD message.
113  */
114 
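/*
 * As an illustration only (a sketch, not the driver's actual
 * completion path), the two error bits described above could be
 * recovered from a reply 'cpl' roughly like this:
 *
 *	uint32_t status = be64toh(cpl->data[0]) & 0xffffffff;
 *	bool mac_error = (status & 1) != 0;
 *	bool pad_error = (status & 2) != 0;
 *
 * The driver itself uses helper macros for these checks (e.g.
 * CHK_PAD_ERR_BIT(), used below).
 */
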
115 /*
116  * The crypto engine supports a maximum AAD size of 511 bytes.
117  */
118 #define	MAX_AAD_LEN		511
119 
120 /*
121  * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
122  * entries.  While the CPL includes a 16-bit length field, the T6 can
123  * sometimes hang if an error occurs while processing a request with a
124  * single DSGL entry larger than 2k.
125  */
126 #define	MAX_RX_PHYS_DSGL_SGE	32
127 #define	DSGL_SGE_MAXLEN		2048
128 
129 /*
130  * The adapter only supports requests with a total input or output
131  * length of 64k-1 or smaller.  Longer requests either hang or
132  * produce incorrect results.
133  */
134 #define	MAX_REQUEST_SIZE	65535
135 
136 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");
137 
138 struct ccr_session_hmac {
139 	struct auth_hash *auth_hash;
140 	int hash_len;
141 	unsigned int partial_digest_len;
142 	unsigned int auth_mode;
143 	unsigned int mk_size;
144 	char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
145 	char opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
146 };
147 
148 struct ccr_session_gmac {
149 	int hash_len;
150 	char ghash_h[GMAC_BLOCK_LEN];
151 };
152 
153 struct ccr_session_blkcipher {
154 	unsigned int cipher_mode;
155 	unsigned int key_len;
156 	unsigned int iv_len;
157 	__be32 key_ctx_hdr;
158 	char enckey[CHCR_AES_MAX_KEY_LEN];
159 	char deckey[CHCR_AES_MAX_KEY_LEN];
160 };
161 
162 struct ccr_session {
163 	bool active;
164 	int pending;
165 	enum { HASH, HMAC, BLKCIPHER, AUTHENC, GCM } mode;
166 	union {
167 		struct ccr_session_hmac hmac;
168 		struct ccr_session_gmac gmac;
169 	};
170 	struct ccr_session_blkcipher blkcipher;
171 };
172 
173 struct ccr_softc {
174 	struct adapter *adapter;
175 	device_t dev;
176 	uint32_t cid;
177 	int tx_channel_id;
178 	struct mtx lock;
179 	bool detaching;
180 	struct sge_wrq *txq;
181 	struct sge_rxq *rxq;
182 
183 	/*
184 	 * Pre-allocate S/G lists used when preparing a work request.
185 	 * 'sg_crp' contains an sglist describing the entire buffer
186 	 * for a 'struct cryptop'.  'sg_ulptx' is used to describe
187 	 * the data the engine should DMA as input via ULPTX_SGL.
188 	 * 'sg_dsgl' is used to describe the destination that cipher
189 	 * text and a tag should be written to.
190 	 */
191 	struct sglist *sg_crp;
192 	struct sglist *sg_ulptx;
193 	struct sglist *sg_dsgl;
194 
195 	/*
196 	 * Pre-allocate a dummy output buffer for the IV and AAD for
197 	 * AEAD requests.
198 	 */
199 	char *iv_aad_buf;
200 	struct sglist *sg_iv_aad;
201 
202 	/* Statistics. */
203 	uint64_t stats_blkcipher_encrypt;
204 	uint64_t stats_blkcipher_decrypt;
205 	uint64_t stats_hash;
206 	uint64_t stats_hmac;
207 	uint64_t stats_authenc_encrypt;
208 	uint64_t stats_authenc_decrypt;
209 	uint64_t stats_gcm_encrypt;
210 	uint64_t stats_gcm_decrypt;
211 	uint64_t stats_wr_nomem;
212 	uint64_t stats_inflight;
213 	uint64_t stats_mac_error;
214 	uint64_t stats_pad_error;
215 	uint64_t stats_bad_session;
216 	uint64_t stats_sglist_error;
217 	uint64_t stats_process_error;
218 	uint64_t stats_sw_fallback;
219 };
220 
221 /*
222  * Crypto requests involve two kinds of scatter/gather lists.
223  *
224  * Non-hash-only requests require a PHYS_DSGL that describes the
225  * location to store the results of the encryption or decryption
226  * operation.  This SGL uses a different format (PHYS_DSGL) and should
227  * exclude the crd_skip bytes at the start of the data as well as
228  * any AAD or IV.  For authenticated encryption requests it should
229  * also cover the destination of the hash or tag.
230  *
231  * The input payload may either be supplied inline as immediate data,
232  * or via a standard ULP_TX SGL.  This SGL should include AAD,
233  * ciphertext, and the hash or tag for authenticated decryption
234  * requests.
235  *
236  * These scatter/gather lists can describe different subsets of the
237  * buffer described by the crypto operation.  ccr_populate_sglist()
238  * generates a scatter/gather list that covers the entire crypto
239  * operation buffer that is then used to construct the other
240  * scatter/gather lists.
241  */
242 static int
243 ccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
244 {
245 	int error;
246 
247 	sglist_reset(sg);
248 	if (crp->crp_flags & CRYPTO_F_IMBUF)
249 		error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
250 	else if (crp->crp_flags & CRYPTO_F_IOV)
251 		error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
252 	else
253 		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
254 	return (error);
255 }
256 
257 /*
258  * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
259  * segments.
260  */
261 static int
262 ccr_count_sgl(struct sglist *sg, int maxsegsize)
263 {
264 	int i, nsegs;
265 
266 	nsegs = 0;
267 	for (i = 0; i < sg->sg_nseg; i++)
268 		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
269 	return (nsegs);
270 }
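
/*
 * For example, with two segments of 5000 and 1000 bytes and a
 * maxsegsize of DSGL_SGE_MAXLEN (2048), this counts
 * howmany(5000, 2048) + howmany(1000, 2048) = 3 + 1 = 4 segments.
 */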
271 
272 /* These functions deal with PHYS_DSGL for the reply buffer. */
273 static inline int
274 ccr_phys_dsgl_len(int nsegs)
275 {
276 	int len;
277 
278 	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
279 	if ((nsegs % 8) != 0) {
280 		len += sizeof(uint16_t) * 8;
281 		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
282 	}
283 	return (len);
284 }
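
/*
 * Worked example (byte counts follow from the formula above and are
 * illustrative): for nsegs = 10, the first 8 entries consume one full
 * struct phys_sge_pairs (8 two-byte lengths plus 8 eight-byte
 * addresses, 80 bytes) and the remaining 2 entries add 16 bytes of
 * lengths plus roundup2(2, 2) * 8 = 16 bytes of addresses, for a
 * total of 112 bytes.
 */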
285 
286 static void
287 ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs)
288 {
289 	struct sglist *sg;
290 	struct cpl_rx_phys_dsgl *cpl;
291 	struct phys_sge_pairs *sgl;
292 	vm_paddr_t paddr;
293 	size_t seglen;
294 	u_int i, j;
295 
296 	sg = sc->sg_dsgl;
297 	cpl = dst;
298 	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
299 	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
300 	cpl->pcirlxorder_to_noofsgentr = htobe32(
301 	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
302 	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
303 	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
304 	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
305 	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
306 	cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id);
307 	cpl->rss_hdr_int.hash_val = 0;
308 	sgl = (struct phys_sge_pairs *)(cpl + 1);
309 	j = 0;
310 	for (i = 0; i < sg->sg_nseg; i++) {
311 		seglen = sg->sg_segs[i].ss_len;
312 		paddr = sg->sg_segs[i].ss_paddr;
313 		do {
314 			sgl->addr[j] = htobe64(paddr);
315 			if (seglen > DSGL_SGE_MAXLEN) {
316 				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
317 				paddr += DSGL_SGE_MAXLEN;
318 				seglen -= DSGL_SGE_MAXLEN;
319 			} else {
320 				sgl->len[j] = htobe16(seglen);
321 				seglen = 0;
322 			}
323 			j++;
324 			if (j == 8) {
325 				sgl++;
326 				j = 0;
327 			}
328 		} while (seglen != 0);
329 	}
330 	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
331 }
332 
333 /* These functions deal with the ULPTX_SGL for input payload. */
334 static inline int
335 ccr_ulptx_sgl_len(int nsegs)
336 {
337 	u_int n;
338 
339 	nsegs--; /* first segment is part of ulptx_sgl */
340 	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
341 	return (roundup2(n, 16));
342 }
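
/*
 * Worked example (assuming the 16-byte fixed part of struct
 * ulptx_sgl): for nsegs = 4, one segment lives in the ulptx_sgl
 * header and the remaining 3 need 8 * ((3 * 3) / 2 + 1) = 40 bytes
 * of sge pairs, so n = 56 and roundup2(56, 16) = 64 is returned.
 */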
343 
344 static void
345 ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
346 {
347 	struct ulptx_sgl *usgl;
348 	struct sglist *sg;
349 	struct sglist_seg *ss;
350 	int i;
351 
352 	sg = sc->sg_ulptx;
353 	MPASS(nsegs == sg->sg_nseg);
354 	ss = &sg->sg_segs[0];
355 	usgl = dst;
356 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
357 	    V_ULPTX_NSGE(nsegs));
358 	usgl->len0 = htobe32(ss->ss_len);
359 	usgl->addr0 = htobe64(ss->ss_paddr);
360 	ss++;
361 	for (i = 0; i < sg->sg_nseg - 1; i++) {
362 		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
363 		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
364 		ss++;
365 	}
366 
367 }
368 
369 static bool
370 ccr_use_imm_data(u_int transhdr_len, u_int input_len)
371 {
372 
373 	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
374 		return (false);
375 	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
376 	    SGE_MAX_WR_LEN)
377 		return (false);
378 	return (true);
379 }
380 
381 static void
382 ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len,
383     u_int wr_len, u_int imm_len, u_int sgl_len, u_int hash_size,
384     struct cryptop *crp)
385 {
386 	u_int cctx_size;
387 
388 	cctx_size = sizeof(struct _key_ctx) + kctx_len;
389 	crwr->wreq.op_to_cctx_size = htobe32(
390 	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
391 	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
392 	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
393 	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
394 	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
395 	crwr->wreq.len16_pkd = htobe32(
396 	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
397 	crwr->wreq.session_id = 0;
398 	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
399 	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) |
400 	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
401 	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
402 	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
403 	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
404 	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
405 	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id));
406 	crwr->wreq.key_addr = 0;
407 	crwr->wreq.pld_size_hash_size = htobe32(
408 	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
409 	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
410 	crwr->wreq.cookie = htobe64((uintptr_t)crp);
411 
412 	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
413 	    V_ULP_TXPKT_DATAMODIFY(0) |
414 	    V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) |
415 	    V_ULP_TXPKT_FID(0) | V_ULP_TXPKT_RO(1));
416 	crwr->ulptx.len = htobe32(
417 	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
418 
419 	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
420 	    V_ULP_TX_SC_MORE(imm_len != 0 ? 0 : 1));
421 	crwr->sc_imm.len = htobe32(wr_len - offsetof(struct chcr_wr, sec_cpl) -
422 	    sgl_len);
423 }
424 
425 static int
426 ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
427 {
428 	struct chcr_wr *crwr;
429 	struct wrqe *wr;
430 	struct auth_hash *axf;
431 	struct cryptodesc *crd;
432 	char *dst;
433 	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
434 	u_int hmac_ctrl, imm_len, iopad_size;
435 	int error, sgl_nsegs, sgl_len, use_opad;
436 
437 	crd = crp->crp_desc;
438 
439 	/* Reject requests with too large of an input buffer. */
440 	if (crd->crd_len > MAX_REQUEST_SIZE)
441 		return (EFBIG);
442 
443 	axf = s->hmac.auth_hash;
444 
445 	if (s->mode == HMAC) {
446 		use_opad = 1;
447 		hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
448 	} else {
449 		use_opad = 0;
450 		hmac_ctrl = SCMD_HMAC_CTRL_NOP;
451 	}
452 
453 	/* PADs must be 128-bit aligned. */
454 	iopad_size = roundup2(s->hmac.partial_digest_len, 16);
455 
456 	/*
457 	 * The 'key' part of the context includes the aligned IPAD and
458 	 * OPAD.
459 	 */
460 	kctx_len = iopad_size;
461 	if (use_opad)
462 		kctx_len += iopad_size;
463 	hash_size_in_response = axf->hashsize;
464 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
465 
466 	if (crd->crd_len == 0) {
467 		imm_len = axf->blocksize;
468 		sgl_nsegs = 0;
469 		sgl_len = 0;
470 	} else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) {
471 		imm_len = crd->crd_len;
472 		sgl_nsegs = 0;
473 		sgl_len = 0;
474 	} else {
475 		imm_len = 0;
476 		sglist_reset(sc->sg_ulptx);
477 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
478 		    crd->crd_skip, crd->crd_len);
479 		if (error)
480 			return (error);
481 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
482 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
483 	}
484 
485 	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
486 	if (wr_len > SGE_MAX_WR_LEN)
487 		return (EFBIG);
488 	wr = alloc_wrqe(wr_len, sc->txq);
489 	if (wr == NULL) {
490 		sc->stats_wr_nomem++;
491 		return (ENOMEM);
492 	}
493 	crwr = wrtod(wr);
494 	memset(crwr, 0, wr_len);
495 
496 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
497 	    hash_size_in_response, crp);
498 
499 	/* XXX: Hardcodes SGE loopback channel of 0. */
500 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
501 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
502 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
503 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
504 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
505 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));
506 
507 	crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize :
508 	    crd->crd_len);
509 
510 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
511 	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));
512 
513 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
514 	crwr->sec_cpl.seqno_numivs = htobe32(
515 	    V_SCMD_SEQ_NO_CTRL(0) |
516 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
517 	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) |
518 	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
519 	    V_SCMD_HMAC_CTRL(hmac_ctrl));
520 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
521 	    V_SCMD_LAST_FRAG(0) |
522 	    V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1));
523 
524 	memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len);
525 	if (use_opad)
526 		memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad,
527 		    s->hmac.partial_digest_len);
528 
529 	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
530 	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
531 	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
532 	    V_KEY_CONTEXT_OPAD_PRESENT(use_opad) |
533 	    V_KEY_CONTEXT_SALT_PRESENT(1) |
534 	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
535 	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));
536 
537 	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
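	/*
	 * For an empty payload the engine is handed a single block of
	 * immediate data: a leading 0x80 byte, zeroes (the WR was
	 * zeroed above), and the block's length in bits in the final
	 * 8 bytes.  This pairs with V_SCMD_MORE_FRAGS(1) set above for
	 * the zero-length case.
	 */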
538 	if (crd->crd_len == 0) {
539 		dst[0] = 0x80;
540 		*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
541 		    htobe64(axf->blocksize << 3);
542 	} else if (imm_len != 0)
543 		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
544 		    crd->crd_len, dst);
545 	else
546 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
547 
548 	/* XXX: TODO backpressure */
549 	t4_wrq_tx(sc->adapter, wr);
550 
551 	return (0);
552 }
553 
554 static int
555 ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
556     const struct cpl_fw6_pld *cpl, int error)
557 {
558 	struct cryptodesc *crd;
559 
560 	crd = crp->crp_desc;
561 	if (error == 0) {
562 		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
563 		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
564 	}
565 
566 	return (error);
567 }
568 
569 static int
570 ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
571 {
572 	char iv[CHCR_MAX_CRYPTO_IV_LEN];
573 	struct chcr_wr *crwr;
574 	struct wrqe *wr;
575 	struct cryptodesc *crd;
576 	char *dst;
577 	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
578 	u_int imm_len;
579 	int dsgl_nsegs, dsgl_len;
580 	int sgl_nsegs, sgl_len;
581 	int error;
582 
583 	crd = crp->crp_desc;
584 
585 	if (s->blkcipher.key_len == 0 || crd->crd_len == 0)
586 		return (EINVAL);
587 	if (crd->crd_alg == CRYPTO_AES_CBC &&
588 	    (crd->crd_len % AES_BLOCK_LEN) != 0)
589 		return (EINVAL);
590 
591 	/* Reject requests with too large of an input buffer. */
592 	if (crd->crd_len > MAX_REQUEST_SIZE)
593 		return (EFBIG);
594 
595 	if (crd->crd_flags & CRD_F_ENCRYPT)
596 		op_type = CHCR_ENCRYPT_OP;
597 	else
598 		op_type = CHCR_DECRYPT_OP;
599 
600 	sglist_reset(sc->sg_dsgl);
601 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
602 	    crd->crd_len);
603 	if (error)
604 		return (error);
605 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
606 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
607 		return (EFBIG);
608 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
609 
610 	/* The 'key' must be 128-bit aligned. */
611 	kctx_len = roundup2(s->blkcipher.key_len, 16);
612 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
613 
614 	if (ccr_use_imm_data(transhdr_len, crd->crd_len +
615 	    s->blkcipher.iv_len)) {
616 		imm_len = crd->crd_len;
617 		sgl_nsegs = 0;
618 		sgl_len = 0;
619 	} else {
620 		imm_len = 0;
621 		sglist_reset(sc->sg_ulptx);
622 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
623 		    crd->crd_skip, crd->crd_len);
624 		if (error)
625 			return (error);
626 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
627 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
628 	}
629 
630 	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
631 	    roundup2(imm_len, 16) + sgl_len;
632 	if (wr_len > SGE_MAX_WR_LEN)
633 		return (EFBIG);
634 	wr = alloc_wrqe(wr_len, sc->txq);
635 	if (wr == NULL) {
636 		sc->stats_wr_nomem++;
637 		return (ENOMEM);
638 	}
639 	crwr = wrtod(wr);
640 	memset(crwr, 0, wr_len);
641 
642 	/*
643 	 * Read the existing IV from the request or generate a random
644 	 * one if none is provided.  Optionally copy the generated IV
645 	 * into the output buffer if requested.
646 	 */
647 	if (op_type == CHCR_ENCRYPT_OP) {
648 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
649 			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
650 		else
651 			arc4rand(iv, s->blkcipher.iv_len, 0);
652 		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
653 			crypto_copyback(crp->crp_flags, crp->crp_buf,
654 			    crd->crd_inject, s->blkcipher.iv_len, iv);
655 	} else {
656 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
657 			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
658 		else
659 			crypto_copydata(crp->crp_flags, crp->crp_buf,
660 			    crd->crd_inject, s->blkcipher.iv_len, iv);
661 	}
662 
663 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
664 	    crp);
665 
666 	/* XXX: Hardcodes SGE loopback channel of 0. */
667 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
668 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
669 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
670 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
671 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
672 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
673 
674 	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len);
675 
676 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
677 	    V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) |
678 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
679 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
680 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));
681 
682 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
683 	crwr->sec_cpl.seqno_numivs = htobe32(
684 	    V_SCMD_SEQ_NO_CTRL(0) |
685 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
686 	    V_SCMD_ENC_DEC_CTRL(op_type) |
687 	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
688 	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
689 	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
690 	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
691 	    V_SCMD_NUM_IVS(0));
692 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
693 	    V_SCMD_IV_GEN_CTRL(0) |
694 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
695 	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
696 
697 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
698 	switch (crd->crd_alg) {
699 	case CRYPTO_AES_CBC:
700 		if (crd->crd_flags & CRD_F_ENCRYPT)
701 			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
702 			    s->blkcipher.key_len);
703 		else
704 			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
705 			    s->blkcipher.key_len);
706 		break;
707 	case CRYPTO_AES_ICM:
708 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
709 		    s->blkcipher.key_len);
710 		break;
711 	case CRYPTO_AES_XTS:
712 		key_half = s->blkcipher.key_len / 2;
713 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
714 		    key_half);
715 		if (crd->crd_flags & CRD_F_ENCRYPT)
716 			memcpy(crwr->key_ctx.key + key_half,
717 			    s->blkcipher.enckey, key_half);
718 		else
719 			memcpy(crwr->key_ctx.key + key_half,
720 			    s->blkcipher.deckey, key_half);
721 		break;
722 	}
723 
724 	dst = (char *)(crwr + 1) + kctx_len;
725 	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
726 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
727 	memcpy(dst, iv, s->blkcipher.iv_len);
728 	dst += s->blkcipher.iv_len;
729 	if (imm_len != 0)
730 		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
731 		    crd->crd_len, dst);
732 	else
733 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
734 
735 	/* XXX: TODO backpressure */
736 	t4_wrq_tx(sc->adapter, wr);
737 
738 	return (0);
739 }
740 
741 static int
742 ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
743     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
744 {
745 
746 	/*
747 	 * The updated IV to permit chained requests is at
748 	 * cpl->data[2], but OCF doesn't permit chained requests.
749 	 */
750 	return (error);
751 }
752 
753 /*
754  * 'hashsize' is the length of a full digest.  'authsize' is the
755  * requested digest length for this operation which may be less
756  * than 'hashsize'.
757  */
758 static int
759 ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
760 {
761 
762 	if (authsize == 10)
763 		return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
764 	if (authsize == 12)
765 		return (SCMD_HMAC_CTRL_IPSEC_96BIT);
766 	if (authsize == hashsize / 2)
767 		return (SCMD_HMAC_CTRL_DIV2);
768 	return (SCMD_HMAC_CTRL_NO_TRUNC);
769 }
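
/*
 * For example, SHA-256 HMAC with a 12-byte tag (IPsec-style
 * truncation) selects SCMD_HMAC_CTRL_IPSEC_96BIT, a 16-byte tag
 * (half of the 32-byte digest) selects SCMD_HMAC_CTRL_DIV2, and the
 * full digest selects SCMD_HMAC_CTRL_NO_TRUNC.
 */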
770 
771 static int
772 ccr_authenc(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
773     struct cryptodesc *crda, struct cryptodesc *crde)
774 {
775 	char iv[CHCR_MAX_CRYPTO_IV_LEN];
776 	struct chcr_wr *crwr;
777 	struct wrqe *wr;
778 	struct auth_hash *axf;
779 	char *dst;
780 	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
781 	u_int hash_size_in_response, imm_len, iopad_size;
782 	u_int aad_start, aad_len, aad_stop;
783 	u_int auth_start, auth_stop, auth_insert;
784 	u_int cipher_start, cipher_stop;
785 	u_int hmac_ctrl, input_len;
786 	int dsgl_nsegs, dsgl_len;
787 	int sgl_nsegs, sgl_len;
788 	int error;
789 
790 	/*
791 	 * If there is a need in the future, requests with an empty
792 	 * payload could be supported as HMAC-only requests.
793 	 */
794 	if (s->blkcipher.key_len == 0 || crde->crd_len == 0)
795 		return (EINVAL);
796 	if (crde->crd_alg == CRYPTO_AES_CBC &&
797 	    (crde->crd_len % AES_BLOCK_LEN) != 0)
798 		return (EINVAL);
799 
800 	/*
801 	 * Compute the length of the AAD (data covered by the
802 	 * authentication descriptor but not the encryption
803 	 * descriptor).  To simplify the logic, AAD is only permitted
804 	 * before the cipher/plain text, not after.  This is true of
805 	 * all currently-generated requests.
806 	 */
807 	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
808 		return (EINVAL);
809 	if (crda->crd_skip < crde->crd_skip) {
810 		if (crda->crd_skip + crda->crd_len > crde->crd_skip)
811 			aad_len = (crde->crd_skip - crda->crd_skip);
812 		else
813 			aad_len = crda->crd_len;
814 	} else
815 		aad_len = 0;
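	/*
	 * Illustrative numbers only: for an ESP-like layout with
	 * crda->crd_skip = 0, crda->crd_len = 1024, crde->crd_skip = 16
	 * and crde->crd_len = 1008, the 16 bytes preceding the cipher
	 * text are treated as AAD (aad_len = 16) and the rest of the
	 * auth region overlaps the cipher text.
	 */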
816 	if (aad_len + s->blkcipher.iv_len > MAX_AAD_LEN)
817 		return (EINVAL);
818 
819 	axf = s->hmac.auth_hash;
820 	hash_size_in_response = s->hmac.hash_len;
821 	if (crde->crd_flags & CRD_F_ENCRYPT)
822 		op_type = CHCR_ENCRYPT_OP;
823 	else
824 		op_type = CHCR_DECRYPT_OP;
825 
826 	/*
827 	 * The output buffer consists of the cipher text followed by
828 	 * the hash when encrypting.  For decryption it only contains
829 	 * the plain text.
830 	 *
831 	 * Due to a firmware bug, the output buffer must include a
832 	 * dummy output buffer for the IV and AAD prior to the real
833 	 * output buffer.
834 	 */
835 	if (op_type == CHCR_ENCRYPT_OP) {
836 		if (s->blkcipher.iv_len + aad_len + crde->crd_len +
837 		    hash_size_in_response > MAX_REQUEST_SIZE)
838 			return (EFBIG);
839 	} else {
840 		if (s->blkcipher.iv_len + aad_len + crde->crd_len >
841 		    MAX_REQUEST_SIZE)
842 			return (EFBIG);
843 	}
844 	sglist_reset(sc->sg_dsgl);
845 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
846 	    s->blkcipher.iv_len + aad_len);
847 	if (error)
848 		return (error);
849 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
850 	    crde->crd_len);
851 	if (error)
852 		return (error);
853 	if (op_type == CHCR_ENCRYPT_OP) {
854 		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
855 		    crda->crd_inject, hash_size_in_response);
856 		if (error)
857 			return (error);
858 	}
859 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
860 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
861 		return (EFBIG);
862 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
863 
864 	/* PADs must be 128-bit aligned. */
865 	iopad_size = roundup2(s->hmac.partial_digest_len, 16);
866 
867 	/*
868 	 * The 'key' part of the key context consists of the key followed
869 	 * by the IPAD and OPAD.
870 	 */
871 	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
872 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
873 
874 	/*
875 	 * The input buffer consists of the IV, any AAD, and then the
876 	 * cipher/plain text.  For decryption requests the hash is
877 	 * appended after the cipher text.
878 	 *
879 	 * The IV is always stored at the start of the input buffer
880 	 * even though it may be duplicated in the payload.  The
881 	 * crypto engine doesn't work properly if the IV offset points
882 	 * inside of the AAD region, so a second copy is always
883 	 * required.
884 	 */
885 	input_len = aad_len + crde->crd_len;
886 
887 	/*
888 	 * The firmware hangs if sent a request whose input length is
889 	 * only slightly below MAX_REQUEST_SIZE.  In particular, the
890 	 * firmware appears to require 512 - 16 bytes of spare room
891 	 * along with the size of the hash even if the hash isn't
892 	 * included in the input buffer.
893 	 */
894 	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
895 	    MAX_REQUEST_SIZE)
896 		return (EFBIG);
897 	if (op_type == CHCR_DECRYPT_OP)
898 		input_len += hash_size_in_response;
899 	if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) {
900 		imm_len = input_len;
901 		sgl_nsegs = 0;
902 		sgl_len = 0;
903 	} else {
904 		imm_len = 0;
905 		sglist_reset(sc->sg_ulptx);
906 		if (aad_len != 0) {
907 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
908 			    crda->crd_skip, aad_len);
909 			if (error)
910 				return (error);
911 		}
912 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
913 		    crde->crd_skip, crde->crd_len);
914 		if (error)
915 			return (error);
916 		if (op_type == CHCR_DECRYPT_OP) {
917 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
918 			    crda->crd_inject, hash_size_in_response);
919 			if (error)
920 				return (error);
921 		}
922 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
923 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
924 	}
925 
926 	/*
927 	 * Any auth-only data before the cipher region is marked as AAD.
928 	 * Auth-data that overlaps with the cipher region is placed in
929 	 * the auth section.
930 	 */
931 	if (aad_len != 0) {
932 		aad_start = s->blkcipher.iv_len + 1;
933 		aad_stop = aad_start + aad_len - 1;
934 	} else {
935 		aad_start = 0;
936 		aad_stop = 0;
937 	}
938 	cipher_start = s->blkcipher.iv_len + aad_len + 1;
939 	if (op_type == CHCR_DECRYPT_OP)
940 		cipher_stop = hash_size_in_response;
941 	else
942 		cipher_stop = 0;
943 	if (aad_len == crda->crd_len) {
944 		auth_start = 0;
945 		auth_stop = 0;
946 	} else {
947 		if (aad_len != 0)
948 			auth_start = cipher_start;
949 		else
950 			auth_start = s->blkcipher.iv_len + crda->crd_skip -
951 			    crde->crd_skip + 1;
952 		auth_stop = (crde->crd_skip + crde->crd_len) -
953 		    (crda->crd_skip + crda->crd_len) + cipher_stop;
954 	}
955 	if (op_type == CHCR_DECRYPT_OP)
956 		auth_insert = hash_size_in_response;
957 	else
958 		auth_insert = 0;
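	/*
	 * Continuing the illustrative numbers from the AAD computation
	 * above, for an encrypt op with a 16-byte IV this yields
	 * aad_start = 17, aad_stop = 32, cipher_start = 33,
	 * cipher_stop = 0, auth_start = 33, auth_stop = 0 and
	 * auth_insert = 0; the offsets appear to be 1-based, counted
	 * from the start of the IV in the input buffer.
	 */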
959 
960 	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
961 	    roundup2(imm_len, 16) + sgl_len;
962 	if (wr_len > SGE_MAX_WR_LEN)
963 		return (EFBIG);
964 	wr = alloc_wrqe(wr_len, sc->txq);
965 	if (wr == NULL) {
966 		sc->stats_wr_nomem++;
967 		return (ENOMEM);
968 	}
969 	crwr = wrtod(wr);
970 	memset(crwr, 0, wr_len);
971 
972 	/*
973 	 * Read the existing IV from the request or generate a random
974 	 * one if none is provided.  Optionally copy the generated IV
975 	 * into the output buffer if requested.
976 	 */
977 	if (op_type == CHCR_ENCRYPT_OP) {
978 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
979 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
980 		else
981 			arc4rand(iv, s->blkcipher.iv_len, 0);
982 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
983 			crypto_copyback(crp->crp_flags, crp->crp_buf,
984 			    crde->crd_inject, s->blkcipher.iv_len, iv);
985 	} else {
986 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
987 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
988 		else
989 			crypto_copydata(crp->crp_flags, crp->crp_buf,
990 			    crde->crd_inject, s->blkcipher.iv_len, iv);
991 	}
992 
993 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
994 	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);
995 
996 	/* XXX: Hardcodes SGE loopback channel of 0. */
997 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
998 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
999 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
1000 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1001 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1002 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1003 
1004 	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len);
1005 
1006 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1007 	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1008 	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1009 	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1010 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
1011 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1012 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
1013 	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
1014 	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
1015 	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1016 
1017 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1018 	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
1019 	crwr->sec_cpl.seqno_numivs = htobe32(
1020 	    V_SCMD_SEQ_NO_CTRL(0) |
1021 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
1022 	    V_SCMD_ENC_DEC_CTRL(op_type) |
1023 	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1024 	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
1025 	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
1026 	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
1027 	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
1028 	    V_SCMD_NUM_IVS(0));
1029 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
1030 	    V_SCMD_IV_GEN_CTRL(0) |
1031 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1032 	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1033 
1034 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
1035 	switch (crde->crd_alg) {
1036 	case CRYPTO_AES_CBC:
1037 		if (crde->crd_flags & CRD_F_ENCRYPT)
1038 			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
1039 			    s->blkcipher.key_len);
1040 		else
1041 			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
1042 			    s->blkcipher.key_len);
1043 		break;
1044 	case CRYPTO_AES_ICM:
1045 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
1046 		    s->blkcipher.key_len);
1047 		break;
1048 	case CRYPTO_AES_XTS:
1049 		key_half = s->blkcipher.key_len / 2;
1050 		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
1051 		    key_half);
1052 		if (crde->crd_flags & CRD_F_ENCRYPT)
1053 			memcpy(crwr->key_ctx.key + key_half,
1054 			    s->blkcipher.enckey, key_half);
1055 		else
1056 			memcpy(crwr->key_ctx.key + key_half,
1057 			    s->blkcipher.deckey, key_half);
1058 		break;
1059 	}
1060 
1061 	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
1062 	memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len);
1063 	memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len);
1064 
1065 	dst = (char *)(crwr + 1) + kctx_len;
1066 	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
1067 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1068 	memcpy(dst, iv, s->blkcipher.iv_len);
1069 	dst += s->blkcipher.iv_len;
1070 	if (imm_len != 0) {
1071 		if (aad_len != 0) {
1072 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1073 			    crda->crd_skip, aad_len, dst);
1074 			dst += aad_len;
1075 		}
1076 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
1077 		    crde->crd_len, dst);
1078 		dst += crde->crd_len;
1079 		if (op_type == CHCR_DECRYPT_OP)
1080 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1081 			    crda->crd_inject, hash_size_in_response, dst);
1082 	} else
1083 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
1084 
1085 	/* XXX: TODO backpressure */
1086 	t4_wrq_tx(sc->adapter, wr);
1087 
1088 	return (0);
1089 }
1090 
1091 static int
1092 ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s,
1093     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1094 {
1095 	struct cryptodesc *crd;
1096 
1097 	/*
1098 	 * The updated IV to permit chained requests is at
1099 	 * cpl->data[2], but OCF doesn't permit chained requests.
1100 	 *
1101 	 * For a decryption request, the hardware may do a verification
1102 	 * of the HMAC which will fail if the existing HMAC isn't in the
1103 	 * buffer.  If that happens, clear the error and copy the HMAC
1104 	 * from the CPL reply into the buffer.
1105 	 *
1106 	 * For encryption requests, crp_desc should be the cipher
1107 	 * descriptor, which will have CRD_F_ENCRYPT set.  For decryption
1108 	 * requests, crp_desc will be the HMAC descriptor, which should
1109 	 * not have this flag set.
1110 	 */
1111 	crd = crp->crp_desc;
1112 	if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) &&
1113 	    !(crd->crd_flags & CRD_F_ENCRYPT)) {
1114 		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
1115 		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
1116 		error = 0;
1117 	}
1118 	return (error);
1119 }
1120 
1121 static int
1122 ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
1123     struct cryptodesc *crda, struct cryptodesc *crde)
1124 {
1125 	char iv[CHCR_MAX_CRYPTO_IV_LEN];
1126 	struct chcr_wr *crwr;
1127 	struct wrqe *wr;
1128 	char *dst;
1129 	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
1130 	u_int hash_size_in_response, imm_len;
1131 	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
1132 	u_int hmac_ctrl, input_len;
1133 	int dsgl_nsegs, dsgl_len;
1134 	int sgl_nsegs, sgl_len;
1135 	int error;
1136 
1137 	if (s->blkcipher.key_len == 0)
1138 		return (EINVAL);
1139 
1140 	/*
1141 	 * The crypto engine doesn't handle GCM requests with an empty
1142 	 * payload, so handle those in software instead.
1143 	 */
1144 	if (crde->crd_len == 0)
1145 		return (EMSGSIZE);
1146 
1147 	/*
1148 	 * AAD is only permitted before the cipher/plain text, not
1149 	 * after.
1150 	 */
1151 	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
1152 		return (EMSGSIZE);
1153 
1154 	if (crda->crd_len + AES_BLOCK_LEN > MAX_AAD_LEN)
1155 		return (EMSGSIZE);
1156 
1157 	hash_size_in_response = s->gmac.hash_len;
1158 	if (crde->crd_flags & CRD_F_ENCRYPT)
1159 		op_type = CHCR_ENCRYPT_OP;
1160 	else
1161 		op_type = CHCR_DECRYPT_OP;
1162 
1163 	/*
1164 	 * The IV handling for GCM in OCF is a bit more complicated in
1165 	 * that IPSec provides a full 16-byte IV (including the
1166 	 * counter), whereas the /dev/crypto interface sometimes
1167 	 * provides a full 16-byte IV (if no IV is provided in the
1168 	 * ioctl) and sometimes a 12-byte IV (if the IV was explicit).
1169 	 *
1170 	 * When provided a 12-byte IV, assume the IV is really 16 bytes
1171 	 * with a counter in the last 4 bytes initialized to 1.
1172 	 *
1173 	 * While iv_len is checked below, the value is currently
1174 	 * always set to 12 when creating a GCM session in this driver
1175 	 * due to limitations in OCF (there is no way to know what the
1176 	 * IV length of a given request will be).  This means that the
1177  * driver always assumes a 12-byte IV for now.
1178 	 */
1179 	if (s->blkcipher.iv_len == 12)
1180 		iv_len = AES_BLOCK_LEN;
1181 	else
1182 		iv_len = s->blkcipher.iv_len;
1183 
1184 	/*
1185 	 * The output buffer consists of the cipher text followed by
1186 	 * the tag when encrypting.  For decryption it only contains
1187 	 * the plain text.
1188 	 *
1189 	 * Due to a firmware bug, the output buffer must include a
1190 	 * dummy output buffer for the IV and AAD prior to the real
1191 	 * output buffer.
1192 	 */
1193 	if (op_type == CHCR_ENCRYPT_OP) {
1194 		if (iv_len + crda->crd_len + crde->crd_len +
1195 		    hash_size_in_response > MAX_REQUEST_SIZE)
1196 			return (EFBIG);
1197 	} else {
1198 		if (iv_len + crda->crd_len + crde->crd_len > MAX_REQUEST_SIZE)
1199 			return (EFBIG);
1200 	}
1201 	sglist_reset(sc->sg_dsgl);
1202 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1203 	    crda->crd_len);
1204 	if (error)
1205 		return (error);
1206 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
1207 	    crde->crd_len);
1208 	if (error)
1209 		return (error);
1210 	if (op_type == CHCR_ENCRYPT_OP) {
1211 		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
1212 		    crda->crd_inject, hash_size_in_response);
1213 		if (error)
1214 			return (error);
1215 	}
1216 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
1217 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1218 		return (EFBIG);
1219 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1220 
1221 	/*
1222 	 * The 'key' part of the key context consists of the key followed
1223 	 * by the Galois hash key.
1224 	 */
1225 	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
1226 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1227 
1228 	/*
1229 	 * The input buffer consists of the IV, any AAD, and then the
1230 	 * cipher/plain text.  For decryption requests the hash is
1231 	 * appended after the cipher text.
1232 	 *
1233 	 * The IV is always stored at the start of the input buffer
1234 	 * even though it may be duplicated in the payload.  The
1235 	 * crypto engine doesn't work properly if the IV offset points
1236 	 * inside of the AAD region, so a second copy is always
1237 	 * required.
1238 	 */
1239 	input_len = crda->crd_len + crde->crd_len;
1240 	if (op_type == CHCR_DECRYPT_OP)
1241 		input_len += hash_size_in_response;
1242 	if (input_len > MAX_REQUEST_SIZE)
1243 		return (EFBIG);
1244 	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1245 		imm_len = input_len;
1246 		sgl_nsegs = 0;
1247 		sgl_len = 0;
1248 	} else {
1249 		imm_len = 0;
1250 		sglist_reset(sc->sg_ulptx);
1251 		if (crda->crd_len != 0) {
1252 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1253 			    crda->crd_skip, crda->crd_len);
1254 			if (error)
1255 				return (error);
1256 		}
1257 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1258 		    crde->crd_skip, crde->crd_len);
1259 		if (error)
1260 			return (error);
1261 		if (op_type == CHCR_DECRYPT_OP) {
1262 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1263 			    crda->crd_inject, hash_size_in_response);
1264 			if (error)
1265 				return (error);
1266 		}
1267 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
1268 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1269 	}
1270 
1271 	if (crda->crd_len != 0) {
1272 		aad_start = iv_len + 1;
1273 		aad_stop = aad_start + crda->crd_len - 1;
1274 	} else {
1275 		aad_start = 0;
1276 		aad_stop = 0;
1277 	}
1278 	cipher_start = iv_len + crda->crd_len + 1;
1279 	if (op_type == CHCR_DECRYPT_OP)
1280 		cipher_stop = hash_size_in_response;
1281 	else
1282 		cipher_stop = 0;
1283 	if (op_type == CHCR_DECRYPT_OP)
1284 		auth_insert = hash_size_in_response;
1285 	else
1286 		auth_insert = 0;
1287 
1288 	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
1289 	    sgl_len;
1290 	if (wr_len > SGE_MAX_WR_LEN)
1291 		return (EFBIG);
1292 	wr = alloc_wrqe(wr_len, sc->txq);
1293 	if (wr == NULL) {
1294 		sc->stats_wr_nomem++;
1295 		return (ENOMEM);
1296 	}
1297 	crwr = wrtod(wr);
1298 	memset(crwr, 0, wr_len);
1299 
1300 	/*
1301 	 * Read the existing IV from the request or generate a random
1302 	 * one if none is provided.  Optionally copy the generated IV
1303 	 * into the output buffer if requested.
1304 	 *
1305 	 * If the input IV is 12 bytes, append an explicit 4-byte
1306 	 * counter of 1.
1307 	 */
1308 	if (op_type == CHCR_ENCRYPT_OP) {
1309 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1310 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
1311 		else
1312 			arc4rand(iv, s->blkcipher.iv_len, 0);
1313 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
1314 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1315 			    crde->crd_inject, s->blkcipher.iv_len, iv);
1316 	} else {
1317 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1318 			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
1319 		else
1320 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1321 			    crde->crd_inject, s->blkcipher.iv_len, iv);
1322 	}
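	/*
	 * A 12-byte IV is extended with a 4-byte big-endian counter of
	 * 1, matching the standard GCM construction of the initial
	 * counter block (J0 = IV || 0^31 || 1) for 96-bit IVs.
	 */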
1323 	if (s->blkcipher.iv_len == 12)
1324 		*(uint32_t *)&iv[12] = htobe32(1);
1325 
1326 	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
1327 	    crp);
1328 
1329 	/* XXX: Hardcodes SGE loopback channel of 0. */
1330 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
1331 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1332 	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
1333 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1334 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1335 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1336 
1337 	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1338 
1339 	/*
1340 	 * NB: cipherstop is explicitly set to 0.  On encrypt it
1341 	 * should normally be set to 0 anyway (as the encrypt crd ends
1342 	 * at the end of the input).  However, for decrypt the cipher
1343 	 * ends before the tag in the AUTHENC case (and authstop is
1344 	 * set to stop before the tag), but for GCM the cipher still
1345 	 * runs to the end of the buffer.  Not sure if this is
1346 	 * intentional or a firmware quirk, but it is required for
1347 	 * working tag validation with GCM decryption.
1348 	 */
1349 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1350 	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1351 	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1352 	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1353 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
1354 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1355 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
1356 	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1357 	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1358 	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1359 
1360 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1361 	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
1362 	crwr->sec_cpl.seqno_numivs = htobe32(
1363 	    V_SCMD_SEQ_NO_CTRL(0) |
1364 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
1365 	    V_SCMD_ENC_DEC_CTRL(op_type) |
1366 	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1367 	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) |
1368 	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) |
1369 	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
1370 	    V_SCMD_IV_SIZE(iv_len / 2) |
1371 	    V_SCMD_NUM_IVS(0));
1372 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
1373 	    V_SCMD_IV_GEN_CTRL(0) |
1374 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1375 	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1376 
1377 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
1378 	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
1379 	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
1380 	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);
1381 
1382 	dst = (char *)(crwr + 1) + kctx_len;
1383 	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
1384 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1385 	memcpy(dst, iv, iv_len);
1386 	dst += iv_len;
1387 	if (imm_len != 0) {
1388 		if (crda->crd_len != 0) {
1389 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1390 			    crda->crd_skip, crda->crd_len, dst);
1391 			dst += crda->crd_len;
1392 		}
1393 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
1394 		    crde->crd_len, dst);
1395 		dst += crde->crd_len;
1396 		if (op_type == CHCR_DECRYPT_OP)
1397 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1398 			    crda->crd_inject, hash_size_in_response, dst);
1399 	} else
1400 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
1401 
1402 	/* XXX: TODO backpressure */
1403 	t4_wrq_tx(sc->adapter, wr);
1404 
1405 	return (0);
1406 }
1407 
1408 static int
1409 ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
1410     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1411 {
1412 
1413 	/*
1414 	 * The updated IV to permit chained requests is at
1415 	 * cpl->data[2], but OCF doesn't permit chained requests.
1416 	 *
1417 	 * Note that the hardware should always verify the GMAC hash.
1418 	 */
1419 	return (error);
1420 }
1421 
1422 /*
1423  * Handle a GCM request that is not supported by the crypto engine by
1424  * performing the operation in software.  Derived from swcr_authenc().
1425  */
1426 static void
1427 ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp,
1428     struct cryptodesc *crda, struct cryptodesc *crde)
1429 {
1430 	struct auth_hash *axf;
1431 	struct enc_xform *exf;
1432 	void *auth_ctx;
1433 	uint8_t *kschedule;
1434 	char block[GMAC_BLOCK_LEN];
1435 	char digest[GMAC_DIGEST_LEN];
1436 	char iv[AES_BLOCK_LEN];
1437 	int error, i, len;
1438 
1439 	auth_ctx = NULL;
1440 	kschedule = NULL;
1441 
1442 	/* Initialize the MAC. */
1443 	switch (s->blkcipher.key_len) {
1444 	case 16:
1445 		axf = &auth_hash_nist_gmac_aes_128;
1446 		break;
1447 	case 24:
1448 		axf = &auth_hash_nist_gmac_aes_192;
1449 		break;
1450 	case 32:
1451 		axf = &auth_hash_nist_gmac_aes_256;
1452 		break;
1453 	default:
1454 		error = EINVAL;
1455 		goto out;
1456 	}
1457 	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
1458 	if (auth_ctx == NULL) {
1459 		error = ENOMEM;
1460 		goto out;
1461 	}
1462 	axf->Init(auth_ctx);
1463 	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
1464 
1465 	/* Initialize the cipher. */
1466 	exf = &enc_xform_aes_nist_gcm;
1467 	error = exf->setkey(&kschedule, s->blkcipher.enckey,
1468 	    s->blkcipher.key_len);
1469 	if (error)
1470 		goto out;
1471 
1472 	/*
1473 	 * This assumes a 12-byte IV from the crp.  See longer comment
1474 	 * above in ccr_gcm() for more details.
1475 	 */
1476 	if (crde->crd_flags & CRD_F_ENCRYPT) {
1477 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1478 			memcpy(iv, crde->crd_iv, 12);
1479 		else
1480 			arc4rand(iv, 12, 0);
1481 		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
1482 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1483 			    crde->crd_inject, 12, iv);
1484 	} else {
1485 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1486 			memcpy(iv, crde->crd_iv, 12);
1487 		else
1488 			crypto_copydata(crp->crp_flags, crp->crp_buf,
1489 			    crde->crd_inject, 12, iv);
1490 	}
1491 	*(uint32_t *)&iv[12] = htobe32(1);
1492 
1493 	axf->Reinit(auth_ctx, iv, sizeof(iv));
1494 
1495 	/* MAC the AAD. */
1496 	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
1497 		len = imin(crda->crd_len - i, sizeof(block));
1498 		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
1499 		    i, len, block);
1500 		bzero(block + len, sizeof(block) - len);
1501 		axf->Update(auth_ctx, block, sizeof(block));
1502 	}
1503 
1504 	exf->reinit(kschedule, iv);
1505 
1506 	/* Do encryption with MAC */
1507 	for (i = 0; i < crde->crd_len; i += sizeof(block)) {
1508 		len = imin(crde->crd_len - i, sizeof(block));
1509 		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip +
1510 		    i, len, block);
1511 		bzero(block + len, sizeof(block) - len);
1512 		if (crde->crd_flags & CRD_F_ENCRYPT) {
1513 			exf->encrypt(kschedule, block);
1514 			axf->Update(auth_ctx, block, len);
1515 			crypto_copyback(crp->crp_flags, crp->crp_buf,
1516 			    crde->crd_skip + i, len, block);
1517 		} else {
1518 			axf->Update(auth_ctx, block, len);
1519 		}
1520 	}
1521 
1522 	/* Length block: 64-bit big-endian bit lengths of AAD and cipher text. */
1523 	bzero(block, sizeof(block));
1524 	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
1525 	((uint32_t *)block)[3] = htobe32(crde->crd_len * 8);
1526 	axf->Update(auth_ctx, block, sizeof(block));
1527 
1528 	/* Finalize MAC. */
1529 	axf->Final(digest, auth_ctx);
1530 
1531 	/* Inject or validate tag. */
1532 	if (crde->crd_flags & CRD_F_ENCRYPT) {
1533 		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
1534 		    sizeof(digest), digest);
1535 		error = 0;
1536 	} else {
1537 		char digest2[GMAC_DIGEST_LEN];
1538 
1539 		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
1540 		    sizeof(digest2), digest2);
1541 		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
1542 			error = 0;
1543 
1544 			/* Tag matches, decrypt data. */
1545 			for (i = 0; i < crde->crd_len; i += sizeof(block)) {
1546 				len = imin(crde->crd_len - i, sizeof(block));
1547 				crypto_copydata(crp->crp_flags, crp->crp_buf,
1548 				    crde->crd_skip + i, len, block);
1549 				bzero(block + len, sizeof(block) - len);
1550 				exf->decrypt(kschedule, block);
1551 				crypto_copyback(crp->crp_flags, crp->crp_buf,
1552 				    crde->crd_skip + i, len, block);
1553 			}
1554 		} else
1555 			error = EBADMSG;
1556 	}
1557 
1558 	exf->zerokey(&kschedule);
1559 out:
1560 	if (auth_ctx != NULL) {
1561 		memset(auth_ctx, 0, axf->ctxsize);
1562 		free(auth_ctx, M_CCR);
1563 	}
1564 	crp->crp_etype = error;
1565 	crypto_done(crp);
1566 }
1567 
1568 static void
1569 ccr_identify(driver_t *driver, device_t parent)
1570 {
1571 	struct adapter *sc;
1572 
1573 	sc = device_get_softc(parent);
1574 	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
1575 	    device_find_child(parent, "ccr", -1) == NULL)
1576 		device_add_child(parent, "ccr", -1);
1577 }
1578 
1579 static int
1580 ccr_probe(device_t dev)
1581 {
1582 
1583 	device_set_desc(dev, "Chelsio Crypto Accelerator");
1584 	return (BUS_PROBE_DEFAULT);
1585 }
1586 
1587 static void
1588 ccr_sysctls(struct ccr_softc *sc)
1589 {
1590 	struct sysctl_ctx_list *ctx;
1591 	struct sysctl_oid *oid;
1592 	struct sysctl_oid_list *children;
1593 
1594 	ctx = device_get_sysctl_ctx(sc->dev);
1595 
1596 	/*
1597 	 * dev.ccr.X.
1598 	 */
1599 	oid = device_get_sysctl_tree(sc->dev);
1600 	children = SYSCTL_CHILDREN(oid);
1601 
1602 	/*
1603 	 * dev.ccr.X.stats.
1604 	 */
1605 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
1606 	    NULL, "statistics");
1607 	children = SYSCTL_CHILDREN(oid);
1608 
1609 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD,
1610 	    &sc->stats_hash, 0, "Hash requests submitted");
1611 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
1612 	    &sc->stats_hmac, 0, "HMAC requests submitted");
1613 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
1614 	    &sc->stats_blkcipher_encrypt, 0,
1615 	    "Cipher encryption requests submitted");
1616 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
1617 	    &sc->stats_blkcipher_decrypt, 0,
1618 	    "Cipher decryption requests submitted");
1619 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
1620 	    &sc->stats_authenc_encrypt, 0,
1621 	    "Combined AES+HMAC encryption requests submitted");
1622 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
1623 	    &sc->stats_authenc_decrypt, 0,
1624 	    "Combined AES+HMAC decryption requests submitted");
1625 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
1626 	    &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
1627 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
1628 	    &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
1629 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
1630 	    &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
1631 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
1632 	    &sc->stats_inflight, 0, "Requests currently pending");
1633 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
1634 	    &sc->stats_mac_error, 0, "MAC errors");
1635 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
1636 	    &sc->stats_pad_error, 0, "Padding errors");
1637 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
1638 	    &sc->stats_bad_session, 0, "Requests with invalid session ID");
1639 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
1640 	    &sc->stats_sglist_error, 0,
1641 	    "Requests for which DMA mapping failed");
1642 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
1643 	    &sc->stats_process_error, 0, "Requests failed during queueing");
1644 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD,
1645 	    &sc->stats_sw_fallback, 0,
1646 	    "Requests processed by falling back to software");
1647 }
1648 
1649 static int
1650 ccr_attach(device_t dev)
1651 {
1652 	struct ccr_softc *sc;
1653 	int32_t cid;
1654 
1655 	sc = device_get_softc(dev);
1656 	sc->dev = dev;
1657 	sc->adapter = device_get_softc(device_get_parent(dev));
1658 	sc->txq = &sc->adapter->sge.ctrlq[0];
1659 	sc->rxq = &sc->adapter->sge.rxq[0];
1660 	cid = crypto_get_driverid(dev, sizeof(struct ccr_session),
1661 	    CRYPTOCAP_F_HARDWARE);
1662 	if (cid < 0) {
1663 		device_printf(dev, "could not get crypto driver id\n");
1664 		return (ENXIO);
1665 	}
1666 	sc->cid = cid;
1667 	sc->adapter->ccr_softc = sc;
1668 
1669 	/* XXX: TODO? */
1670 	sc->tx_channel_id = 0;
1671 
1672 	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
1673 	sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
1674 	sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
1675 	sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
1676 	sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
1677 	sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
1678 	ccr_sysctls(sc);
1679 
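	/* Advertise supported algorithms to the crypto framework. */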
1680 	crypto_register(cid, CRYPTO_SHA1, 0, 0);
1681 	crypto_register(cid, CRYPTO_SHA2_224, 0, 0);
1682 	crypto_register(cid, CRYPTO_SHA2_256, 0, 0);
1683 	crypto_register(cid, CRYPTO_SHA2_384, 0, 0);
1684 	crypto_register(cid, CRYPTO_SHA2_512, 0, 0);
1685 	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
1686 	crypto_register(cid, CRYPTO_SHA2_224_HMAC, 0, 0);
1687 	crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
1688 	crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
1689 	crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
1690 	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
1691 	crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
1692 	crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
1693 	crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
1694 	crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
1695 	crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
1696 	crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
1697 	return (0);
1698 }
1699 
1700 static int
1701 ccr_detach(device_t dev)
1702 {
1703 	struct ccr_softc *sc;
1704 
1705 	sc = device_get_softc(dev);
1706 
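	/*
	 * Mark the driver as detaching so ccr_newsession() rejects new
	 * sessions before the crypto framework is unregistered.
	 */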
1707 	mtx_lock(&sc->lock);
1708 	sc->detaching = true;
1709 	mtx_unlock(&sc->lock);
1710 
1711 	crypto_unregister_all(sc->cid);
1712 
1713 	mtx_destroy(&sc->lock);
1714 	sglist_free(sc->sg_iv_aad);
1715 	free(sc->iv_aad_buf, M_CCR);
1716 	sglist_free(sc->sg_dsgl);
1717 	sglist_free(sc->sg_ulptx);
1718 	sglist_free(sc->sg_crp);
1719 	sc->adapter->ccr_softc = NULL;
1720 	return (0);
1721 }
1722 
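/*
 * Copy the running hash state out of a software auth context in
 * big-endian order, the form used for partial digests in the key
 * context.
 */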
1723 static void
1724 ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx)
1725 {
1726 	uint32_t *u32;
1727 	uint64_t *u64;
1728 	u_int i;
1729 
1730 	u32 = (uint32_t *)dst;
1731 	u64 = (uint64_t *)dst;
1732 	switch (cri_alg) {
1733 	case CRYPTO_SHA1:
1734 	case CRYPTO_SHA1_HMAC:
1735 		for (i = 0; i < SHA1_HASH_LEN / 4; i++)
1736 			u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]);
1737 		break;
1738 	case CRYPTO_SHA2_224:
1739 	case CRYPTO_SHA2_224_HMAC:
1740 		for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
1741 			u32[i] = htobe32(auth_ctx->sha224ctx.state[i]);
1742 		break;
1743 	case CRYPTO_SHA2_256:
1744 	case CRYPTO_SHA2_256_HMAC:
1745 		for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
1746 			u32[i] = htobe32(auth_ctx->sha256ctx.state[i]);
1747 		break;
1748 	case CRYPTO_SHA2_384:
1749 	case CRYPTO_SHA2_384_HMAC:
1750 		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
1751 			u64[i] = htobe64(auth_ctx->sha384ctx.state[i]);
1752 		break;
1753 	case CRYPTO_SHA2_512:
1754 	case CRYPTO_SHA2_512_HMAC:
1755 		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
1756 			u64[i] = htobe64(auth_ctx->sha512ctx.state[i]);
1757 		break;
1758 	}
1759 }
1760 
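/*
 * For plain hash sessions the stored partial digest is simply the
 * algorithm's initial hash state.
 */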
1761 static void
1762 ccr_init_hash_digest(struct ccr_session *s, int cri_alg)
1763 {
1764 	union authctx auth_ctx;
1765 	struct auth_hash *axf;
1766 
1767 	axf = s->hmac.auth_hash;
1768 	axf->Init(&auth_ctx);
1769 	ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx);
1770 }
1771 
1772 static void
1773 ccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key,
1774     int klen)
1775 {
1776 	union authctx auth_ctx;
1777 	struct auth_hash *axf;
1778 	u_int i;
1779 
1780 	/*
1781 	 * If the key is larger than the block size, use the digest of
1782 	 * the key as the key instead.
1783 	 */
1784 	axf = s->hmac.auth_hash;
1785 	klen /= 8;
1786 	if (klen > axf->blocksize) {
1787 		axf->Init(&auth_ctx);
1788 		axf->Update(&auth_ctx, key, klen);
1789 		axf->Final(s->hmac.ipad, &auth_ctx);
1790 		klen = axf->hashsize;
1791 	} else
1792 		memcpy(s->hmac.ipad, key, klen);
1793 
1794 	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
1795 	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);
1796 
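	/* XOR in the standard HMAC inner and outer pad bytes. */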
1797 	for (i = 0; i < axf->blocksize; i++) {
1798 		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
1799 		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
1800 	}
1801 
1802 	/*
1803 	 * Hash the raw ipad and opad and store the partial result in
1804 	 * the same buffer.
1805 	 */
1806 	axf->Init(&auth_ctx);
1807 	axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize);
1808 	ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx);
1809 
1810 	axf->Init(&auth_ctx);
1811 	axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
1812 	ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx);
1813 }
1814 
1815 /*
1816  * Borrowed from AES_GMAC_Setkey().
1817  */
1818 static void
1819 ccr_init_gmac_hash(struct ccr_session *s, char *key, int klen)
1820 {
1821 	static char zeroes[GMAC_BLOCK_LEN];
1822 	uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
1823 	int rounds;
1824 
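	/* H (the GHASH key) is the AES encryption of the zero block. */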
1825 	rounds = rijndaelKeySetupEnc(keysched, key, klen);
1826 	rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h);
1827 }
1828 
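/*
 * Validate an AES key length in bits.  AES-XTS keys are twice the AES
 * key size, so only 256- and 512-bit keys are valid for XTS.
 */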
1829 static int
1830 ccr_aes_check_keylen(int alg, int klen)
1831 {
1832 
1833 	switch (klen) {
1834 	case 128:
1835 	case 192:
1836 		if (alg == CRYPTO_AES_XTS)
1837 			return (EINVAL);
1838 		break;
1839 	case 256:
1840 		break;
1841 	case 512:
1842 		if (alg != CRYPTO_AES_XTS)
1843 			return (EINVAL);
1844 		break;
1845 	default:
1846 		return (EINVAL);
1847 	}
1848 	return (0);
1849 }
1850 
1851 static void
1852 ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen)
1853 {
1854 	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
1855 	unsigned int opad_present;
1856 
1857 	if (alg == CRYPTO_AES_XTS)
1858 		kbits = klen / 2;
1859 	else
1860 		kbits = klen;
1861 	switch (kbits) {
1862 	case 128:
1863 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
1864 		break;
1865 	case 192:
1866 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
1867 		break;
1868 	case 256:
1869 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
1870 		break;
1871 	default:
1872 		panic("should not get here");
1873 	}
1874 
1875 	s->blkcipher.key_len = klen / 8;
1876 	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
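	/* CBC and XTS decryption require a derived decryption key. */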
1877 	switch (alg) {
1878 	case CRYPTO_AES_CBC:
1879 	case CRYPTO_AES_XTS:
1880 		t4_aes_getdeckey(s->blkcipher.deckey, key, kbits);
1881 		break;
1882 	}
1883 
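	/*
	 * The key context holds the cipher key (padded to 16 bytes)
	 * followed by the IPAD/OPAD partial digests for AUTHENC or the
	 * GHASH H value for GCM.
	 */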
1884 	kctx_len = roundup2(s->blkcipher.key_len, 16);
1885 	switch (s->mode) {
1886 	case AUTHENC:
1887 		mk_size = s->hmac.mk_size;
1888 		opad_present = 1;
1889 		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
1890 		kctx_len += iopad_size * 2;
1891 		break;
1892 	case GCM:
1893 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
1894 		opad_present = 0;
1895 		kctx_len += GMAC_BLOCK_LEN;
1896 		break;
1897 	default:
1898 		mk_size = CHCR_KEYCTX_NO_KEY;
1899 		opad_present = 0;
1900 		break;
1901 	}
1902 	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
1903 	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
1904 	    V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
1905 	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
1906 	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
1907 	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
1908 }
1909 
1910 static int
1911 ccr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
1912 {
1913 	struct ccr_softc *sc;
1914 	struct ccr_session *s;
1915 	struct auth_hash *auth_hash;
1916 	struct cryptoini *c, *hash, *cipher;
1917 	unsigned int auth_mode, cipher_mode, iv_len, mk_size;
1918 	unsigned int partial_digest_len;
1919 	int error;
1920 	bool gcm_hash, hmac;
1921 
1922 	if (cri == NULL)
1923 		return (EINVAL);
1924 
1925 	gcm_hash = false;
1926 	hmac = false;
1927 	cipher = NULL;
1928 	hash = NULL;
1929 	auth_hash = NULL;
1930 	auth_mode = SCMD_AUTH_MODE_NOP;
1931 	cipher_mode = SCMD_CIPH_MODE_NOP;
1932 	iv_len = 0;
1933 	mk_size = 0;
1934 	partial_digest_len = 0;
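	/*
	 * Walk the descriptor list.  At most one hash and one cipher
	 * algorithm may be requested per session.
	 */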
1935 	for (c = cri; c != NULL; c = c->cri_next) {
1936 		switch (c->cri_alg) {
1937 		case CRYPTO_SHA1:
1938 		case CRYPTO_SHA2_224:
1939 		case CRYPTO_SHA2_256:
1940 		case CRYPTO_SHA2_384:
1941 		case CRYPTO_SHA2_512:
1942 		case CRYPTO_SHA1_HMAC:
1943 		case CRYPTO_SHA2_224_HMAC:
1944 		case CRYPTO_SHA2_256_HMAC:
1945 		case CRYPTO_SHA2_384_HMAC:
1946 		case CRYPTO_SHA2_512_HMAC:
1947 		case CRYPTO_AES_128_NIST_GMAC:
1948 		case CRYPTO_AES_192_NIST_GMAC:
1949 		case CRYPTO_AES_256_NIST_GMAC:
1950 			if (hash)
1951 				return (EINVAL);
1952 			hash = c;
1953 			switch (c->cri_alg) {
1954 			case CRYPTO_SHA1:
1955 			case CRYPTO_SHA1_HMAC:
1956 				auth_hash = &auth_hash_hmac_sha1;
1957 				auth_mode = SCMD_AUTH_MODE_SHA1;
1958 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1959 				partial_digest_len = SHA1_HASH_LEN;
1960 				break;
1961 			case CRYPTO_SHA2_224:
1962 			case CRYPTO_SHA2_224_HMAC:
1963 				auth_hash = &auth_hash_hmac_sha2_224;
1964 				auth_mode = SCMD_AUTH_MODE_SHA224;
1965 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1966 				partial_digest_len = SHA2_256_HASH_LEN;
1967 				break;
1968 			case CRYPTO_SHA2_256:
1969 			case CRYPTO_SHA2_256_HMAC:
1970 				auth_hash = &auth_hash_hmac_sha2_256;
1971 				auth_mode = SCMD_AUTH_MODE_SHA256;
1972 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1973 				partial_digest_len = SHA2_256_HASH_LEN;
1974 				break;
1975 			case CRYPTO_SHA2_384:
1976 			case CRYPTO_SHA2_384_HMAC:
1977 				auth_hash = &auth_hash_hmac_sha2_384;
1978 				auth_mode = SCMD_AUTH_MODE_SHA512_384;
1979 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1980 				partial_digest_len = SHA2_512_HASH_LEN;
1981 				break;
1982 			case CRYPTO_SHA2_512:
1983 			case CRYPTO_SHA2_512_HMAC:
1984 				auth_hash = &auth_hash_hmac_sha2_512;
1985 				auth_mode = SCMD_AUTH_MODE_SHA512_512;
1986 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1987 				partial_digest_len = SHA2_512_HASH_LEN;
1988 				break;
1989 			case CRYPTO_AES_128_NIST_GMAC:
1990 			case CRYPTO_AES_192_NIST_GMAC:
1991 			case CRYPTO_AES_256_NIST_GMAC:
1992 				gcm_hash = true;
1993 				auth_mode = SCMD_AUTH_MODE_GHASH;
1994 				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
1995 				break;
1996 			}
1997 			switch (c->cri_alg) {
1998 			case CRYPTO_SHA1_HMAC:
1999 			case CRYPTO_SHA2_224_HMAC:
2000 			case CRYPTO_SHA2_256_HMAC:
2001 			case CRYPTO_SHA2_384_HMAC:
2002 			case CRYPTO_SHA2_512_HMAC:
2003 				hmac = true;
2004 				break;
2005 			}
2006 			break;
2007 		case CRYPTO_AES_CBC:
2008 		case CRYPTO_AES_ICM:
2009 		case CRYPTO_AES_NIST_GCM_16:
2010 		case CRYPTO_AES_XTS:
2011 			if (cipher)
2012 				return (EINVAL);
2013 			cipher = c;
2014 			switch (c->cri_alg) {
2015 			case CRYPTO_AES_CBC:
2016 				cipher_mode = SCMD_CIPH_MODE_AES_CBC;
2017 				iv_len = AES_BLOCK_LEN;
2018 				break;
2019 			case CRYPTO_AES_ICM:
2020 				cipher_mode = SCMD_CIPH_MODE_AES_CTR;
2021 				iv_len = AES_BLOCK_LEN;
2022 				break;
2023 			case CRYPTO_AES_NIST_GCM_16:
2024 				cipher_mode = SCMD_CIPH_MODE_AES_GCM;
2025 				iv_len = AES_GCM_IV_LEN;
2026 				break;
2027 			case CRYPTO_AES_XTS:
2028 				cipher_mode = SCMD_CIPH_MODE_AES_XTS;
2029 				iv_len = AES_BLOCK_LEN;
2030 				break;
2031 			}
2032 			if (c->cri_key != NULL) {
2033 				error = ccr_aes_check_keylen(c->cri_alg,
2034 				    c->cri_klen);
2035 				if (error)
2036 					return (error);
2037 			}
2038 			break;
2039 		default:
2040 			return (EINVAL);
2041 		}
2042 	}
2043 	if (gcm_hash != (cipher_mode == SCMD_CIPH_MODE_AES_GCM))
2044 		return (EINVAL);
2045 	if (hash == NULL && cipher == NULL)
2046 		return (EINVAL);
2047 	if (hash != NULL) {
2048 		if ((hmac || gcm_hash) && hash->cri_key == NULL)
2049 			return (EINVAL);
2050 		if (!(hmac || gcm_hash) && hash->cri_key != NULL)
2051 			return (EINVAL);
2052 	}
2053 
2054 	sc = device_get_softc(dev);
2055 
2056 	/*
2057 	 * XXX: Don't create a session if the queues aren't
2058 	 * initialized.  This is racy as the rxq can be destroyed by
2059 	 * the associated VI detaching.  Eventually ccr should use
2060 	 * dedicated queues.
2061 	 */
2062 	if (sc->rxq->iq.adapter == NULL || sc->txq->adapter == NULL)
2063 		return (ENXIO);
2064 
2065 	mtx_lock(&sc->lock);
2066 	if (sc->detaching) {
2067 		mtx_unlock(&sc->lock);
2068 		return (ENXIO);
2069 	}
2070 
2071 	s = crypto_get_driver_session(cses);
2072 
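	/* Classify the session by the combination of hash and cipher. */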
2073 	if (gcm_hash)
2074 		s->mode = GCM;
2075 	else if (hash != NULL && cipher != NULL)
2076 		s->mode = AUTHENC;
2077 	else if (hash != NULL) {
2078 		if (hmac)
2079 			s->mode = HMAC;
2080 		else
2081 			s->mode = HASH;
2082 	} else {
2083 		MPASS(cipher != NULL);
2084 		s->mode = BLKCIPHER;
2085 	}
2086 	if (gcm_hash) {
2087 		if (hash->cri_mlen == 0)
2088 			s->gmac.hash_len = AES_GMAC_HASH_LEN;
2089 		else
2090 			s->gmac.hash_len = hash->cri_mlen;
2091 		ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen);
2092 	} else if (hash != NULL) {
2093 		s->hmac.auth_hash = auth_hash;
2094 		s->hmac.auth_mode = auth_mode;
2095 		s->hmac.mk_size = mk_size;
2096 		s->hmac.partial_digest_len = partial_digest_len;
2097 		if (hash->cri_mlen == 0)
2098 			s->hmac.hash_len = auth_hash->hashsize;
2099 		else
2100 			s->hmac.hash_len = hash->cri_mlen;
2101 		if (hmac)
2102 			ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
2103 			    hash->cri_klen);
2104 		else
2105 			ccr_init_hash_digest(s, hash->cri_alg);
2106 	}
2107 	if (cipher != NULL) {
2108 		s->blkcipher.cipher_mode = cipher_mode;
2109 		s->blkcipher.iv_len = iv_len;
2110 		if (cipher->cri_key != NULL)
2111 			ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
2112 			    cipher->cri_klen);
2113 	}
2114 
2115 	s->active = true;
2116 	mtx_unlock(&sc->lock);
2117 	return (0);
2118 }
2119 
2120 static void
2121 ccr_freesession(device_t dev, crypto_session_t cses)
2122 {
2123 	struct ccr_softc *sc;
2124 	struct ccr_session *s;
2125 
2126 	sc = device_get_softc(dev);
2127 	s = crypto_get_driver_session(cses);
2128 	mtx_lock(&sc->lock);
2129 	if (s->pending != 0)
2130 		device_printf(dev,
2131 		    "session %p freed with %d pending requests\n", s,
2132 		    s->pending);
2133 	s->active = false;
2134 	mtx_unlock(&sc->lock);
2135 }
2136 
2137 static int
2138 ccr_process(device_t dev, struct cryptop *crp, int hint)
2139 {
2140 	struct ccr_softc *sc;
2141 	struct ccr_session *s;
2142 	struct cryptodesc *crd, *crda, *crde;
2143 	int error;
2144 
2145 	if (crp == NULL)
2146 		return (EINVAL);
2147 
2148 	crd = crp->crp_desc;
2149 	s = crypto_get_driver_session(crp->crp_session);
2150 	sc = device_get_softc(dev);
2151 
2152 	mtx_lock(&sc->lock);
2153 	error = ccr_populate_sglist(sc->sg_crp, crp);
2154 	if (error) {
2155 		sc->stats_sglist_error++;
2156 		goto out;
2157 	}
2158 
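	/*
	 * Dispatch on the session mode.  Requests carrying explicit
	 * keys refresh the stored key material first.
	 */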
2159 	switch (s->mode) {
2160 	case HASH:
2161 		error = ccr_hash(sc, s, crp);
2162 		if (error == 0)
2163 			sc->stats_hash++;
2164 		break;
2165 	case HMAC:
2166 		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
2167 			ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
2168 			    crd->crd_klen);
2169 		error = ccr_hash(sc, s, crp);
2170 		if (error == 0)
2171 			sc->stats_hmac++;
2172 		break;
2173 	case BLKCIPHER:
2174 		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
2175 			error = ccr_aes_check_keylen(crd->crd_alg,
2176 			    crd->crd_klen);
2177 			if (error)
2178 				break;
2179 			ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
2180 			    crd->crd_klen);
2181 		}
2182 		error = ccr_blkcipher(sc, s, crp);
2183 		if (error == 0) {
2184 			if (crd->crd_flags & CRD_F_ENCRYPT)
2185 				sc->stats_blkcipher_encrypt++;
2186 			else
2187 				sc->stats_blkcipher_decrypt++;
2188 		}
2189 		break;
2190 	case AUTHENC:
2191 		error = 0;
2192 		switch (crd->crd_alg) {
2193 		case CRYPTO_AES_CBC:
2194 		case CRYPTO_AES_ICM:
2195 		case CRYPTO_AES_XTS:
2196 			/* Only encrypt-then-authenticate supported. */
2197 			crde = crd;
2198 			crda = crd->crd_next;
2199 			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
2200 				error = EINVAL;
2201 				break;
2202 			}
2203 			break;
2204 		default:
2205 			crda = crd;
2206 			crde = crd->crd_next;
2207 			if (crde->crd_flags & CRD_F_ENCRYPT) {
2208 				error = EINVAL;
2209 				break;
2210 			}
2211 			break;
2212 		}
2213 		if (error)
2214 			break;
2215 		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
2216 			ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
2217 			    crda->crd_klen);
2218 		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
2219 			error = ccr_aes_check_keylen(crde->crd_alg,
2220 			    crde->crd_klen);
2221 			if (error)
2222 				break;
2223 			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
2224 			    crde->crd_klen);
2225 		}
2226 		error = ccr_authenc(sc, s, crp, crda, crde);
2227 		if (error == 0) {
2228 			if (crde->crd_flags & CRD_F_ENCRYPT)
2229 				sc->stats_authenc_encrypt++;
2230 			else
2231 				sc->stats_authenc_decrypt++;
2232 		}
2233 		break;
2234 	case GCM:
2235 		error = 0;
2236 		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
2237 			crde = crd;
2238 			crda = crd->crd_next;
2239 		} else {
2240 			crda = crd;
2241 			crde = crd->crd_next;
2242 		}
2243 		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
2244 			ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen);
2245 		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
2246 			error = ccr_aes_check_keylen(crde->crd_alg,
2247 			    crde->crd_klen);
2248 			if (error)
2249 				break;
2250 			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
2251 			    crde->crd_klen);
2252 		}
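		/* Zero-length GCM payloads are handled entirely in software. */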
2253 		if (crde->crd_len == 0) {
2254 			mtx_unlock(&sc->lock);
2255 			ccr_gcm_soft(s, crp, crda, crde);
2256 			return (0);
2257 		}
2258 		error = ccr_gcm(sc, s, crp, crda, crde);
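		/*
		 * EMSGSIZE indicates the request did not fit in a work
		 * request, so process it in software instead.
		 */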
2259 		if (error == EMSGSIZE) {
2260 			sc->stats_sw_fallback++;
2261 			mtx_unlock(&sc->lock);
2262 			ccr_gcm_soft(s, crp, crda, crde);
2263 			return (0);
2264 		}
2265 		if (error == 0) {
2266 			if (crde->crd_flags & CRD_F_ENCRYPT)
2267 				sc->stats_gcm_encrypt++;
2268 			else
2269 				sc->stats_gcm_decrypt++;
2270 		}
2271 		break;
2272 	}
2273 
2274 	if (error == 0) {
2275 		s->pending++;
2276 		sc->stats_inflight++;
2277 	} else
2278 		sc->stats_process_error++;
2279 
2280 out:
2281 	mtx_unlock(&sc->lock);
2282 
2283 	if (error) {
2284 		crp->crp_etype = error;
2285 		crypto_done(crp);
2286 	}
2287 
2288 	return (0);
2289 }
2290 
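/*
 * Completion handler for crypto work requests.  The CPL_FW6_PLD reply
 * carries the original cryptop pointer and a status word flagging MAC
 * and padding errors.
 */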
2291 static int
2292 do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
2293     struct mbuf *m)
2294 {
2295 	struct ccr_softc *sc = iq->adapter->ccr_softc;
2296 	struct ccr_session *s;
2297 	const struct cpl_fw6_pld *cpl;
2298 	struct cryptop *crp;
2299 	uint32_t status;
2300 	int error;
2301 
2302 	if (m != NULL)
2303 		cpl = mtod(m, const void *);
2304 	else
2305 		cpl = (const void *)(rss + 1);
2306 
2307 	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
2308 	s = crypto_get_driver_session(crp->crp_session);
2309 	status = be64toh(cpl->data[0]);
2310 	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
2311 		error = EBADMSG;
2312 	else
2313 		error = 0;
2314 
2315 	mtx_lock(&sc->lock);
2316 	s->pending--;
2317 	sc->stats_inflight--;
2318 
2319 	switch (s->mode) {
2320 	case HASH:
2321 	case HMAC:
2322 		error = ccr_hash_done(sc, s, crp, cpl, error);
2323 		break;
2324 	case BLKCIPHER:
2325 		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
2326 		break;
2327 	case AUTHENC:
2328 		error = ccr_authenc_done(sc, s, crp, cpl, error);
2329 		break;
2330 	case GCM:
2331 		error = ccr_gcm_done(sc, s, crp, cpl, error);
2332 		break;
2333 	}
2334 
2335 	if (error == EBADMSG) {
2336 		if (CHK_MAC_ERR_BIT(status))
2337 			sc->stats_mac_error++;
2338 		if (CHK_PAD_ERR_BIT(status))
2339 			sc->stats_pad_error++;
2340 	}
2341 	mtx_unlock(&sc->lock);
2342 	crp->crp_etype = error;
2343 	crypto_done(crp);
2344 	m_freem(m);
2345 	return (0);
2346 }
2347 
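/*
 * Install or remove the CPL_FW6_PLD handler in the cxgbe CPL dispatch
 * table at module load and unload.
 */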
2348 static int
2349 ccr_modevent(module_t mod, int cmd, void *arg)
2350 {
2351 
2352 	switch (cmd) {
2353 	case MOD_LOAD:
2354 		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
2355 		return (0);
2356 	case MOD_UNLOAD:
2357 		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
2358 		return (0);
2359 	default:
2360 		return (EOPNOTSUPP);
2361 	}
2362 }
2363 
2364 static device_method_t ccr_methods[] = {
2365 	DEVMETHOD(device_identify,	ccr_identify),
2366 	DEVMETHOD(device_probe,		ccr_probe),
2367 	DEVMETHOD(device_attach,	ccr_attach),
2368 	DEVMETHOD(device_detach,	ccr_detach),
2369 
2370 	DEVMETHOD(cryptodev_newsession,	ccr_newsession),
2371 	DEVMETHOD(cryptodev_freesession, ccr_freesession),
2372 	DEVMETHOD(cryptodev_process,	ccr_process),
2373 
2374 	DEVMETHOD_END
2375 };
2376 
2377 static driver_t ccr_driver = {
2378 	"ccr",
2379 	ccr_methods,
2380 	sizeof(struct ccr_softc)
2381 };
2382 
2383 static devclass_t ccr_devclass;
2384 
2385 DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
2386 MODULE_VERSION(ccr, 1);
2387 MODULE_DEPEND(ccr, crypto, 1, 1, 1);
2388 MODULE_DEPEND(ccr, t6nex, 1, 1, 1);
2389