/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"

/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       |  ----- For requests with AES
 * +-------------------------------+
 * | Hash state                    |  ----- For hash-only requests
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+  +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        |  ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+  +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                |  ----- Only for HMAC/hash-only requests
 * +-------------------------------+
 * | IV                            |  ----- If immediate IV
 * +-------------------------------+
 * | Payload                       |  ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+  +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
 * For HMAC requests, the key consists of the partial hash of the IPAD
 * followed by the partial hash of the OPAD.
 *
 * Replies consist of:
 *
 * +-------------------------------+
 * | struct cpl_fw6_pld            |
 * +-------------------------------+
 * | hash digest                   |  ----- For HMAC requests with
 * +-------------------------------+        'hash_size' set in the work request
 *
 * A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
 * "MAC" error and bit 1 indicates a "PAD" error.
 *
 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
 * in the request is returned in data[1] of the CPL_FW6_PLD message.
 *
 * For block cipher replies, the updated IV is supplied in data[2] and
 * data[3] of the CPL_FW6_PLD message.
 *
 * For hash replies where the work request set 'hash_size' to request
 * a copy of the hash in the reply, the hash digest is supplied
 * immediately following the CPL_FW6_PLD message.
 */
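
/*
 * Illustrative sketch (not used by the driver): decoding the error
 * status word described above from a CPL_FW6_PLD reply.  The bit
 * masks mirror the layout in the comment; the function name is local
 * to this example.
 */
static int __unused
ccr_example_check_status(const struct cpl_fw6_pld *cpl)
{
	uint32_t status;

	/* The status word is the last 4 bytes of big-endian data[0]. */
	status = be64toh(cpl->data[0]) & 0xffffffff;
	if (status & 0x1)	/* bit 0: "MAC" error */
		return (EBADMSG);
	if (status & 0x2)	/* bit 1: "PAD" error */
		return (EBADMSG);
	return (0);
}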

/*
 * The crypto engine supports a maximum AAD size of 511 bytes.
 */
#define	MAX_AAD_LEN		511

/*
 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
 * entries.  While the CPL includes a 16-bit length field, the T6 can
 * sometimes hang if an error occurs while processing a request with a
 * single DSGL entry larger than 2k.
 */
#define	MAX_RX_PHYS_DSGL_SGE	32
#define	DSGL_SGE_MAXLEN		2048

/*
 * The adapter only supports requests with a total input or output
 * length of 64k-1 or smaller.  Larger requests either hang or
 * produce incorrect results.
 */
#define	MAX_REQUEST_SIZE	65535

static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");

struct ccr_session_hmac {
	struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2];
};

struct ccr_session_gmac {
	int hash_len;
	char ghash_h[GMAC_BLOCK_LEN];
};

struct ccr_session_ccm_mac {
	int hash_len;
};

struct ccr_session_blkcipher {
	unsigned int cipher_mode;
	unsigned int key_len;
	unsigned int iv_len;
	__be32 key_ctx_hdr;
	char enckey[CHCR_AES_MAX_KEY_LEN];
	char deckey[CHCR_AES_MAX_KEY_LEN];
};

struct ccr_port {
	struct sge_wrq *txq;
	struct sge_rxq *rxq;
	int tx_channel_id;
	u_int active_sessions;
};

struct ccr_session {
	bool active;
	int pending;
	enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode;
	struct ccr_port *port;
	union {
		struct ccr_session_hmac hmac;
		struct ccr_session_gmac gmac;
		struct ccr_session_ccm_mac ccm_mac;
	};
	struct ccr_session_blkcipher blkcipher;
};

struct ccr_softc {
	struct adapter *adapter;
	device_t dev;
	uint32_t cid;
	struct mtx lock;
	bool detaching;
	struct ccr_port ports[MAX_NPORTS];
	u_int port_mask;

	/*
	 * Pre-allocate S/G lists used when preparing a work request.
	 * 'sg_input' contains an sglist describing the entire input
	 * buffer for a 'struct cryptop'.  'sg_output' contains an
	 * sglist describing the entire output buffer.  'sg_ulptx' is
	 * used to describe the data the engine should DMA as input
	 * via ULPTX_SGL.  'sg_dsgl' is used to describe the
	 * destination that cipher text and a tag should be written
	 * to.
	 */
	struct sglist *sg_input;
	struct sglist *sg_output;
	struct sglist *sg_ulptx;
	struct sglist *sg_dsgl;

	/*
	 * Pre-allocate a dummy output buffer for the IV and AAD for
	 * AEAD requests.
	 */
	char *iv_aad_buf;
	struct sglist *sg_iv_aad;

	/* Statistics. */
	uint64_t stats_blkcipher_encrypt;
	uint64_t stats_blkcipher_decrypt;
	uint64_t stats_hash;
	uint64_t stats_hmac;
	uint64_t stats_eta_encrypt;
	uint64_t stats_eta_decrypt;
	uint64_t stats_gcm_encrypt;
	uint64_t stats_gcm_decrypt;
	uint64_t stats_ccm_encrypt;
	uint64_t stats_ccm_decrypt;
	uint64_t stats_wr_nomem;
	uint64_t stats_inflight;
	uint64_t stats_mac_error;
	uint64_t stats_pad_error;
	uint64_t stats_bad_session;
	uint64_t stats_sglist_error;
	uint64_t stats_process_error;
	uint64_t stats_sw_fallback;
};

/*
 * Crypto requests involve two kinds of scatter/gather lists.
 *
 * Non-hash-only requests require a PHYS_DSGL that describes the
 * location to store the results of the encryption or decryption
 * operation.  This SGL uses a different format (PHYS_DSGL) and should
 * exclude the skip bytes at the start of the data as well as any AAD
 * or IV.  For authenticated encryption requests it should include the
 * destination of the hash or tag.
 *
 * The input payload may either be supplied inline as immediate data,
 * or via a standard ULP_TX SGL.  This SGL should include AAD,
 * ciphertext, and the hash or tag for authenticated decryption
 * requests.
 *
 * These scatter/gather lists can describe different subsets of the
 * buffers described by the crypto operation.  ccr_populate_sglist()
 * generates a scatter/gather list that covers an entire crypto
 * operation buffer that is then used to construct the other
 * scatter/gather lists.
 */
static int
ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb)
{
	int error;

	sglist_reset(sg);
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
		error = sglist_append_mbuf(sg, cb->cb_mbuf);
		break;
	case CRYPTO_BUF_UIO:
		error = sglist_append_uio(sg, cb->cb_uio);
		break;
	case CRYPTO_BUF_CONTIG:
		error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
		break;
	default:
		error = EINVAL;
	}
	return (error);
}
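
/*
 * Example usage (a sketch of the request path, not a fixed API): the
 * full-buffer lists built by ccr_populate_sglist() serve as the
 * source from which sub-ranges are carved, e.g. just the payload
 * region of a request:
 *
 *	ccr_populate_sglist(sc->sg_input, &crp->crp_buf);
 *	sglist_reset(sc->sg_ulptx);
 *	sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
 *	    crp->crp_payload_start, crp->crp_payload_length);
 */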

/*
 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
 * segments.
 */
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
	int i, nsegs;

	nsegs = 0;
	for (i = 0; i < sg->sg_nseg; i++)
		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
	return (nsegs);
}

/* These functions deal with PHYS_DSGL for the reply buffer. */
static inline int
ccr_phys_dsgl_len(int nsegs)
{
	int len;

	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
	if ((nsegs % 8) != 0) {
		len += sizeof(uint16_t) * 8;
		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
	}
	return (len);
}
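
/*
 * Worked example: nsegs = 11 fills one phys_sge_pairs group of 8
 * entries plus a partial group of 3.  The partial group still
 * carries all 8 16-bit lengths, and its 64-bit address slots are
 * rounded up to an even count, so:
 *
 *	len = 1 * sizeof(struct phys_sge_pairs)		(full group)
 *	    + 8 * sizeof(uint16_t)			(lengths)
 *	    + roundup2(3, 2) * sizeof(uint64_t)		(4 addresses)
 */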

static void
ccr_write_phys_dsgl(struct ccr_softc *sc, struct ccr_session *s, void *dst,
    int nsegs)
{
	struct sglist *sg;
	struct cpl_rx_phys_dsgl *cpl;
	struct phys_sge_pairs *sgl;
	vm_paddr_t paddr;
	size_t seglen;
	u_int i, j;

	sg = sc->sg_dsgl;
	cpl = dst;
	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
	cpl->pcirlxorder_to_noofsgentr = htobe32(
	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id);
	cpl->rss_hdr_int.hash_val = 0;
	sgl = (struct phys_sge_pairs *)(cpl + 1);
	j = 0;
	for (i = 0; i < sg->sg_nseg; i++) {
		seglen = sg->sg_segs[i].ss_len;
		paddr = sg->sg_segs[i].ss_paddr;
		do {
			sgl->addr[j] = htobe64(paddr);
			if (seglen > DSGL_SGE_MAXLEN) {
				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
				paddr += DSGL_SGE_MAXLEN;
				seglen -= DSGL_SGE_MAXLEN;
			} else {
				sgl->len[j] = htobe16(seglen);
				seglen = 0;
			}
			j++;
			if (j == 8) {
				sgl++;
				j = 0;
			}
		} while (seglen != 0);
	}
	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}

/* These functions deal with the ULPTX_SGL for input payload. */
static inline int
ccr_ulptx_sgl_len(int nsegs)
{
	u_int n;

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (roundup2(n, 16));
}
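
/*
 * Worked example: for nsegs = 5, one segment is embedded in struct
 * ulptx_sgl (16 bytes) and the remaining 4 pack as len/addr flit
 * pairs, so n = 16 + 8 * ((3 * 4) / 2 + (4 & 1)) = 16 + 48 = 64
 * bytes, which is already a multiple of 16.
 */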

static void
ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct ulptx_sgl *usgl;
	struct sglist *sg;
	struct sglist_seg *ss;
	int i;

	sg = sc->sg_ulptx;
	MPASS(nsegs == sg->sg_nseg);
	ss = &sg->sg_segs[0];
	usgl = dst;
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(ss->ss_len);
	usgl->addr0 = htobe64(ss->ss_paddr);
	ss++;
	for (i = 0; i < sg->sg_nseg - 1; i++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
		ss++;
	}
}

static bool
ccr_use_imm_data(u_int transhdr_len, u_int input_len)
{

	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
		return (false);
	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
	    SGE_MAX_WR_LEN)
		return (false);
	return (true);
}
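
/*
 * Example: a 128-byte payload with a transfer header that rounds up
 * to 160 bytes qualifies as immediate data as long as 128 is at most
 * CRYPTO_MAX_IMM_TX_PKT_LEN and 160 + 128 bytes fit within
 * SGE_MAX_WR_LEN; larger inputs instead pass the payload by
 * reference via a ULPTX_SGL.
 */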

static void
ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s,
    struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len,
    u_int sgl_len, u_int hash_size, struct cryptop *crp)
{
	u_int cctx_size, idata_len;

	cctx_size = sizeof(struct _key_ctx) + kctx_len;
	crwr->wreq.op_to_cctx_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
	crwr->wreq.len16_pkd = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
	crwr->wreq.session_id = 0;
	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->tx_channel_id) |
	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id));
	crwr->wreq.key_addr = 0;
	crwr->wreq.pld_size_hash_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
	crwr->wreq.cookie = htobe64((uintptr_t)crp);

	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
	    V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(s->port->rxq->iq.abs_id) | V_ULP_TXPKT_RO(1));
	crwr->ulptx.len = htobe32(
	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));

	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0));
	idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len;
	if (imm_len % 16 != 0)
		idata_len -= 16 - imm_len % 16;
	crwr->sc_imm.len = htobe32(idata_len);
}

static int
ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	char *dst;
	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
	u_int hmac_ctrl, imm_len, iopad_size;
	int error, sgl_nsegs, sgl_len, use_opad;

	/* Reject requests with an input buffer that is too large. */
	if (crp->crp_payload_length > MAX_REQUEST_SIZE)
		return (EFBIG);

	axf = s->hmac.auth_hash;

	if (s->mode == HMAC) {
		use_opad = 1;
		hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
	} else {
		use_opad = 0;
		hmac_ctrl = SCMD_HMAC_CTRL_NOP;
	}

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the context includes the aligned IPAD and
	 * OPAD.
	 */
	kctx_len = iopad_size;
	if (use_opad)
		kctx_len += iopad_size;
	hash_size_in_response = axf->hashsize;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);

	if (crp->crp_payload_length == 0) {
		imm_len = axf->blocksize;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) {
		imm_len = crp->crp_payload_length;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    hash_size_in_response, crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));

	crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ?
	    axf->blocksize : crp->crp_payload_length);

	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_LAST_FRAG(0) |
	    V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) |
	    V_SCMD_MAC_ONLY(1));

	memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len);

	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_OPAD_PRESENT(use_opad) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) |
	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));

	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
	if (crp->crp_payload_length == 0) {
		dst[0] = 0x80;
		if (s->mode == HMAC)
			*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
			    htobe64(axf->blocksize << 3);
	} else if (imm_len != 0)
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    const struct cpl_fw6_pld *cpl, int error)
{
	uint8_t hash[HASH_MAX_LEN];

	if (error)
		return (error);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
		    hash);
		if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0)
			return (EBADMSG);
	} else
		crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
		    (cpl + 1));
	return (0);
}

static int
ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int imm_len, iv_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0)
		return (EINVAL);
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
	    (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* Reject requests with an input buffer that is too large. */
	if (crp->crp_payload_length > MAX_REQUEST_SIZE)
		return (EFBIG);

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	sglist_reset(sc->sg_dsgl);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* The 'key' must be 128-bit aligned. */
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/* For AES-XTS we send a 16-byte IV in the work request. */
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) {
		imm_len = crp->crp_payload_length;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	crypto_read_iv(crp, iv);

	/* Zero the remainder of the IV for AES-XTS. */
	memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (s->blkcipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_CTR:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0)
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

/*
 * 'hashsize' is the length of a full digest.  'authsize' is the
 * requested digest length for this operation which may be less
 * than 'hashsize'.
 */
static int
ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
{

	if (authsize == 10)
		return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
	if (authsize == 12)
		return (SCMD_HMAC_CTRL_IPSEC_96BIT);
	if (authsize == hashsize / 2)
		return (SCMD_HMAC_CTRL_DIV2);
	return (SCMD_HMAC_CTRL_NO_TRUNC);
}
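
/*
 * For example, HMAC-SHA2-256 (hashsize 32) truncated to a 16-byte
 * digest selects SCMD_HMAC_CTRL_DIV2, while the full 32-byte digest
 * selects SCMD_HMAC_CTRL_NO_TRUNC.
 */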

static int
ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len, iopad_size, iv_len;
	u_int aad_start, aad_stop;
	u_int auth_insert;
	u_int cipher_start, cipher_stop;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	/*
	 * If there is a need in the future, requests with an empty
	 * payload could be supported as HMAC-only requests.
	 */
	if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0)
		return (EINVAL);
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
	    (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* For AES-XTS we send a 16-byte IV in the work request. */
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	if (crp->crp_aad_length + iv_len > MAX_AAD_LEN)
		return (EINVAL);

	axf = s->hmac.auth_hash;
	hash_size_in_response = s->hmac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the hash when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
	    iv_len + crp->crp_aad_length);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the IPAD and OPAD.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crp->crp_aad_length + crp->crp_payload_length;

	/*
	 * The firmware hangs if sent a request that is only slightly
	 * smaller than MAX_REQUEST_SIZE.  In particular, the firmware
	 * appears to require 512 - 16 bytes of spare room along with
	 * the size of the hash even if the hash isn't included in the
	 * input buffer.
	 */
	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
	    MAX_REQUEST_SIZE)
		return (EFBIG);
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;

	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				error = sglist_append(sc->sg_ulptx,
				    crp->crp_aad, crp->crp_aad_length);
			else
				error = sglist_append_sglist(sc->sg_ulptx,
				    sc->sg_input, crp->crp_aad_start,
				    crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	/* Any AAD comes after the IV. */
	if (crp->crp_aad_length != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crp->crp_aad_length - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crp->crp_aad_length + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;
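
	/*
	 * Worked example of the offsets above (1-based, measured from
	 * the start of the input buffer): with a 16-byte IV and 13
	 * bytes of AAD, aad_start = 17, aad_stop = 29, and
	 * cipher_start = 30.  For decryption, cipher_stop and
	 * auth_insert are both set to the digest length, so the
	 * cipher and auth regions stop short of the trailing hash and
	 * the engine knows where to place its computed digest.
	 */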

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	crypto_read_iv(crp, iv);

	/* Zero the remainder of the IV for AES-XTS. */
	memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (s->blkcipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_CTR:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->hmac.pads, iopad_size * 2);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				memcpy(dst, crp->crp_aad, crp->crp_aad_length);
			else
				crypto_copydata(crp, crp->crp_aad_start,
				    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

static int
ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle GCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crp->crp_payload_length == 0)
		return (EMSGSIZE);

	if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->gmac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The IV handling for GCM in OCF is a bit more complicated in
	 * that IPSec provides a full 16-byte IV (including the
	 * counter), whereas the /dev/crypto interface sometimes
	 * provides a full 16-byte IV (if no IV is provided in the
	 * ioctl) and sometimes a 12-byte IV (if the IV was explicit).
	 *
	 * When provided a 12-byte IV, assume the IV is really 16 bytes
	 * with a counter in the last 4 bytes initialized to 1.
	 *
	 * While iv_len is checked below, the value is currently
	 * always set to 12 when creating a GCM session in this driver
	 * due to limitations in OCF (there is no way to know what the
	 * IV length of a given request will be).  This means that the
	 * driver always assumes a 12-byte IV for now.
	 */
	if (s->blkcipher.iv_len == 12)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	/*
	 * GCM requests should always provide an explicit IV.
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    crp->crp_aad_length);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the Galois hash key.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crp->crp_aad_length + crp->crp_payload_length;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				error = sglist_append(sc->sg_ulptx,
				    crp->crp_aad, crp->crp_aad_length);
			else
				error = sglist_append_sglist(sc->sg_ulptx,
				    sc->sg_input, crp->crp_aad_start,
				    crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	if (crp->crp_aad_length != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crp->crp_aad_length - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crp->crp_aad_length + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	memcpy(iv, crp->crp_iv, s->blkcipher.iv_len);
	if (s->blkcipher.iv_len == 12)
		*(uint32_t *)&iv[12] = htobe32(1);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  On encrypt it
	 * should normally be set to 0 anyway.  However, for decrypt
	 * the cipher ends before the tag in the ETA case (and
	 * authstop is set to stop before the tag), but for GCM the
	 * cipher still runs to the end of the buffer.  It is unclear
	 * whether this is intentional or a firmware quirk, but it is
	 * required for working tag validation with GCM decryption.
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				memcpy(dst, crp->crp_aad, crp->crp_aad_length);
			else
				crypto_copydata(crp, crp->crp_aad_start,
				    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the GMAC hash.
	 */
	return (error);
}

/*
 * Handle a GCM request that is not supported by the crypto engine by
 * performing the operation in software.  Derived from swcr_authenc().
 */
static void
ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp)
{
	struct auth_hash *axf;
	struct enc_xform *exf;
	void *auth_ctx, *kschedule;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int error, i, len;

	auth_ctx = NULL;
	kschedule = NULL;

	/* Initialize the MAC. */
	switch (s->blkcipher.key_len) {
	case 16:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 24:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 32:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		error = EINVAL;
		goto out;
	}
	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
	if (auth_ctx == NULL) {
		error = ENOMEM;
		goto out;
	}
	axf->Init(auth_ctx);
	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);

	/* Initialize the cipher. */
	exf = &enc_xform_aes_nist_gcm;
	kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT);
	if (kschedule == NULL) {
		error = ENOMEM;
		goto out;
	}
	error = exf->setkey(kschedule, s->blkcipher.enckey,
	    s->blkcipher.key_len);
	if (error)
		goto out;

	/*
	 * This assumes a 12-byte IV from the crp.  See the longer
	 * comment above in ccr_gcm() for more details.
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) {
		error = EINVAL;
		goto out;
	}
	memcpy(iv, crp->crp_iv, 12);
	*(uint32_t *)&iv[12] = htobe32(1);

	axf->Reinit(auth_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	if (crp->crp_aad != NULL) {
		len = rounddown(crp->crp_aad_length, sizeof(block));
		if (len != 0)
			axf->Update(auth_ctx, crp->crp_aad, len);
		if (crp->crp_aad_length != len) {
			memset(block, 0, sizeof(block));
			memcpy(block, (char *)crp->crp_aad + len,
			    crp->crp_aad_length - len);
			axf->Update(auth_ctx, block, sizeof(block));
		}
	} else {
		for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) {
			len = imin(crp->crp_aad_length - i, sizeof(block));
			crypto_copydata(crp, crp->crp_aad_start + i, len,
			    block);
			bzero(block + len, sizeof(block) - len);
			axf->Update(auth_ctx, block, sizeof(block));
		}
	}

	exf->reinit(kschedule, iv);

	/* Do encryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) {
		len = imin(crp->crp_payload_length - i, sizeof(block));
		crypto_copydata(crp, crp->crp_payload_start + i, len, block);
		bzero(block + len, sizeof(block) - len);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(kschedule, block, block);
			axf->Update(auth_ctx, block, len);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    block);
		} else {
			axf->Update(auth_ctx, block, len);
		}
	}

	/* Length block. */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8);
	((uint32_t *)block)[3] = htobe32(crp->crp_payload_length * 8);
	axf->Update(auth_ctx, block, sizeof(block));

	/* Finalize MAC. */
	axf->Final(digest, auth_ctx);

	/* Inject or validate tag. */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copyback(crp, crp->crp_digest_start, sizeof(digest),
		    digest);
		error = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2),
		    digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
			error = 0;

			/* Tag matches, decrypt data. */
			for (i = 0; i < crp->crp_payload_length;
			     i += sizeof(block)) {
				len = imin(crp->crp_payload_length - i,
				    sizeof(block));
				crypto_copydata(crp, crp->crp_payload_start + i,
				    len, block);
				bzero(block + len, sizeof(block) - len);
				exf->decrypt(kschedule, block, block);
				crypto_copyback(crp, crp->crp_payload_start + i,
				    len, block);
			}
		} else
			error = EBADMSG;
		explicit_bzero(digest2, sizeof(digest2));
	}

out:
	zfree(kschedule, M_CCR);
	zfree(auth_ctx, M_CCR);
	explicit_bzero(block, sizeof(block));
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(digest, sizeof(digest));
	crp->crp_etype = error;
	crypto_done(crp);
}

static void
generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response,
    const char *iv, char *b0)
{
	u_int i, payload_len;

	/* NB: L is already set in the first byte of the IV. */
	memcpy(b0, iv, CCM_B0_SIZE);

	/* Set length of hash in bits 3 - 5. */
	b0[0] |= (((hash_size_in_response - 2) / 2) << 3);

	/* Store the payload length as a big-endian value. */
	payload_len = crp->crp_payload_length;
	for (i = 0; i < iv[0]; i++) {
		b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
		payload_len >>= 8;
	}

	/*
	 * If there is AAD in the request, set bit 6 in the flags
	 * field and store the AAD length as a big-endian value at the
	 * start of block 1.  This assumes a 16-bit AAD length since
	 * the T6 doesn't support larger AAD sizes.
	 */
	if (crp->crp_aad_length != 0) {
		b0[0] |= (1 << 6);
		*(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length);
	}
}
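
/*
 * Worked example (assuming the usual 12-byte OCF nonce, for which
 * iv[0] holds L' = 2): the loop above stores the payload length
 * big-endian in b0[14] and b0[15], which is sufficient since
 * requests are capped at 64KB - 1.  A 16-byte tag sets bits 3 - 5 of
 * the flags octet to (16 - 2) / 2 = 7, and any AAD additionally sets
 * bit 6 and stores a 16-bit AAD length at the start of block 1.
 */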

static int
ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct ulptx_idata *idata;
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int aad_len, b0_len, hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle CCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crp->crp_payload_length == 0)
		return (EMSGSIZE);

	/*
	 * CCM always includes block 0 in the AAD before AAD from the
	 * request.
	 */
	b0_len = CCM_B0_SIZE;
	if (crp->crp_aad_length != 0)
		b0_len += CCM_AAD_FIELD_SIZE;
	aad_len = b0_len + crp->crp_aad_length;

	/*
	 * CCM requests should always provide an explicit IV (really
	 * the nonce).
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/*
	 * Always assume a 12-byte input nonce for now since that is
	 * what OCF always generates.  The full IV in the work request
	 * is 16 bytes.
	 */
1616 	iv_len = AES_BLOCK_LEN;
1617 
1618 	if (iv_len + aad_len > MAX_AAD_LEN)
1619 		return (EMSGSIZE);
1620 
1621 	hash_size_in_response = s->ccm_mac.hash_len;
1622 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1623 		op_type = CHCR_ENCRYPT_OP;
1624 	else
1625 		op_type = CHCR_DECRYPT_OP;
1626 
1627 	/*
1628 	 * The output buffer consists of the cipher text followed by
1629 	 * the tag when encrypting.  For decryption it only contains
1630 	 * the plain text.
1631 	 *
1632 	 * Due to a firmware bug, the output buffer must include a
1633 	 * dummy output buffer for the IV and AAD prior to the real
1634 	 * output buffer.
1635 	 */
1636 	if (op_type == CHCR_ENCRYPT_OP) {
1637 		if (iv_len + aad_len + crp->crp_payload_length +
1638 		    hash_size_in_response > MAX_REQUEST_SIZE)
1639 			return (EFBIG);
1640 	} else {
1641 		if (iv_len + aad_len + crp->crp_payload_length >
1642 		    MAX_REQUEST_SIZE)
1643 			return (EFBIG);
1644 	}
1645 	sglist_reset(sc->sg_dsgl);
1646 	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1647 	    aad_len);
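	/*
	 * This first entry is the dummy destination for the IV and
	 * AAD mandated by the firmware bug described above.
	 */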
1648 	if (error)
1649 		return (error);
1650 	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1651 		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
1652 		    crp->crp_payload_output_start, crp->crp_payload_length);
1653 	else
1654 		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
1655 		    crp->crp_payload_start, crp->crp_payload_length);
1656 	if (error)
1657 		return (error);
1658 	if (op_type == CHCR_ENCRYPT_OP) {
1659 		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1660 			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
1661 			    crp->crp_digest_start, hash_size_in_response);
1662 		else
1663 			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
1664 			    crp->crp_digest_start, hash_size_in_response);
1665 		if (error)
1666 			return (error);
1667 	}
1668 	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
1669 	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1670 		return (EFBIG);
1671 	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1672 
1673 	/*
1674 	 * The 'key' part of the key context consists of two copies of
1675 	 * the AES key.
1676 	 */
1677 	kctx_len = roundup2(s->blkcipher.key_len, 16) * 2;
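	/* E.g., a 32-byte AES-256 key: roundup2(32, 16) * 2 = 64 bytes. */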
1678 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1679 
1680 	/*
1681 	 * The input buffer consists of the IV, AAD (including block
1682 	 * 0), and then the cipher/plain text.  For decryption
1683 	 * requests the hash is appended after the cipher text.
1684 	 *
1685 	 * The IV is always stored at the start of the input buffer
1686 	 * even though it may be duplicated in the payload.  The
1687 	 * crypto engine doesn't work properly if the IV offset points
1688 	 * inside of the AAD region, so a second copy is always
1689 	 * required.
1690 	 */
1691 	input_len = aad_len + crp->crp_payload_length;
1692 	if (op_type == CHCR_DECRYPT_OP)
1693 		input_len += hash_size_in_response;
1694 	if (input_len > MAX_REQUEST_SIZE)
1695 		return (EFBIG);
1696 	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1697 		imm_len = input_len;
1698 		sgl_nsegs = 0;
1699 		sgl_len = 0;
1700 	} else {
1701 		/* Block 0 is passed as immediate data. */
1702 		imm_len = b0_len;
1703 
1704 		sglist_reset(sc->sg_ulptx);
1705 		if (crp->crp_aad_length != 0) {
1706 			if (crp->crp_aad != NULL)
1707 				error = sglist_append(sc->sg_ulptx,
1708 				    crp->crp_aad, crp->crp_aad_length);
1709 			else
1710 				error = sglist_append_sglist(sc->sg_ulptx,
1711 				    sc->sg_input, crp->crp_aad_start,
1712 				    crp->crp_aad_length);
1713 			if (error)
1714 				return (error);
1715 		}
1716 		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
1717 		    crp->crp_payload_start, crp->crp_payload_length);
1718 		if (error)
1719 			return (error);
1720 		if (op_type == CHCR_DECRYPT_OP) {
1721 			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
1722 			    crp->crp_digest_start, hash_size_in_response);
1723 			if (error)
1724 				return (error);
1725 		}
1726 		sgl_nsegs = sc->sg_ulptx->sg_nseg;
1727 		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1728 	}
1729 
1730 	aad_start = iv_len + 1;
1731 	aad_stop = aad_start + aad_len - 1;
1732 	cipher_start = aad_stop + 1;
1733 	if (op_type == CHCR_DECRYPT_OP)
1734 		cipher_stop = hash_size_in_response;
1735 	else
1736 		cipher_stop = 0;
1737 	if (op_type == CHCR_DECRYPT_OP)
1738 		auth_insert = hash_size_in_response;
1739 	else
1740 		auth_insert = 0;
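	/*
	 * NB: the start/stop values are 1-based offsets from the
	 * start of the IV.  Continuing the example above (iv_len =
	 * 16, aad_len = 38): aad_start = 17, aad_stop = 54, and
	 * cipher_start = 55.
	 */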
1741 
1742 	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
1743 	    sgl_len;
1744 	if (wr_len > SGE_MAX_WR_LEN)
1745 		return (EFBIG);
1746 	wr = alloc_wrqe(wr_len, s->port->txq);
1747 	if (wr == NULL) {
1748 		sc->stats_wr_nomem++;
1749 		return (ENOMEM);
1750 	}
1751 	crwr = wrtod(wr);
1752 	memset(crwr, 0, wr_len);
1753 
1754 	/*
1755 	 * Read the nonce from the request.  Use the nonce to generate
1756 	 * the full IV with the counter set to 0.
1757 	 */
1758 	memset(iv, 0, iv_len);
1759 	iv[0] = (15 - AES_CCM_IV_LEN) - 1;
1760 	memcpy(iv + 1, crp->crp_iv, AES_CCM_IV_LEN);
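	/*
	 * For the 12-byte nonce this sets iv[0] = (15 - 12) - 1 = 2
	 * (the L' encoding of L = 3) and leaves iv[13..15] as the
	 * zeroed counter.
	 */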
1761 
1762 	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
1763 	    crp);
1764 
1765 	crwr->sec_cpl.op_ivinsrtofst = htobe32(
1766 	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1767 	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
1768 	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1769 	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1770 	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1771 
1772 	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1773 
1774 	/*
1775 	 * NB: cipherstop is explicitly set to 0.  See comments above
1776 	 * in ccr_gcm().
1777 	 */
1778 	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1779 	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1780 	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1781 	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1782 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
1783 	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1784 	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
1785 	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1786 	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1787 	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1788 
1789 	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1790 	hmac_ctrl = ccr_hmac_ctrl(AES_CBC_MAC_HASH_LEN, hash_size_in_response);
1791 	crwr->sec_cpl.seqno_numivs = htobe32(
1792 	    V_SCMD_SEQ_NO_CTRL(0) |
1793 	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
1794 	    V_SCMD_ENC_DEC_CTRL(op_type) |
1795 	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 0 : 1) |
1796 	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) |
1797 	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) |
1798 	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
1799 	    V_SCMD_IV_SIZE(iv_len / 2) |
1800 	    V_SCMD_NUM_IVS(0));
1801 	crwr->sec_cpl.ivgen_hdrlen = htobe32(
1802 	    V_SCMD_IV_GEN_CTRL(0) |
1803 	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1804 	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1805 
1806 	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
1807 	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
1808 	memcpy(crwr->key_ctx.key + roundup(s->blkcipher.key_len, 16),
1809 	    s->blkcipher.enckey, s->blkcipher.key_len);
1810 
1811 	dst = (char *)(crwr + 1) + kctx_len;
1812 	ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
1813 	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1814 	memcpy(dst, iv, iv_len);
1815 	dst += iv_len;
1816 	generate_ccm_b0(crp, hash_size_in_response, iv, dst);
1817 	if (sgl_nsegs == 0) {
1818 		dst += b0_len;
1819 		if (crp->crp_aad_length != 0) {
1820 			if (crp->crp_aad != NULL)
1821 				memcpy(dst, crp->crp_aad, crp->crp_aad_length);
1822 			else
1823 				crypto_copydata(crp, crp->crp_aad_start,
1824 				    crp->crp_aad_length, dst);
1825 			dst += crp->crp_aad_length;
1826 		}
1827 		crypto_copydata(crp, crp->crp_payload_start,
1828 		    crp->crp_payload_length, dst);
1829 		dst += crp->crp_payload_length;
1830 		if (op_type == CHCR_DECRYPT_OP)
1831 			crypto_copydata(crp, crp->crp_digest_start,
1832 			    hash_size_in_response, dst);
1833 	} else {
1834 		dst += CCM_B0_SIZE;
1835 		if (b0_len > CCM_B0_SIZE) {
1836 			/*
1837 			 * If there is AAD, insert padding including a
1838 			 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL
1839 			 * is 16-byte aligned.
1840 			 */
1841 			KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE,
1842 			    ("b0_len mismatch"));
1843 			memset(dst + CCM_AAD_FIELD_SIZE, 0,
1844 			    8 - CCM_AAD_FIELD_SIZE);
1845 			idata = (void *)(dst + 8);
1846 			idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1847 			idata->len = htobe32(0);
1848 			dst = (void *)(idata + 1);
1849 		}
1850 		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
1851 	}
1852 
1853 	/* XXX: TODO backpressure */
1854 	t4_wrq_tx(sc->adapter, wr);
1855 
1856 	explicit_bzero(iv, sizeof(iv));
1857 	return (0);
1858 }
1859 
1860 static int
1861 ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s,
1862     struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1863 {
1864 
1865 	/*
1866 	 * The updated IV to permit chained requests is at
1867 	 * cpl->data[2], but OCF doesn't permit chained requests.
1868 	 *
1869 	 * Note that the hardware should always verify the CBC MAC
1870 	 * hash.
1871 	 */
1872 	return (error);
1873 }
1874 
1875 /*
1876  * Handle a CCM request that is not supported by the crypto engine by
1877  * performing the operation in software.  Derived from swcr_authenc().
1878  */
1879 static void
1880 ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp)
1881 {
1882 	struct auth_hash *axf;
1883 	struct enc_xform *exf;
1884 	union authctx *auth_ctx;
1885 	void *kschedule;
1886 	char block[CCM_CBC_BLOCK_LEN];
1887 	char digest[AES_CBC_MAC_HASH_LEN];
1888 	char iv[AES_CCM_IV_LEN];
1889 	int error, i, len;
1890 
1891 	auth_ctx = NULL;
1892 	kschedule = NULL;
1893 
1894 	/* Initialize the MAC. */
1895 	switch (s->blkcipher.key_len) {
1896 	case 16:
1897 		axf = &auth_hash_ccm_cbc_mac_128;
1898 		break;
1899 	case 24:
1900 		axf = &auth_hash_ccm_cbc_mac_192;
1901 		break;
1902 	case 32:
1903 		axf = &auth_hash_ccm_cbc_mac_256;
1904 		break;
1905 	default:
1906 		error = EINVAL;
1907 		goto out;
1908 	}
1909 	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
1910 	if (auth_ctx == NULL) {
1911 		error = ENOMEM;
1912 		goto out;
1913 	}
1914 	axf->Init(auth_ctx);
1915 	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);
1916 
1917 	/* Initialize the cipher. */
1918 	exf = &enc_xform_ccm;
1919 	kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT);
1920 	if (kschedule == NULL) {
1921 		error = ENOMEM;
1922 		goto out;
1923 	}
1924 	error = exf->setkey(kschedule, s->blkcipher.enckey,
1925 	    s->blkcipher.key_len);
1926 	if (error)
1927 		goto out;
1928 
1929 	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) {
1930 		error = EINVAL;
1931 		goto out;
1932 	}
1933 	memcpy(iv, crp->crp_iv, AES_CCM_IV_LEN);
1934 
1935 	auth_ctx->aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
1936 	auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;
1937 	axf->Reinit(auth_ctx, iv, sizeof(iv));
1938 
1939 	/* MAC the AAD. */
1940 	if (crp->crp_aad != NULL)
1941 		error = axf->Update(auth_ctx, crp->crp_aad,
1942 		    crp->crp_aad_length);
1943 	else
1944 		error = crypto_apply(crp, crp->crp_aad_start,
1945 		    crp->crp_aad_length, axf->Update, auth_ctx);
1946 	if (error)
1947 		goto out;
1948 
1949 	exf->reinit(kschedule, iv);
1950 
1951 	/*
	 * Do encryption/decryption with MAC.  CCM computes the CBC
	 * MAC over the plaintext, so MAC before encrypting and after
	 * decrypting.
	 */
1952 	for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) {
1953 		len = imin(crp->crp_payload_length - i, sizeof(block));
1954 		crypto_copydata(crp, crp->crp_payload_start + i, len, block);
1955 		bzero(block + len, sizeof(block) - len);
1956 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
1957 			axf->Update(auth_ctx, block, len);
1958 			exf->encrypt(kschedule, block, block);
1959 			crypto_copyback(crp, crp->crp_payload_start + i, len,
1960 			    block);
1961 		} else {
1962 			exf->decrypt(kschedule, block, block);
1963 			axf->Update(auth_ctx, block, len);
1964 		}
1965 	}
1966 
1967 	/* Finalize MAC. */
1968 	axf->Final(digest, auth_ctx);
1969 
1970 	/* Inject or validate tag. */
1971 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
1972 		crypto_copyback(crp, crp->crp_digest_start, sizeof(digest),
1973 		    digest);
1974 		error = 0;
1975 	} else {
1976 		char digest2[AES_CBC_MAC_HASH_LEN];
1977 
1978 		crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2),
1979 		    digest2);
1980 		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
1981 			error = 0;
1982 
1983 			/* Tag matches, decrypt data. */
1984 			exf->reinit(kschedule, iv);
1985 			for (i = 0; i < crp->crp_payload_length;
1986 			     i += sizeof(block)) {
1987 				len = imin(crp->crp_payload_length - i,
1988 				    sizeof(block));
1989 				crypto_copydata(crp, crp->crp_payload_start + i,
1990 				    len, block);
1991 				bzero(block + len, sizeof(block) - len);
1992 				exf->decrypt(kschedule, block, block);
1993 				crypto_copyback(crp, crp->crp_payload_start + i,
1994 				    len, block);
1995 			}
1996 		} else
1997 			error = EBADMSG;
1998 		explicit_bzero(digest2, sizeof(digest2));
1999 	}
2000 
2001 out:
2002 	zfree(kschedule, M_CCR);
2003 	zfree(auth_ctx, M_CCR);
2004 	explicit_bzero(block, sizeof(block));
2005 	explicit_bzero(iv, sizeof(iv));
2006 	explicit_bzero(digest, sizeof(digest));
2007 	crp->crp_etype = error;
2008 	crypto_done(crp);
2009 }
2010 
2011 static void
2012 ccr_identify(driver_t *driver, device_t parent)
2013 {
2014 	struct adapter *sc;
2015 
2016 	sc = device_get_softc(parent);
2017 	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
2018 	    device_find_child(parent, "ccr", -1) == NULL)
2019 		device_add_child(parent, "ccr", -1);
2020 }
2021 
2022 static int
2023 ccr_probe(device_t dev)
2024 {
2025 
2026 	device_set_desc(dev, "Chelsio Crypto Accelerator");
2027 	return (BUS_PROBE_DEFAULT);
2028 }
2029 
2030 static void
2031 ccr_sysctls(struct ccr_softc *sc)
2032 {
2033 	struct sysctl_ctx_list *ctx;
2034 	struct sysctl_oid *oid, *port_oid;
2035 	struct sysctl_oid_list *children;
2036 	char buf[16];
2037 	int i;
2038 
2039 	ctx = device_get_sysctl_ctx(sc->dev);
2040 
2041 	/*
2042 	 * dev.ccr.X.
2043 	 */
2044 	oid = device_get_sysctl_tree(sc->dev);
2045 	children = SYSCTL_CHILDREN(oid);
2046 
2047 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW,
2048 	    &sc->port_mask, 0, "Mask of enabled ports");
2049 
2050 	/*
2051 	 * dev.ccr.X.stats.
2052 	 */
2053 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
2054 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
2055 	children = SYSCTL_CHILDREN(oid);
2056 
2057 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD,
2058 	    &sc->stats_hash, 0, "Hash requests submitted");
2059 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
2060 	    &sc->stats_hmac, 0, "HMAC requests submitted");
2061 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
2062 	    &sc->stats_blkcipher_encrypt, 0,
2063 	    "Cipher encryption requests submitted");
2064 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
2065 	    &sc->stats_blkcipher_decrypt, 0,
2066 	    "Cipher decryption requests submitted");
2067 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_encrypt", CTLFLAG_RD,
2068 	    &sc->stats_eta_encrypt, 0,
2069 	    "Combined AES+HMAC encryption requests submitted");
2070 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_decrypt", CTLFLAG_RD,
2071 	    &sc->stats_eta_decrypt, 0,
2072 	    "Combined AES+HMAC decryption requests submitted");
2073 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
2074 	    &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
2075 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
2076 	    &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
2077 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_encrypt", CTLFLAG_RD,
2078 	    &sc->stats_ccm_encrypt, 0, "AES-CCM encryption requests submitted");
2079 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_decrypt", CTLFLAG_RD,
2080 	    &sc->stats_ccm_decrypt, 0, "AES-CCM decryption requests submitted");
2081 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
2082 	    &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
2083 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
2084 	    &sc->stats_inflight, 0, "Requests currently pending");
2085 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
2086 	    &sc->stats_mac_error, 0, "MAC errors");
2087 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
2088 	    &sc->stats_pad_error, 0, "Padding errors");
2089 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
2090 	    &sc->stats_bad_session, 0, "Requests with invalid session ID");
2091 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
2092 	    &sc->stats_sglist_error, 0,
2093 	    "Requests for which DMA mapping failed");
2094 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
2095 	    &sc->stats_process_error, 0, "Requests failed during queueing");
2096 	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD,
2097 	    &sc->stats_sw_fallback, 0,
2098 	    "Requests processed by falling back to software");
2099 
2100 	/*
2101 	 * dev.ccr.X.stats.port
2102 	 */
2103 	port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port",
2104 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics");
2105 
2106 	for (i = 0; i < nitems(sc->ports); i++) {
2107 		if (sc->ports[i].rxq == NULL)
2108 			continue;
2109 
2110 		/*
2111 		 * dev.ccr.X.stats.port.Y
2112 		 */
2113 		snprintf(buf, sizeof(buf), "%d", i);
2114 		oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO,
2115 		    buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf);
2116 		children = SYSCTL_CHILDREN(oid);
2117 
2118 		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions",
2119 		    CTLFLAG_RD, &sc->ports[i].active_sessions, 0,
2120 		    "Count of active sessions");
2121 	}
2122 }
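/*
 * The statistics above are readable from userland, e.g. via
 * "sysctl dev.ccr.0.stats" (using a hypothetical unit number 0).
 */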
2123 
2124 static void
2125 ccr_init_port(struct ccr_softc *sc, int port)
2126 {
2127 
2128 	sc->ports[port].txq = &sc->adapter->sge.ctrlq[port];
2129 	sc->ports[port].rxq =
2130 	    &sc->adapter->sge.rxq[sc->adapter->port[port]->vi->first_rxq];
2131 	sc->ports[port].tx_channel_id = port;
2132 	/* Ports are numbered 0 .. MAX_NPORTS - 1, so MAX_NPORTS bits. */
	_Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS,
2133 	    "Too many ports to fit in port_mask");
2134 	sc->port_mask |= 1u << port;
2135 }
2136 
2137 static int
2138 ccr_attach(device_t dev)
2139 {
2140 	struct ccr_softc *sc;
2141 	int32_t cid;
2142 	int i;
2143 
2144 	sc = device_get_softc(dev);
2145 	sc->dev = dev;
2146 	sc->adapter = device_get_softc(device_get_parent(dev));
2147 	for_each_port(sc->adapter, i) {
2148 		ccr_init_port(sc, i);
2149 	}
2150 	cid = crypto_get_driverid(dev, sizeof(struct ccr_session),
2151 	    CRYPTOCAP_F_HARDWARE);
2152 	if (cid < 0) {
2153 		device_printf(dev, "could not get crypto driver id\n");
2154 		return (ENXIO);
2155 	}
2156 	sc->cid = cid;
2157 	sc->adapter->ccr_softc = sc;
2158 
2159 	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
2160 	sc->sg_input = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
2161 	sc->sg_output = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
2162 	sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
2163 	sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
2164 	sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
2165 	sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
2166 	ccr_sysctls(sc);
2167 
2168 	return (0);
2169 }
2170 
2171 static int
2172 ccr_detach(device_t dev)
2173 {
2174 	struct ccr_softc *sc;
2175 
2176 	sc = device_get_softc(dev);
2177 
2178 	mtx_lock(&sc->lock);
2179 	sc->detaching = true;
2180 	mtx_unlock(&sc->lock);
2181 
2182 	crypto_unregister_all(sc->cid);
2183 
2184 	mtx_destroy(&sc->lock);
2185 	sglist_free(sc->sg_iv_aad);
2186 	free(sc->iv_aad_buf, M_CCR);
2187 	sglist_free(sc->sg_dsgl);
2188 	sglist_free(sc->sg_ulptx);
2189 	sglist_free(sc->sg_output);
2190 	sglist_free(sc->sg_input);
2191 	sc->adapter->ccr_softc = NULL;
2192 	return (0);
2193 }
2194 
2195 static void
2196 ccr_init_hash_digest(struct ccr_session *s)
2197 {
2198 	union authctx auth_ctx;
2199 	struct auth_hash *axf;
2200 
2201 	axf = s->hmac.auth_hash;
2202 	axf->Init(&auth_ctx);
2203 	t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads);
2204 }
2205 
2206 static bool
2207 ccr_aes_check_keylen(int alg, int klen)
2208 {
2209 
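	/*
	 * AES-XTS keys are two AES keys concatenated, so 256-bit
	 * (2 x 128) and 512-bit (2 x 256) key lengths are valid for
	 * XTS while 128 and 192 bits are not; there is no 2 x 192
	 * case, so 384-bit XTS keys fall to the default and fail.
	 */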
2210 	switch (klen * 8) {
2211 	case 128:
2212 	case 192:
2213 		if (alg == CRYPTO_AES_XTS)
2214 			return (false);
2215 		break;
2216 	case 256:
2217 		break;
2218 	case 512:
2219 		if (alg != CRYPTO_AES_XTS)
2220 			return (false);
2221 		break;
2222 	default:
2223 		return (false);
2224 	}
2225 	return (true);
2226 }
2227 
2228 static void
2229 ccr_aes_setkey(struct ccr_session *s, const void *key, int klen)
2230 {
2231 	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
2232 	unsigned int opad_present;
2233 
2234 	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
2235 		kbits = (klen / 2) * 8;
2236 	else
2237 		kbits = klen * 8;
2238 	switch (kbits) {
2239 	case 128:
2240 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2241 		break;
2242 	case 192:
2243 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2244 		break;
2245 	case 256:
2246 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2247 		break;
2248 	default:
2249 		panic("should not get here");
2250 	}
2251 
2252 	s->blkcipher.key_len = klen;
2253 	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
2254 	switch (s->blkcipher.cipher_mode) {
2255 	case SCMD_CIPH_MODE_AES_CBC:
2256 	case SCMD_CIPH_MODE_AES_XTS:
2257 		t4_aes_getdeckey(s->blkcipher.deckey, key, kbits);
2258 		break;
2259 	}
2260 
2261 	kctx_len = roundup2(s->blkcipher.key_len, 16);
2262 	switch (s->mode) {
2263 	case ETA:
2264 		mk_size = s->hmac.mk_size;
2265 		opad_present = 1;
2266 		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
2267 		kctx_len += iopad_size * 2;
2268 		break;
2269 	case GCM:
2270 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2271 		opad_present = 0;
2272 		kctx_len += GMAC_BLOCK_LEN;
2273 		break;
2274 	case CCM:
2275 		switch (kbits) {
2276 		case 128:
2277 			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2278 			break;
2279 		case 192:
2280 			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2281 			break;
2282 		case 256:
2283 			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2284 			break;
2285 		default:
2286 			panic("should not get here");
2287 		}
2288 		opad_present = 0;
2289 		kctx_len *= 2;
2290 		break;
2291 	default:
2292 		mk_size = CHCR_KEYCTX_NO_KEY;
2293 		opad_present = 0;
2294 		break;
2295 	}
2296 	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
2297 	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
2298 	    V_KEY_CONTEXT_DUAL_CK(s->blkcipher.cipher_mode ==
2299 	    SCMD_CIPH_MODE_AES_XTS) |
2300 	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
2301 	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
2302 	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
2303 }
2304 
2305 static bool
2306 ccr_auth_supported(const struct crypto_session_params *csp)
2307 {
2308 
2309 	switch (csp->csp_auth_alg) {
2310 	case CRYPTO_SHA1:
2311 	case CRYPTO_SHA2_224:
2312 	case CRYPTO_SHA2_256:
2313 	case CRYPTO_SHA2_384:
2314 	case CRYPTO_SHA2_512:
2315 	case CRYPTO_SHA1_HMAC:
2316 	case CRYPTO_SHA2_224_HMAC:
2317 	case CRYPTO_SHA2_256_HMAC:
2318 	case CRYPTO_SHA2_384_HMAC:
2319 	case CRYPTO_SHA2_512_HMAC:
2320 		break;
2321 	default:
2322 		return (false);
2323 	}
2324 	return (true);
2325 }
2326 
2327 static bool
2328 ccr_cipher_supported(const struct crypto_session_params *csp)
2329 {
2330 
2331 	switch (csp->csp_cipher_alg) {
2332 	case CRYPTO_AES_CBC:
2333 		if (csp->csp_ivlen != AES_BLOCK_LEN)
2334 			return (false);
2335 		break;
2336 	case CRYPTO_AES_ICM:
2337 		if (csp->csp_ivlen != AES_BLOCK_LEN)
2338 			return (false);
2339 		break;
2340 	case CRYPTO_AES_XTS:
2341 		if (csp->csp_ivlen != AES_XTS_IV_LEN)
2342 			return (false);
2343 		break;
2344 	default:
2345 		return (false);
2346 	}
2347 	return (ccr_aes_check_keylen(csp->csp_cipher_alg,
2348 	    csp->csp_cipher_klen));
2349 }
2350 
2351 static int
2352 ccr_cipher_mode(const struct crypto_session_params *csp)
2353 {
2354 
2355 	switch (csp->csp_cipher_alg) {
2356 	case CRYPTO_AES_CBC:
2357 		return (SCMD_CIPH_MODE_AES_CBC);
2358 	case CRYPTO_AES_ICM:
2359 		return (SCMD_CIPH_MODE_AES_CTR);
2360 	case CRYPTO_AES_NIST_GCM_16:
2361 		return (SCMD_CIPH_MODE_AES_GCM);
2362 	case CRYPTO_AES_XTS:
2363 		return (SCMD_CIPH_MODE_AES_XTS);
2364 	case CRYPTO_AES_CCM_16:
2365 		return (SCMD_CIPH_MODE_AES_CCM);
2366 	default:
2367 		return (SCMD_CIPH_MODE_NOP);
2368 	}
2369 }
2370 
2371 static int
2372 ccr_probesession(device_t dev, const struct crypto_session_params *csp)
2373 {
2374 	unsigned int cipher_mode;
2375 
2376 	if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
2377 	    0)
2378 		return (EINVAL);
2379 	switch (csp->csp_mode) {
2380 	case CSP_MODE_DIGEST:
2381 		if (!ccr_auth_supported(csp))
2382 			return (EINVAL);
2383 		break;
2384 	case CSP_MODE_CIPHER:
2385 		if (!ccr_cipher_supported(csp))
2386 			return (EINVAL);
2387 		break;
2388 	case CSP_MODE_AEAD:
2389 		switch (csp->csp_cipher_alg) {
2390 		case CRYPTO_AES_NIST_GCM_16:
2391 			if (csp->csp_ivlen != AES_GCM_IV_LEN)
2392 				return (EINVAL);
2393 			if (csp->csp_auth_mlen < 0 ||
2394 			    csp->csp_auth_mlen > AES_GMAC_HASH_LEN)
2395 				return (EINVAL);
2396 			break;
2397 		case CRYPTO_AES_CCM_16:
2398 			if (csp->csp_ivlen != AES_CCM_IV_LEN)
2399 				return (EINVAL);
2400 			if (csp->csp_auth_mlen < 0 ||
2401 			    csp->csp_auth_mlen > AES_CBC_MAC_HASH_LEN)
2402 				return (EINVAL);
2403 			break;
2404 		default:
2405 			return (EINVAL);
2406 		}
2407 		break;
2408 	case CSP_MODE_ETA:
2409 		if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp))
2410 			return (EINVAL);
2411 		break;
2412 	default:
2413 		return (EINVAL);
2414 	}
2415 
2416 	if (csp->csp_cipher_klen != 0) {
2417 		cipher_mode = ccr_cipher_mode(csp);
2418 		if (cipher_mode == SCMD_CIPH_MODE_NOP)
2419 			return (EINVAL);
2420 	}
2421 
2422 	return (CRYPTODEV_PROBE_HARDWARE);
2423 }
2424 
2425 /*
2426  * Select an available port with the lowest number of active sessions.
2427  */
2428 static struct ccr_port *
2429 ccr_choose_port(struct ccr_softc *sc)
2430 {
2431 	struct ccr_port *best, *p;
2432 	int i;
2433 
2434 	mtx_assert(&sc->lock, MA_OWNED);
2435 	best = NULL;
2436 	for (i = 0; i < nitems(sc->ports); i++) {
2437 		p = &sc->ports[i];
2438 
2439 		/* Ignore non-existent ports. */
2440 		if (p->rxq == NULL)
2441 			continue;
2442 
2443 		/*
2444 		 * XXX: Ignore ports whose queues aren't initialized.
2445 		 * This is racy as the rxq can be destroyed by the
2446 		 * associated VI detaching.  Eventually ccr should use
2447 		 * dedicated queues.
2448 		 */
2449 		if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL)
2450 			continue;
2451 
2452 		if ((sc->port_mask & (1u << i)) == 0)
2453 			continue;
2454 
2455 		if (best == NULL ||
2456 		    p->active_sessions < best->active_sessions)
2457 			best = p;
2458 	}
2459 	return (best);
2460 }
2461 
2462 static int
2463 ccr_newsession(device_t dev, crypto_session_t cses,
2464     const struct crypto_session_params *csp)
2465 {
2466 	struct ccr_softc *sc;
2467 	struct ccr_session *s;
2468 	struct auth_hash *auth_hash;
2469 	unsigned int auth_mode, cipher_mode, mk_size;
2470 	unsigned int partial_digest_len;
2471 
2472 	switch (csp->csp_auth_alg) {
2473 	case CRYPTO_SHA1:
2474 	case CRYPTO_SHA1_HMAC:
2475 		auth_hash = &auth_hash_hmac_sha1;
2476 		auth_mode = SCMD_AUTH_MODE_SHA1;
2477 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
2478 		partial_digest_len = SHA1_HASH_LEN;
2479 		break;
2480 	case CRYPTO_SHA2_224:
2481 	case CRYPTO_SHA2_224_HMAC:
2482 		auth_hash = &auth_hash_hmac_sha2_224;
2483 		auth_mode = SCMD_AUTH_MODE_SHA224;
2484 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2485 		partial_digest_len = SHA2_256_HASH_LEN;
2486 		break;
2487 	case CRYPTO_SHA2_256:
2488 	case CRYPTO_SHA2_256_HMAC:
2489 		auth_hash = &auth_hash_hmac_sha2_256;
2490 		auth_mode = SCMD_AUTH_MODE_SHA256;
2491 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2492 		partial_digest_len = SHA2_256_HASH_LEN;
2493 		break;
2494 	case CRYPTO_SHA2_384:
2495 	case CRYPTO_SHA2_384_HMAC:
2496 		auth_hash = &auth_hash_hmac_sha2_384;
2497 		auth_mode = SCMD_AUTH_MODE_SHA512_384;
2498 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
2499 		partial_digest_len = SHA2_512_HASH_LEN;
2500 		break;
2501 	case CRYPTO_SHA2_512:
2502 	case CRYPTO_SHA2_512_HMAC:
2503 		auth_hash = &auth_hash_hmac_sha2_512;
2504 		auth_mode = SCMD_AUTH_MODE_SHA512_512;
2505 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
2506 		partial_digest_len = SHA2_512_HASH_LEN;
2507 		break;
2508 	default:
2509 		auth_hash = NULL;
2510 		auth_mode = SCMD_AUTH_MODE_NOP;
2511 		mk_size = 0;
2512 		partial_digest_len = 0;
2513 		break;
2514 	}
2515 
2516 	cipher_mode = ccr_cipher_mode(csp);
2517 
2518 #ifdef INVARIANTS
2519 	switch (csp->csp_mode) {
2520 	case CSP_MODE_CIPHER:
2521 		if (cipher_mode == SCMD_CIPH_MODE_NOP ||
2522 		    cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
2523 		    cipher_mode == SCMD_CIPH_MODE_AES_CCM)
2524 			panic("invalid cipher algo");
2525 		break;
2526 	case CSP_MODE_DIGEST:
2527 		if (auth_mode == SCMD_AUTH_MODE_NOP)
2528 			panic("invalid auth algo");
2529 		break;
2530 	case CSP_MODE_AEAD:
2531 		if (cipher_mode != SCMD_CIPH_MODE_AES_GCM &&
2532 		    cipher_mode != SCMD_CIPH_MODE_AES_CCM)
2533 			panic("invalid aead cipher algo");
2534 		if (auth_mode != SCMD_AUTH_MODE_NOP)
2535 			panic("invalid aead auth aglo");
2536 		break;
2537 	case CSP_MODE_ETA:
2538 		if (cipher_mode == SCMD_CIPH_MODE_NOP ||
2539 		    cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
2540 		    cipher_mode == SCMD_CIPH_MODE_AES_CCM)
2541 			panic("invalid cipher algo");
2542 		if (auth_mode == SCMD_AUTH_MODE_NOP)
2543 			panic("invalid auth algo");
2544 		break;
2545 	default:
2546 		panic("invalid csp mode");
2547 	}
2548 #endif
2549 
2550 	sc = device_get_softc(dev);
2551 
2552 	mtx_lock(&sc->lock);
2553 	if (sc->detaching) {
2554 		mtx_unlock(&sc->lock);
2555 		return (ENXIO);
2556 	}
2557 
2558 	s = crypto_get_driver_session(cses);
2559 	s->port = ccr_choose_port(sc);
2560 	if (s->port == NULL) {
2561 		mtx_unlock(&sc->lock);
2562 		return (ENXIO);
2563 	}
2564 
2565 	switch (csp->csp_mode) {
2566 	case CSP_MODE_AEAD:
2567 		if (cipher_mode == SCMD_CIPH_MODE_AES_CCM)
2568 			s->mode = CCM;
2569 		else
2570 			s->mode = GCM;
2571 		break;
2572 	case CSP_MODE_ETA:
2573 		s->mode = ETA;
2574 		break;
2575 	case CSP_MODE_DIGEST:
2576 		if (csp->csp_auth_klen != 0)
2577 			s->mode = HMAC;
2578 		else
2579 			s->mode = HASH;
2580 		break;
2581 	case CSP_MODE_CIPHER:
2582 		s->mode = BLKCIPHER;
2583 		break;
2584 	}
2585 
2586 	if (s->mode == GCM) {
2587 		if (csp->csp_auth_mlen == 0)
2588 			s->gmac.hash_len = AES_GMAC_HASH_LEN;
2589 		else
2590 			s->gmac.hash_len = csp->csp_auth_mlen;
2591 		t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen,
2592 		    s->gmac.ghash_h);
2593 	} else if (s->mode == CCM) {
2594 		if (csp->csp_auth_mlen == 0)
2595 			s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN;
2596 		else
2597 			s->ccm_mac.hash_len = csp->csp_auth_mlen;
2598 	} else if (auth_mode != SCMD_AUTH_MODE_NOP) {
2599 		s->hmac.auth_hash = auth_hash;
2600 		s->hmac.auth_mode = auth_mode;
2601 		s->hmac.mk_size = mk_size;
2602 		s->hmac.partial_digest_len = partial_digest_len;
2603 		if (csp->csp_auth_mlen == 0)
2604 			s->hmac.hash_len = auth_hash->hashsize;
2605 		else
2606 			s->hmac.hash_len = csp->csp_auth_mlen;
2607 		if (csp->csp_auth_key != NULL)
2608 			t4_init_hmac_digest(auth_hash, partial_digest_len,
2609 			    csp->csp_auth_key, csp->csp_auth_klen,
2610 			    s->hmac.pads);
2611 		else
2612 			ccr_init_hash_digest(s);
2613 	}
2614 	if (cipher_mode != SCMD_CIPH_MODE_NOP) {
2615 		s->blkcipher.cipher_mode = cipher_mode;
2616 		s->blkcipher.iv_len = csp->csp_ivlen;
2617 		if (csp->csp_cipher_key != NULL)
2618 			ccr_aes_setkey(s, csp->csp_cipher_key,
2619 			    csp->csp_cipher_klen);
2620 	}
2621 
2622 	s->active = true;
2623 	s->port->active_sessions++;
2624 	mtx_unlock(&sc->lock);
2625 	return (0);
2626 }
2627 
2628 static void
2629 ccr_freesession(device_t dev, crypto_session_t cses)
2630 {
2631 	struct ccr_softc *sc;
2632 	struct ccr_session *s;
2633 
2634 	sc = device_get_softc(dev);
2635 	s = crypto_get_driver_session(cses);
2636 	mtx_lock(&sc->lock);
2637 	if (s->pending != 0)
2638 		device_printf(dev,
2639 		    "session %p freed with %d pending requests\n", s,
2640 		    s->pending);
2641 	s->active = false;
2642 	s->port->active_sessions--;
2643 	mtx_unlock(&sc->lock);
2644 }
2645 
2646 static int
2647 ccr_process(device_t dev, struct cryptop *crp, int hint)
2648 {
2649 	const struct crypto_session_params *csp;
2650 	struct ccr_softc *sc;
2651 	struct ccr_session *s;
2652 	int error;
2653 
2654 	csp = crypto_get_params(crp->crp_session);
2655 	s = crypto_get_driver_session(crp->crp_session);
2656 	sc = device_get_softc(dev);
2657 
2658 	mtx_lock(&sc->lock);
2659 	error = ccr_populate_sglist(sc->sg_input, &crp->crp_buf);
2660 	if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
2661 		error = ccr_populate_sglist(sc->sg_output, &crp->crp_obuf);
2662 	if (error) {
2663 		sc->stats_sglist_error++;
2664 		goto out;
2665 	}
2666 
2667 	switch (s->mode) {
2668 	case HASH:
2669 		error = ccr_hash(sc, s, crp);
2670 		if (error == 0)
2671 			sc->stats_hash++;
2672 		break;
2673 	case HMAC:
2674 		if (crp->crp_auth_key != NULL)
2675 			t4_init_hmac_digest(s->hmac.auth_hash,
2676 			    s->hmac.partial_digest_len, crp->crp_auth_key,
2677 			    csp->csp_auth_klen, s->hmac.pads);
2678 		error = ccr_hash(sc, s, crp);
2679 		if (error == 0)
2680 			sc->stats_hmac++;
2681 		break;
2682 	case BLKCIPHER:
2683 		if (crp->crp_cipher_key != NULL)
2684 			ccr_aes_setkey(s, crp->crp_cipher_key,
2685 			    csp->csp_cipher_klen);
2686 		error = ccr_blkcipher(sc, s, crp);
2687 		if (error == 0) {
2688 			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2689 				sc->stats_blkcipher_encrypt++;
2690 			else
2691 				sc->stats_blkcipher_decrypt++;
2692 		}
2693 		break;
2694 	case ETA:
2695 		if (crp->crp_auth_key != NULL)
2696 			t4_init_hmac_digest(s->hmac.auth_hash,
2697 			    s->hmac.partial_digest_len, crp->crp_auth_key,
2698 			    csp->csp_auth_klen, s->hmac.pads);
2699 		if (crp->crp_cipher_key != NULL)
2700 			ccr_aes_setkey(s, crp->crp_cipher_key,
2701 			    csp->csp_cipher_klen);
2702 		error = ccr_eta(sc, s, crp);
2703 		if (error == 0) {
2704 			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2705 				sc->stats_eta_encrypt++;
2706 			else
2707 				sc->stats_eta_decrypt++;
2708 		}
2709 		break;
2710 	case GCM:
2711 		if (crp->crp_cipher_key != NULL) {
2712 			t4_init_gmac_hash(crp->crp_cipher_key,
2713 			    csp->csp_cipher_klen, s->gmac.ghash_h);
2714 			ccr_aes_setkey(s, crp->crp_cipher_key,
2715 			    csp->csp_cipher_klen);
2716 		}
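		/*
		 * The crypto engine doesn't handle GCM requests with
		 * an empty payload, so handle those in software.
		 */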
2717 		if (crp->crp_payload_length == 0) {
2718 			mtx_unlock(&sc->lock);
2719 			ccr_gcm_soft(s, crp);
2720 			return (0);
2721 		}
2722 		error = ccr_gcm(sc, s, crp);
2723 		if (error == EMSGSIZE) {
2724 			sc->stats_sw_fallback++;
2725 			mtx_unlock(&sc->lock);
2726 			ccr_gcm_soft(s, crp);
2727 			return (0);
2728 		}
2729 		if (error == 0) {
2730 			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2731 				sc->stats_gcm_encrypt++;
2732 			else
2733 				sc->stats_gcm_decrypt++;
2734 		}
2735 		break;
2736 	case CCM:
2737 		if (crp->crp_cipher_key != NULL) {
2738 			ccr_aes_setkey(s, crp->crp_cipher_key,
2739 			    csp->csp_cipher_klen);
2740 		}
2741 		error = ccr_ccm(sc, s, crp);
2742 		if (error == EMSGSIZE) {
2743 			sc->stats_sw_fallback++;
2744 			mtx_unlock(&sc->lock);
2745 			ccr_ccm_soft(s, crp);
2746 			return (0);
2747 		}
2748 		if (error == 0) {
2749 			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2750 				sc->stats_ccm_encrypt++;
2751 			else
2752 				sc->stats_ccm_decrypt++;
2753 		}
2754 		break;
2755 	}
2756 
2757 	if (error == 0) {
2758 		s->pending++;
2759 		sc->stats_inflight++;
2760 	} else
2761 		sc->stats_process_error++;
2762 
2763 out:
2764 	mtx_unlock(&sc->lock);
2765 
2766 	if (error) {
2767 		crp->crp_etype = error;
2768 		crypto_done(crp);
2769 	}
2770 
2771 	return (0);
2772 }
2773 
2774 static int
2775 do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
2776     struct mbuf *m)
2777 {
2778 	struct ccr_softc *sc = iq->adapter->ccr_softc;
2779 	struct ccr_session *s;
2780 	const struct cpl_fw6_pld *cpl;
2781 	struct cryptop *crp;
2782 	uint32_t status;
2783 	int error;
2784 
2785 	if (m != NULL)
2786 		cpl = mtod(m, const void *);
2787 	else
2788 		cpl = (const void *)(rss + 1);
2789 
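	/*
	 * data[1] echoes back the cryptop pointer that was stored in
	 * the work request, identifying the completed request.
	 */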
2790 	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
2791 	s = crypto_get_driver_session(crp->crp_session);
2792 	status = be64toh(cpl->data[0]);
2793 	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
2794 		error = EBADMSG;
2795 	else
2796 		error = 0;
2797 
2798 	mtx_lock(&sc->lock);
2799 	s->pending--;
2800 	sc->stats_inflight--;
2801 
2802 	switch (s->mode) {
2803 	case HASH:
2804 	case HMAC:
2805 		error = ccr_hash_done(sc, s, crp, cpl, error);
2806 		break;
2807 	case BLKCIPHER:
2808 		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
2809 		break;
2810 	case ETA:
2811 		error = ccr_eta_done(sc, s, crp, cpl, error);
2812 		break;
2813 	case GCM:
2814 		error = ccr_gcm_done(sc, s, crp, cpl, error);
2815 		break;
2816 	case CCM:
2817 		error = ccr_ccm_done(sc, s, crp, cpl, error);
2818 		break;
2819 	}
2820 
2821 	if (error == EBADMSG) {
2822 		if (CHK_MAC_ERR_BIT(status))
2823 			sc->stats_mac_error++;
2824 		if (CHK_PAD_ERR_BIT(status))
2825 			sc->stats_pad_error++;
2826 	}
2827 	mtx_unlock(&sc->lock);
2828 	crp->crp_etype = error;
2829 	crypto_done(crp);
2830 	m_freem(m);
2831 	return (0);
2832 }
2833 
2834 static int
2835 ccr_modevent(module_t mod, int cmd, void *arg)
2836 {
2837 
2838 	switch (cmd) {
2839 	case MOD_LOAD:
2840 		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
2841 		return (0);
2842 	case MOD_UNLOAD:
2843 		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
2844 		return (0);
2845 	default:
2846 		return (EOPNOTSUPP);
2847 	}
2848 }
2849 
2850 static device_method_t ccr_methods[] = {
2851 	DEVMETHOD(device_identify,	ccr_identify),
2852 	DEVMETHOD(device_probe,		ccr_probe),
2853 	DEVMETHOD(device_attach,	ccr_attach),
2854 	DEVMETHOD(device_detach,	ccr_detach),
2855 
2856 	DEVMETHOD(cryptodev_probesession, ccr_probesession),
2857 	DEVMETHOD(cryptodev_newsession,	ccr_newsession),
2858 	DEVMETHOD(cryptodev_freesession, ccr_freesession),
2859 	DEVMETHOD(cryptodev_process,	ccr_process),
2860 
2861 	DEVMETHOD_END
2862 };
2863 
2864 static driver_t ccr_driver = {
2865 	"ccr",
2866 	ccr_methods,
2867 	sizeof(struct ccr_softc)
2868 };
2869 
2870 static devclass_t ccr_devclass;
2871 
2872 DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
2873 MODULE_VERSION(ccr, 1);
2874 MODULE_DEPEND(ccr, crypto, 1, 1, 1);
2875 MODULE_DEPEND(ccr, t6nex, 1, 1, 1);
2876