/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"

/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       |  ----- For requests with AES
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+  +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        |  ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+  +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                |  ----- Only for hash-only requests
 * +-------------------------------+
 * | IV                            |  ----- If immediate IV
 * +-------------------------------+
 * | Payload                       |  ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+  +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
 * For HMAC requests, the key consists of the partial hash of the IPAD
 * followed by the partial hash of the OPAD.
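 *
 * For example, an AES-128-CBC + HMAC-SHA1 session carries a 16-byte
 * AES key followed by a 32-byte IPAD slot and a 32-byte OPAD slot,
 * since SHA-1's 20-byte partial hash is rounded up to the next
 * 16-byte boundary.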
 *
 * Replies consist of:
 *
 * +-------------------------------+
 * | struct cpl_fw6_pld            |
 * +-------------------------------+
 * | hash digest                   |  ----- For HMAC request with
 * +-------------------------------+        'hash_size' set in work request
 *
 * A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
 * "MAC" error and bit 1 indicates a "PAD" error.
 *
 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
 * in the request is returned in data[1] of the CPL_FW6_PLD message.
 *
 * For block cipher replies, the updated IV is supplied in data[2] and
 * data[3] of the CPL_FW6_PLD message.
 *
 * For hash replies where the work request set 'hash_size' to request
 * a copy of the hash in the reply, the hash digest is supplied
 * immediately following the CPL_FW6_PLD message.
 */

/*
 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32
 * SG entries.
 */
#define MAX_RX_PHYS_DSGL_SGE    32
#define DSGL_SGE_MAXLEN         65535

/*
 * The adapter only supports requests with a total input or output
 * length of 64k-1 or smaller.  Longer requests either result in hung
 * requests or incorrect results.
 */
#define MAX_REQUEST_SIZE        65535

static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");

struct ccr_session_hmac {
        struct auth_hash *auth_hash;
        int hash_len;
        unsigned int partial_digest_len;
        unsigned int auth_mode;
        unsigned int mk_size;
        char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
        char opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};

struct ccr_session_gmac {
        int hash_len;
        char ghash_h[GMAC_BLOCK_LEN];
};

struct ccr_session_blkcipher {
        unsigned int cipher_mode;
        unsigned int key_len;
        unsigned int iv_len;
        __be32 key_ctx_hdr;
        char enckey[CHCR_AES_MAX_KEY_LEN];
        char deckey[CHCR_AES_MAX_KEY_LEN];
};

struct ccr_session {
        bool active;
        int pending;
        enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
        union {
                struct ccr_session_hmac hmac;
                struct ccr_session_gmac gmac;
        };
        struct ccr_session_blkcipher blkcipher;
};

struct ccr_softc {
        struct adapter *adapter;
        device_t dev;
        uint32_t cid;
        int tx_channel_id;
        struct ccr_session *sessions;
        int nsessions;
        struct mtx lock;
        bool detaching;
        struct sge_wrq *txq;
        struct sge_rxq *rxq;

        /*
         * Pre-allocate S/G lists used when preparing a work request.
         * 'sg_crp' contains an sglist describing the entire buffer
         * for a 'struct cryptop'.  'sg_ulptx' is used to describe
         * the data the engine should DMA as input via ULPTX_SGL.
         * 'sg_dsgl' is used to describe the destination that cipher
         * text and a tag should be written to.
         */
        struct sglist *sg_crp;
        struct sglist *sg_ulptx;
        struct sglist *sg_dsgl;

        /* Statistics. */
        uint64_t stats_blkcipher_encrypt;
        uint64_t stats_blkcipher_decrypt;
        uint64_t stats_hmac;
        uint64_t stats_authenc_encrypt;
        uint64_t stats_authenc_decrypt;
        uint64_t stats_gcm_encrypt;
        uint64_t stats_gcm_decrypt;
        uint64_t stats_wr_nomem;
        uint64_t stats_inflight;
        uint64_t stats_mac_error;
        uint64_t stats_pad_error;
        uint64_t stats_bad_session;
        uint64_t stats_sglist_error;
        uint64_t stats_process_error;
};

/*
 * Crypto requests involve two kinds of scatter/gather lists.
 *
 * Non-hash-only requests require a PHYS_DSGL that describes the
 * location to store the results of the encryption or decryption
 * operation.  This SGL uses a different format (PHYS_DSGL) and should
 * exclude the crd_skip bytes at the start of the data as well as
 * any AAD or IV.  For authenticated encryption requests it should
 * also cover the destination of the hash or tag.
 *
 * The input payload may either be supplied inline as immediate data,
 * or via a standard ULP_TX SGL.  This SGL should include AAD,
 * ciphertext, and the hash or tag for authenticated decryption
 * requests.
 *
 * These scatter/gather lists can describe different subsets of the
 * buffer described by the crypto operation.  ccr_populate_sglist()
 * generates a scatter/gather list that covers the entire crypto
 * operation buffer that is then used to construct the other
 * scatter/gather lists.
 */
static int
ccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
        int error;

        sglist_reset(sg);
        if (crp->crp_flags & CRYPTO_F_IMBUF)
                error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
        else if (crp->crp_flags & CRYPTO_F_IOV)
                error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
        else
                error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
        return (error);
}

/*
 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
 * segments.
 */
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
        int i, nsegs;

        nsegs = 0;
        for (i = 0; i < sg->sg_nseg; i++)
                nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
        return (nsegs);
}

/* These functions deal with PHYS_DSGL for the reply buffer. */
static inline int
ccr_phys_dsgl_len(int nsegs)
{
        int len;

        len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
        if ((nsegs % 8) != 0) {
                len += sizeof(uint16_t) * 8;
                len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
        }
        return (len);
}

static void
ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs)
{
        struct sglist *sg;
        struct cpl_rx_phys_dsgl *cpl;
        struct phys_sge_pairs *sgl;
        vm_paddr_t paddr;
        size_t seglen;
        u_int i, j;

        sg = sc->sg_dsgl;
        cpl = dst;
        cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
            V_CPL_RX_PHYS_DSGL_ISRDMA(0));
        cpl->pcirlxorder_to_noofsgentr = htobe32(
            V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
            V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
            V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
            V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
        cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id);
        cpl->rss_hdr_int.hash_val = 0;
        sgl = (struct phys_sge_pairs *)(cpl + 1);
        j = 0;
        for (i = 0; i < sg->sg_nseg; i++) {
                seglen = sg->sg_segs[i].ss_len;
                paddr = sg->sg_segs[i].ss_paddr;
                do {
                        sgl->addr[j] = htobe64(paddr);
                        if (seglen > DSGL_SGE_MAXLEN) {
                                sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
                                paddr += DSGL_SGE_MAXLEN;
                                seglen -= DSGL_SGE_MAXLEN;
                        } else {
                                sgl->len[j] = htobe16(seglen);
                                seglen = 0;
                        }
                        j++;
                        if (j == 8) {
                                sgl++;
                                j = 0;
                        }
                } while (seglen != 0);
        }
        MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}

/*
 * These functions deal with the ULPTX_SGL for input payload.
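 *
 * The first payload segment is carried in the ulptx_sgl header itself
 * (len0/addr0); the remaining segments are packed two per
 * ulptx_sge_pair.  This is why ccr_ulptx_sgl_len() drops one segment
 * before doing the pair arithmetic and then rounds the total up to a
 * 16-byte boundary.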
*/ 318 static inline int 319 ccr_ulptx_sgl_len(int nsegs) 320 { 321 u_int n; 322 323 nsegs--; /* first segment is part of ulptx_sgl */ 324 n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 325 return (roundup2(n, 16)); 326 } 327 328 static void 329 ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs) 330 { 331 struct ulptx_sgl *usgl; 332 struct sglist *sg; 333 struct sglist_seg *ss; 334 int i; 335 336 sg = sc->sg_ulptx; 337 MPASS(nsegs == sg->sg_nseg); 338 ss = &sg->sg_segs[0]; 339 usgl = dst; 340 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 341 V_ULPTX_NSGE(nsegs)); 342 usgl->len0 = htobe32(ss->ss_len); 343 usgl->addr0 = htobe64(ss->ss_paddr); 344 ss++; 345 for (i = 0; i < sg->sg_nseg - 1; i++) { 346 usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len); 347 usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr); 348 ss++; 349 } 350 351 } 352 353 static bool 354 ccr_use_imm_data(u_int transhdr_len, u_int input_len) 355 { 356 357 if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN) 358 return (false); 359 if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) > 360 SGE_MAX_WR_LEN) 361 return (false); 362 return (true); 363 } 364 365 static void 366 ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len, 367 u_int wr_len, uint32_t sid, u_int imm_len, u_int sgl_len, u_int hash_size, 368 u_int iv_loc, struct cryptop *crp) 369 { 370 u_int cctx_size; 371 372 cctx_size = sizeof(struct _key_ctx) + kctx_len; 373 crwr->wreq.op_to_cctx_size = htobe32( 374 V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) | 375 V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) | 376 V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) | 377 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) | 378 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4)); 379 crwr->wreq.len16_pkd = htobe32( 380 V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16)); 381 crwr->wreq.session_id = htobe32(sid); 382 crwr->wreq.rx_chid_to_rx_q_id = htobe32( 383 V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) | 384 V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) | 385 V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) | 386 V_FW_CRYPTO_LOOKASIDE_WR_IV(iv_loc) | 387 V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) | 388 V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) | 389 V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id)); 390 crwr->wreq.key_addr = 0; 391 crwr->wreq.pld_size_hash_size = htobe32( 392 V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) | 393 V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size)); 394 crwr->wreq.cookie = htobe64((uintptr_t)crp); 395 396 crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 397 V_ULP_TXPKT_DATAMODIFY(0) | 398 V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) | 399 V_ULP_TXPKT_FID(0) | V_ULP_TXPKT_RO(1)); 400 crwr->ulptx.len = htobe32( 401 ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16)); 402 403 crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 404 V_ULP_TX_SC_MORE(imm_len != 0 ? 0 : 1)); 405 crwr->sc_imm.len = htobe32(wr_len - offsetof(struct chcr_wr, sec_cpl) - 406 sgl_len); 407 } 408 409 static int 410 ccr_hmac(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, 411 struct cryptop *crp) 412 { 413 struct chcr_wr *crwr; 414 struct wrqe *wr; 415 struct auth_hash *axf; 416 struct cryptodesc *crd; 417 char *dst; 418 u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len; 419 u_int imm_len, iopad_size; 420 int error, sgl_nsegs, sgl_len; 421 422 crd = crp->crp_desc; 423 424 /* Reject requests with too large of an input buffer. 
*/ 425 if (crd->crd_len > MAX_REQUEST_SIZE) 426 return (EFBIG); 427 428 axf = s->hmac.auth_hash; 429 430 /* PADs must be 128-bit aligned. */ 431 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 432 433 /* 434 * The 'key' part of the context includes the aligned IPAD and 435 * OPAD. 436 */ 437 kctx_len = iopad_size * 2; 438 hash_size_in_response = axf->hashsize; 439 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); 440 441 if (crd->crd_len == 0) { 442 imm_len = axf->blocksize; 443 sgl_nsegs = 0; 444 sgl_len = 0; 445 } else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) { 446 imm_len = crd->crd_len; 447 sgl_nsegs = 0; 448 sgl_len = 0; 449 } else { 450 imm_len = 0; 451 sglist_reset(sc->sg_ulptx); 452 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, 453 crd->crd_skip, crd->crd_len); 454 if (error) 455 return (error); 456 sgl_nsegs = sc->sg_ulptx->sg_nseg; 457 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 458 } 459 460 wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; 461 wr = alloc_wrqe(wr_len, sc->txq); 462 if (wr == NULL) { 463 sc->stats_wr_nomem++; 464 return (ENOMEM); 465 } 466 crwr = wrtod(wr); 467 memset(crwr, 0, wr_len); 468 469 ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 470 hash_size_in_response, IV_NOP, crp); 471 472 /* XXX: Hardcodes SGE loopback channel of 0. */ 473 crwr->sec_cpl.op_ivinsrtofst = htobe32( 474 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 475 V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | 476 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 477 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 478 V_CPL_TX_SEC_PDU_IVINSRTOFST(0)); 479 480 crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize : 481 crd->crd_len); 482 483 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 484 V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0)); 485 486 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 487 crwr->sec_cpl.seqno_numivs = htobe32( 488 V_SCMD_SEQ_NO_CTRL(0) | 489 V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | 490 V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_NOP) | 491 V_SCMD_AUTH_MODE(s->hmac.auth_mode) | 492 V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NO_TRUNC)); 493 crwr->sec_cpl.ivgen_hdrlen = htobe32( 494 V_SCMD_LAST_FRAG(0) | 495 V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1)); 496 497 memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len); 498 memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad, 499 s->hmac.partial_digest_len); 500 501 /* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. 
*/ 502 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; 503 crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | 504 V_KEY_CONTEXT_OPAD_PRESENT(1) | V_KEY_CONTEXT_SALT_PRESENT(1) | 505 V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) | 506 V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1)); 507 508 dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES; 509 if (crd->crd_len == 0) { 510 dst[0] = 0x80; 511 *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) = 512 htobe64(axf->blocksize << 3); 513 } else if (imm_len != 0) 514 crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip, 515 crd->crd_len, dst); 516 else 517 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 518 519 /* XXX: TODO backpressure */ 520 t4_wrq_tx(sc->adapter, wr); 521 522 return (0); 523 } 524 525 static int 526 ccr_hmac_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, 527 const struct cpl_fw6_pld *cpl, int error) 528 { 529 struct cryptodesc *crd; 530 531 crd = crp->crp_desc; 532 if (error == 0) { 533 crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, 534 s->hmac.hash_len, (c_caddr_t)(cpl + 1)); 535 } 536 537 return (error); 538 } 539 540 static int 541 ccr_blkcipher(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, 542 struct cryptop *crp) 543 { 544 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 545 struct chcr_wr *crwr; 546 struct wrqe *wr; 547 struct cryptodesc *crd; 548 char *dst; 549 u_int iv_loc, kctx_len, key_half, op_type, transhdr_len, wr_len; 550 u_int imm_len; 551 int dsgl_nsegs, dsgl_len; 552 int sgl_nsegs, sgl_len; 553 int error; 554 555 crd = crp->crp_desc; 556 557 if (s->blkcipher.key_len == 0 || crd->crd_len == 0) 558 return (EINVAL); 559 if (crd->crd_alg == CRYPTO_AES_CBC && 560 (crd->crd_len % AES_BLOCK_LEN) != 0) 561 return (EINVAL); 562 563 /* Reject requests with too large of an input buffer. */ 564 if (crd->crd_len > MAX_REQUEST_SIZE) 565 return (EFBIG); 566 567 iv_loc = IV_NOP; 568 if (crd->crd_flags & CRD_F_ENCRYPT) { 569 op_type = CHCR_ENCRYPT_OP; 570 if (crd->crd_flags & CRD_F_IV_EXPLICIT) 571 memcpy(iv, crd->crd_iv, s->blkcipher.iv_len); 572 else 573 arc4rand(iv, s->blkcipher.iv_len, 0); 574 iv_loc = IV_IMMEDIATE; 575 if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) 576 crypto_copyback(crp->crp_flags, crp->crp_buf, 577 crd->crd_inject, s->blkcipher.iv_len, iv); 578 } else { 579 op_type = CHCR_DECRYPT_OP; 580 if (crd->crd_flags & CRD_F_IV_EXPLICIT) { 581 memcpy(iv, crd->crd_iv, s->blkcipher.iv_len); 582 iv_loc = IV_IMMEDIATE; 583 } else 584 iv_loc = IV_DSGL; 585 } 586 587 sglist_reset(sc->sg_dsgl); 588 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip, 589 crd->crd_len); 590 if (error) 591 return (error); 592 dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); 593 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 594 return (EFBIG); 595 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 596 597 /* The 'key' must be 128-bit aligned. 
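 * For example, a 24-byte AES-192 key occupies 32 bytes of key context.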
*/ 598 kctx_len = roundup2(s->blkcipher.key_len, 16); 599 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 600 601 if (ccr_use_imm_data(transhdr_len, crd->crd_len + 602 s->blkcipher.iv_len)) { 603 imm_len = crd->crd_len; 604 if (iv_loc == IV_DSGL) { 605 crypto_copydata(crp->crp_flags, crp->crp_buf, 606 crd->crd_inject, s->blkcipher.iv_len, iv); 607 iv_loc = IV_IMMEDIATE; 608 } 609 sgl_nsegs = 0; 610 sgl_len = 0; 611 } else { 612 imm_len = 0; 613 sglist_reset(sc->sg_ulptx); 614 if (iv_loc == IV_DSGL) { 615 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, 616 crd->crd_inject, s->blkcipher.iv_len); 617 if (error) 618 return (error); 619 } 620 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, 621 crd->crd_skip, crd->crd_len); 622 if (error) 623 return (error); 624 sgl_nsegs = sc->sg_ulptx->sg_nseg; 625 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 626 } 627 628 wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; 629 if (iv_loc == IV_IMMEDIATE) 630 wr_len += s->blkcipher.iv_len; 631 wr = alloc_wrqe(wr_len, sc->txq); 632 if (wr == NULL) { 633 sc->stats_wr_nomem++; 634 return (ENOMEM); 635 } 636 crwr = wrtod(wr); 637 memset(crwr, 0, wr_len); 638 639 ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 0, 640 iv_loc, crp); 641 642 /* XXX: Hardcodes SGE loopback channel of 0. */ 643 crwr->sec_cpl.op_ivinsrtofst = htobe32( 644 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 645 V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | 646 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 647 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 648 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 649 650 crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len); 651 652 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 653 V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) | 654 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 655 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 656 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0)); 657 658 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. 
*/ 659 crwr->sec_cpl.seqno_numivs = htobe32( 660 V_SCMD_SEQ_NO_CTRL(0) | 661 V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | 662 V_SCMD_ENC_DEC_CTRL(op_type) | 663 V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | 664 V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_NOP) | 665 V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NOP) | 666 V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) | 667 V_SCMD_NUM_IVS(0)); 668 crwr->sec_cpl.ivgen_hdrlen = htobe32( 669 V_SCMD_IV_GEN_CTRL(0) | 670 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 671 V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); 672 673 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 674 switch (crd->crd_alg) { 675 case CRYPTO_AES_CBC: 676 if (crd->crd_flags & CRD_F_ENCRYPT) 677 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 678 s->blkcipher.key_len); 679 else 680 memcpy(crwr->key_ctx.key, s->blkcipher.deckey, 681 s->blkcipher.key_len); 682 break; 683 case CRYPTO_AES_ICM: 684 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 685 s->blkcipher.key_len); 686 break; 687 case CRYPTO_AES_XTS: 688 key_half = s->blkcipher.key_len / 2; 689 memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, 690 key_half); 691 if (crd->crd_flags & CRD_F_ENCRYPT) 692 memcpy(crwr->key_ctx.key + key_half, 693 s->blkcipher.enckey, key_half); 694 else 695 memcpy(crwr->key_ctx.key + key_half, 696 s->blkcipher.deckey, key_half); 697 break; 698 } 699 700 dst = (char *)(crwr + 1) + kctx_len; 701 ccr_write_phys_dsgl(sc, dst, dsgl_nsegs); 702 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 703 if (iv_loc == IV_IMMEDIATE) { 704 memcpy(dst, iv, s->blkcipher.iv_len); 705 dst += s->blkcipher.iv_len; 706 } 707 if (imm_len != 0) 708 crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip, 709 crd->crd_len, dst); 710 else 711 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 712 713 /* XXX: TODO backpressure */ 714 t4_wrq_tx(sc->adapter, wr); 715 716 return (0); 717 } 718 719 static int 720 ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s, 721 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 722 { 723 724 /* 725 * The updated IV to permit chained requests is at 726 * cpl->data[2], but OCF doesn't permit chained requests. 727 */ 728 return (error); 729 } 730 731 /* 732 * 'hashsize' is the length of a full digest. 'authsize' is the 733 * requested digest length for this operation which may be less 734 * than 'hashsize'. 735 */ 736 static int 737 ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize) 738 { 739 740 if (authsize == 10) 741 return (CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366); 742 if (authsize == 12) 743 return (CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT); 744 if (authsize == hashsize / 2) 745 return (CHCR_SCMD_HMAC_CTRL_DIV2); 746 return (CHCR_SCMD_HMAC_CTRL_NO_TRUNC); 747 } 748 749 static int 750 ccr_authenc(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, 751 struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) 752 { 753 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 754 struct chcr_wr *crwr; 755 struct wrqe *wr; 756 struct auth_hash *axf; 757 char *dst; 758 u_int iv_loc, kctx_len, key_half, op_type, transhdr_len, wr_len; 759 u_int hash_size_in_response, imm_len, iopad_size; 760 u_int aad_start, aad_len, aad_stop; 761 u_int auth_start, auth_stop, auth_insert; 762 u_int cipher_start, cipher_stop; 763 u_int hmac_ctrl, input_len; 764 int dsgl_nsegs, dsgl_len; 765 int sgl_nsegs, sgl_len; 766 int error; 767 768 /* 769 * If there is a need in the future, requests with an empty 770 * payload could be supported as HMAC-only requests. 
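 * (ccr_hmac() already copes with a zero-length payload for plain HMAC
 * sessions by supplying a single padded block as immediate data.)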
771 */ 772 if (s->blkcipher.key_len == 0 || crde->crd_len == 0) 773 return (EINVAL); 774 if (crde->crd_alg == CRYPTO_AES_CBC && 775 (crde->crd_len % AES_BLOCK_LEN) != 0) 776 return (EINVAL); 777 778 /* 779 * AAD is only permitted before the cipher/plain text, not 780 * after. 781 */ 782 if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip) 783 return (EINVAL); 784 785 axf = s->hmac.auth_hash; 786 hash_size_in_response = s->hmac.hash_len; 787 788 /* 789 * The IV is always stored at the start of the buffer even 790 * though it may be duplicated in the payload. The crypto 791 * engine doesn't work properly if the IV offset points inside 792 * of the AAD region, so a second copy is always required. 793 */ 794 iv_loc = IV_IMMEDIATE; 795 if (crde->crd_flags & CRD_F_ENCRYPT) { 796 op_type = CHCR_ENCRYPT_OP; 797 if (crde->crd_flags & CRD_F_IV_EXPLICIT) 798 memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); 799 else 800 arc4rand(iv, s->blkcipher.iv_len, 0); 801 if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0) 802 crypto_copyback(crp->crp_flags, crp->crp_buf, 803 crde->crd_inject, s->blkcipher.iv_len, iv); 804 } else { 805 op_type = CHCR_DECRYPT_OP; 806 if (crde->crd_flags & CRD_F_IV_EXPLICIT) 807 memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); 808 else 809 crypto_copydata(crp->crp_flags, crp->crp_buf, 810 crde->crd_inject, s->blkcipher.iv_len, iv); 811 } 812 813 /* 814 * The output buffer consists of the cipher text followed by 815 * the hash when encrypting. For decryption it only contains 816 * the plain text. 817 */ 818 if (op_type == CHCR_ENCRYPT_OP) { 819 if (crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE) 820 return (EFBIG); 821 } else { 822 if (crde->crd_len > MAX_REQUEST_SIZE) 823 return (EFBIG); 824 } 825 sglist_reset(sc->sg_dsgl); 826 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip, 827 crde->crd_len); 828 if (error) 829 return (error); 830 if (op_type == CHCR_ENCRYPT_OP) { 831 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, 832 crda->crd_inject, hash_size_in_response); 833 if (error) 834 return (error); 835 } 836 dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); 837 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 838 return (EFBIG); 839 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 840 841 /* PADs must be 128-bit aligned. */ 842 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 843 844 /* 845 * The 'key' part of the key context consists of the key followed 846 * by the IPAD and OPAD. 847 */ 848 kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2; 849 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 850 851 /* 852 * The input buffer consists of the IV, any AAD, and then the 853 * cipher/plain text. For decryption requests the hash is 854 * appended after the cipher text. 855 */ 856 if (crda->crd_skip < crde->crd_skip) { 857 if (crda->crd_skip + crda->crd_len > crde->crd_skip) 858 aad_len = (crde->crd_skip - crda->crd_skip); 859 else 860 aad_len = crda->crd_len; 861 } else 862 aad_len = 0; 863 input_len = aad_len + crde->crd_len; 864 865 /* 866 * The firmware hangs if sent a request which is a 867 * bit smaller than MAX_REQUEST_SIZE. In particular, the 868 * firmware appears to require 512 - 16 bytes of spare room 869 * along with the size of the hash even if the hash isn't 870 * included in the input buffer. 
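 * In other words, the check below leaves just under 512 bytes
 * (512 - 16) of headroom below MAX_REQUEST_SIZE in addition to the
 * hash size rounded up to 16 bytes.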
871 */ 872 if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) > 873 MAX_REQUEST_SIZE) 874 return (EFBIG); 875 if (op_type == CHCR_DECRYPT_OP) 876 input_len += hash_size_in_response; 877 if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) { 878 imm_len = input_len; 879 sgl_nsegs = 0; 880 sgl_len = 0; 881 } else { 882 imm_len = 0; 883 sglist_reset(sc->sg_ulptx); 884 if (aad_len != 0) { 885 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, 886 crda->crd_skip, aad_len); 887 if (error) 888 return (error); 889 } 890 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, 891 crde->crd_skip, crde->crd_len); 892 if (error) 893 return (error); 894 if (op_type == CHCR_DECRYPT_OP) { 895 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, 896 crda->crd_inject, hash_size_in_response); 897 if (error) 898 return (error); 899 } 900 sgl_nsegs = sc->sg_ulptx->sg_nseg; 901 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 902 } 903 904 /* 905 * Any auth-only data before the cipher region is marked as AAD. 906 * Auth-data that overlaps with the cipher region is placed in 907 * the auth section. 908 */ 909 if (aad_len != 0) { 910 aad_start = s->blkcipher.iv_len + 1; 911 aad_stop = aad_start + aad_len - 1; 912 } else { 913 aad_start = 0; 914 aad_stop = 0; 915 } 916 cipher_start = s->blkcipher.iv_len + aad_len + 1; 917 if (op_type == CHCR_DECRYPT_OP) 918 cipher_stop = hash_size_in_response; 919 else 920 cipher_stop = 0; 921 if (aad_len == crda->crd_len) { 922 auth_start = 0; 923 auth_stop = 0; 924 } else { 925 if (aad_len != 0) 926 auth_start = cipher_start; 927 else 928 auth_start = s->blkcipher.iv_len + crda->crd_skip - 929 crde->crd_skip + 1; 930 auth_stop = (crde->crd_skip + crde->crd_len) - 931 (crda->crd_skip + crda->crd_len) + cipher_stop; 932 } 933 if (op_type == CHCR_DECRYPT_OP) 934 auth_insert = hash_size_in_response; 935 else 936 auth_insert = 0; 937 938 wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; 939 if (iv_loc == IV_IMMEDIATE) 940 wr_len += s->blkcipher.iv_len; 941 wr = alloc_wrqe(wr_len, sc->txq); 942 if (wr == NULL) { 943 sc->stats_wr_nomem++; 944 return (ENOMEM); 945 } 946 crwr = wrtod(wr); 947 memset(crwr, 0, wr_len); 948 949 ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 950 op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, iv_loc, 951 crp); 952 953 /* XXX: Hardcodes SGE loopback channel of 0. */ 954 crwr->sec_cpl.op_ivinsrtofst = htobe32( 955 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 956 V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | 957 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 958 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 959 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 960 961 crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len); 962 963 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 964 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 965 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 966 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 967 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4)); 968 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 969 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) | 970 V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) | 971 V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) | 972 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 973 974 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. 
*/ 975 hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response); 976 crwr->sec_cpl.seqno_numivs = htobe32( 977 V_SCMD_SEQ_NO_CTRL(0) | 978 V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | 979 V_SCMD_ENC_DEC_CTRL(op_type) | 980 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) | 981 V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | 982 V_SCMD_AUTH_MODE(s->hmac.auth_mode) | 983 V_SCMD_HMAC_CTRL(hmac_ctrl) | 984 V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) | 985 V_SCMD_NUM_IVS(0)); 986 crwr->sec_cpl.ivgen_hdrlen = htobe32( 987 V_SCMD_IV_GEN_CTRL(0) | 988 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 989 V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); 990 991 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 992 switch (crde->crd_alg) { 993 case CRYPTO_AES_CBC: 994 if (crde->crd_flags & CRD_F_ENCRYPT) 995 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 996 s->blkcipher.key_len); 997 else 998 memcpy(crwr->key_ctx.key, s->blkcipher.deckey, 999 s->blkcipher.key_len); 1000 break; 1001 case CRYPTO_AES_ICM: 1002 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 1003 s->blkcipher.key_len); 1004 break; 1005 case CRYPTO_AES_XTS: 1006 key_half = s->blkcipher.key_len / 2; 1007 memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, 1008 key_half); 1009 if (crde->crd_flags & CRD_F_ENCRYPT) 1010 memcpy(crwr->key_ctx.key + key_half, 1011 s->blkcipher.enckey, key_half); 1012 else 1013 memcpy(crwr->key_ctx.key + key_half, 1014 s->blkcipher.deckey, key_half); 1015 break; 1016 } 1017 1018 dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); 1019 memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len); 1020 memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len); 1021 1022 dst = (char *)(crwr + 1) + kctx_len; 1023 ccr_write_phys_dsgl(sc, dst, dsgl_nsegs); 1024 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1025 if (iv_loc == IV_IMMEDIATE) { 1026 memcpy(dst, iv, s->blkcipher.iv_len); 1027 dst += s->blkcipher.iv_len; 1028 } 1029 if (imm_len != 0) { 1030 if (aad_len != 0) { 1031 crypto_copydata(crp->crp_flags, crp->crp_buf, 1032 crda->crd_skip, aad_len, dst); 1033 dst += aad_len; 1034 } 1035 crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip, 1036 crde->crd_len, dst); 1037 dst += crde->crd_len; 1038 if (op_type == CHCR_DECRYPT_OP) 1039 crypto_copydata(crp->crp_flags, crp->crp_buf, 1040 crda->crd_inject, hash_size_in_response, dst); 1041 } else 1042 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 1043 1044 /* XXX: TODO backpressure */ 1045 t4_wrq_tx(sc->adapter, wr); 1046 1047 return (0); 1048 } 1049 1050 static int 1051 ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s, 1052 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1053 { 1054 struct cryptodesc *crd; 1055 1056 /* 1057 * The updated IV to permit chained requests is at 1058 * cpl->data[2], but OCF doesn't permit chained requests. 1059 * 1060 * For a decryption request, the hardware may do a verification 1061 * of the HMAC which will fail if the existing HMAC isn't in the 1062 * buffer. If that happens, clear the error and copy the HMAC 1063 * from the CPL reply into the buffer. 1064 * 1065 * For encryption requests, crd should be the cipher request 1066 * which will have CRD_F_ENCRYPT set. For decryption 1067 * requests, crp_desc will be the HMAC request which should 1068 * not have this flag set. 
1069 */ 1070 crd = crp->crp_desc; 1071 if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) && 1072 !(crd->crd_flags & CRD_F_ENCRYPT)) { 1073 crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, 1074 s->hmac.hash_len, (c_caddr_t)(cpl + 1)); 1075 error = 0; 1076 } 1077 return (error); 1078 } 1079 1080 static int 1081 ccr_gcm(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, 1082 struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) 1083 { 1084 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 1085 struct chcr_wr *crwr; 1086 struct wrqe *wr; 1087 char *dst; 1088 u_int iv_len, iv_loc, kctx_len, op_type, transhdr_len, wr_len; 1089 u_int hash_size_in_response, imm_len; 1090 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; 1091 u_int hmac_ctrl, input_len; 1092 int dsgl_nsegs, dsgl_len; 1093 int sgl_nsegs, sgl_len; 1094 int error; 1095 1096 if (s->blkcipher.key_len == 0) 1097 return (EINVAL); 1098 1099 /* 1100 * AAD is only permitted before the cipher/plain text, not 1101 * after. 1102 */ 1103 if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip) 1104 return (EINVAL); 1105 1106 hash_size_in_response = s->gmac.hash_len; 1107 1108 /* 1109 * The IV is always stored at the start of the buffer even 1110 * though it may be duplicated in the payload. The crypto 1111 * engine doesn't work properly if the IV offset points inside 1112 * of the AAD region, so a second copy is always required. 1113 * 1114 * The IV for GCM is further complicated in that IPSec 1115 * provides a full 16-byte IV (including the counter), whereas 1116 * the /dev/crypto interface sometimes provides a full 16-byte 1117 * IV (if no IV is provided in the ioctl) and sometimes a 1118 * 12-byte IV (if the IV was explicit). For now the driver 1119 * always assumes a 12-byte IV and initializes the low 4 byte 1120 * counter to 1. 1121 */ 1122 iv_loc = IV_IMMEDIATE; 1123 if (crde->crd_flags & CRD_F_ENCRYPT) { 1124 op_type = CHCR_ENCRYPT_OP; 1125 if (crde->crd_flags & CRD_F_IV_EXPLICIT) 1126 memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); 1127 else 1128 arc4rand(iv, s->blkcipher.iv_len, 0); 1129 if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0) 1130 crypto_copyback(crp->crp_flags, crp->crp_buf, 1131 crde->crd_inject, s->blkcipher.iv_len, iv); 1132 } else { 1133 op_type = CHCR_DECRYPT_OP; 1134 if (crde->crd_flags & CRD_F_IV_EXPLICIT) 1135 memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); 1136 else 1137 crypto_copydata(crp->crp_flags, crp->crp_buf, 1138 crde->crd_inject, s->blkcipher.iv_len, iv); 1139 } 1140 1141 /* 1142 * If the input IV is 12 bytes, append an explicit counter of 1143 * 1. 1144 */ 1145 if (s->blkcipher.iv_len == 12) { 1146 *(uint32_t *)&iv[12] = htobe32(1); 1147 iv_len = AES_BLOCK_LEN; 1148 } else 1149 iv_len = s->blkcipher.iv_len; 1150 1151 /* 1152 * The output buffer consists of the cipher text followed by 1153 * the tag when encrypting. For decryption it only contains 1154 * the plain text. 
1155 */ 1156 if (op_type == CHCR_ENCRYPT_OP) { 1157 if (crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE) 1158 return (EFBIG); 1159 } else { 1160 if (crde->crd_len > MAX_REQUEST_SIZE) 1161 return (EFBIG); 1162 } 1163 sglist_reset(sc->sg_dsgl); 1164 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip, 1165 crde->crd_len); 1166 if (error) 1167 return (error); 1168 if (op_type == CHCR_ENCRYPT_OP) { 1169 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, 1170 crda->crd_inject, hash_size_in_response); 1171 if (error) 1172 return (error); 1173 } 1174 dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); 1175 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 1176 return (EFBIG); 1177 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 1178 1179 /* 1180 * The 'key' part of the key context consists of the key followed 1181 * by the Galois hash key. 1182 */ 1183 kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN; 1184 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 1185 1186 /* 1187 * The input buffer consists of the IV, any AAD, and then the 1188 * cipher/plain text. For decryption requests the hash is 1189 * appended after the cipher text. 1190 */ 1191 input_len = crda->crd_len + crde->crd_len; 1192 if (op_type == CHCR_DECRYPT_OP) 1193 input_len += hash_size_in_response; 1194 if (input_len > MAX_REQUEST_SIZE) 1195 return (EFBIG); 1196 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { 1197 imm_len = input_len; 1198 sgl_nsegs = 0; 1199 sgl_len = 0; 1200 } else { 1201 imm_len = 0; 1202 sglist_reset(sc->sg_ulptx); 1203 if (crda->crd_len != 0) { 1204 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, 1205 crda->crd_skip, crda->crd_len); 1206 if (error) 1207 return (error); 1208 } 1209 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, 1210 crde->crd_skip, crde->crd_len); 1211 if (error) 1212 return (error); 1213 if (op_type == CHCR_DECRYPT_OP) { 1214 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, 1215 crda->crd_inject, hash_size_in_response); 1216 if (error) 1217 return (error); 1218 } 1219 sgl_nsegs = sc->sg_ulptx->sg_nseg; 1220 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 1221 } 1222 1223 if (crda->crd_len != 0) { 1224 aad_start = iv_len + 1; 1225 aad_stop = aad_start + crda->crd_len - 1; 1226 } else { 1227 aad_start = 0; 1228 aad_stop = 0; 1229 } 1230 cipher_start = iv_len + crda->crd_len + 1; 1231 if (op_type == CHCR_DECRYPT_OP) 1232 cipher_stop = hash_size_in_response; 1233 else 1234 cipher_stop = 0; 1235 if (op_type == CHCR_DECRYPT_OP) 1236 auth_insert = hash_size_in_response; 1237 else 1238 auth_insert = 0; 1239 1240 wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; 1241 if (iv_loc == IV_IMMEDIATE) 1242 wr_len += iv_len; 1243 wr = alloc_wrqe(wr_len, sc->txq); 1244 if (wr == NULL) { 1245 sc->stats_wr_nomem++; 1246 return (ENOMEM); 1247 } 1248 crwr = wrtod(wr); 1249 memset(crwr, 0, wr_len); 1250 1251 ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 1252 0, iv_loc, crp); 1253 1254 /* XXX: Hardcodes SGE loopback channel of 0. */ 1255 crwr->sec_cpl.op_ivinsrtofst = htobe32( 1256 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 1257 V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | 1258 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 1259 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 1260 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 1261 1262 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); 1263 1264 /* 1265 * NB: cipherstop is explicitly set to 0. 
On encrypt it 1266 * should normally be set to 0 anyway (as the encrypt crd ends 1267 * at the end of the input). However, for decrypt the cipher 1268 * ends before the tag in the AUTHENC case (and authstop is 1269 * set to stop before the tag), but for GCM the cipher still 1270 * runs to the end of the buffer. Not sure if this is 1271 * intentional or a firmware quirk, but it is required for 1272 * working tag validation with GCM decryption. 1273 */ 1274 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 1275 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 1276 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 1277 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1278 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 1279 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 1280 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | 1281 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 1282 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1283 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1284 1285 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1286 hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response); 1287 crwr->sec_cpl.seqno_numivs = htobe32( 1288 V_SCMD_SEQ_NO_CTRL(0) | 1289 V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | 1290 V_SCMD_ENC_DEC_CTRL(op_type) | 1291 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) | 1292 V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_AES_GCM) | 1293 V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_GHASH) | 1294 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1295 V_SCMD_IV_SIZE(iv_len / 2) | 1296 V_SCMD_NUM_IVS(0)); 1297 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1298 V_SCMD_IV_GEN_CTRL(0) | 1299 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1300 V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); 1301 1302 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1303 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); 1304 dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); 1305 memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN); 1306 1307 dst = (char *)(crwr + 1) + kctx_len; 1308 ccr_write_phys_dsgl(sc, dst, dsgl_nsegs); 1309 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1310 if (iv_loc == IV_IMMEDIATE) { 1311 memcpy(dst, iv, iv_len); 1312 dst += iv_len; 1313 } 1314 if (imm_len != 0) { 1315 if (crda->crd_len != 0) { 1316 crypto_copydata(crp->crp_flags, crp->crp_buf, 1317 crda->crd_skip, crda->crd_len, dst); 1318 dst += crda->crd_len; 1319 } 1320 crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip, 1321 crde->crd_len, dst); 1322 dst += crde->crd_len; 1323 if (op_type == CHCR_DECRYPT_OP) 1324 crypto_copydata(crp->crp_flags, crp->crp_buf, 1325 crda->crd_inject, hash_size_in_response, dst); 1326 } else 1327 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 1328 1329 /* XXX: TODO backpressure */ 1330 t4_wrq_tx(sc->adapter, wr); 1331 1332 return (0); 1333 } 1334 1335 static int 1336 ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s, 1337 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1338 { 1339 1340 /* 1341 * The updated IV to permit chained requests is at 1342 * cpl->data[2], but OCF doesn't permit chained requests. 1343 * 1344 * Note that the hardware should always verify the GMAC hash. 1345 */ 1346 return (error); 1347 } 1348 1349 /* 1350 * Handle a GCM request with an empty payload by performing the 1351 * operation in software. Derived from swcr_authenc(). 
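 * With no payload the operation reduces to GMAC over the AAD alone,
 * so the tag is computed (and, for decryption, verified) here using
 * the software AES_GMAC transform instead of the crypto engine.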
1352 */ 1353 static void 1354 ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp, 1355 struct cryptodesc *crda, struct cryptodesc *crde) 1356 { 1357 struct aes_gmac_ctx gmac_ctx; 1358 char block[GMAC_BLOCK_LEN]; 1359 char digest[GMAC_DIGEST_LEN]; 1360 char iv[AES_BLOCK_LEN]; 1361 int i, len; 1362 1363 /* 1364 * This assumes a 12-byte IV from the crp. See longer comment 1365 * above in ccr_gcm() for more details. 1366 */ 1367 if (crde->crd_flags & CRD_F_ENCRYPT) { 1368 if (crde->crd_flags & CRD_F_IV_EXPLICIT) 1369 memcpy(iv, crde->crd_iv, 12); 1370 else 1371 arc4rand(iv, 12, 0); 1372 } else { 1373 if (crde->crd_flags & CRD_F_IV_EXPLICIT) 1374 memcpy(iv, crde->crd_iv, 12); 1375 else 1376 crypto_copydata(crp->crp_flags, crp->crp_buf, 1377 crde->crd_inject, 12, iv); 1378 } 1379 *(uint32_t *)&iv[12] = htobe32(1); 1380 1381 /* Initialize the MAC. */ 1382 AES_GMAC_Init(&gmac_ctx); 1383 AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len); 1384 AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv)); 1385 1386 /* MAC the AAD. */ 1387 for (i = 0; i < crda->crd_len; i += sizeof(block)) { 1388 len = imin(crda->crd_len - i, sizeof(block)); 1389 crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip + 1390 i, len, block); 1391 bzero(block + len, sizeof(block) - len); 1392 AES_GMAC_Update(&gmac_ctx, block, sizeof(block)); 1393 } 1394 1395 /* Length block. */ 1396 bzero(block, sizeof(block)); 1397 ((uint32_t *)block)[1] = htobe32(crda->crd_len * 8); 1398 AES_GMAC_Update(&gmac_ctx, block, sizeof(block)); 1399 AES_GMAC_Final(digest, &gmac_ctx); 1400 1401 if (crde->crd_flags & CRD_F_ENCRYPT) { 1402 crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject, 1403 sizeof(digest), digest); 1404 crp->crp_etype = 0; 1405 } else { 1406 char digest2[GMAC_DIGEST_LEN]; 1407 1408 crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject, 1409 sizeof(digest2), digest2); 1410 if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) 1411 crp->crp_etype = 0; 1412 else 1413 crp->crp_etype = EBADMSG; 1414 } 1415 crypto_done(crp); 1416 } 1417 1418 static void 1419 ccr_identify(driver_t *driver, device_t parent) 1420 { 1421 struct adapter *sc; 1422 1423 sc = device_get_softc(parent); 1424 if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE && 1425 device_find_child(parent, "ccr", -1) == NULL) 1426 device_add_child(parent, "ccr", -1); 1427 } 1428 1429 static int 1430 ccr_probe(device_t dev) 1431 { 1432 1433 device_set_desc(dev, "Chelsio Crypto Accelerator"); 1434 return (BUS_PROBE_DEFAULT); 1435 } 1436 1437 static void 1438 ccr_sysctls(struct ccr_softc *sc) 1439 { 1440 struct sysctl_ctx_list *ctx; 1441 struct sysctl_oid *oid; 1442 struct sysctl_oid_list *children; 1443 1444 ctx = device_get_sysctl_ctx(sc->dev); 1445 1446 /* 1447 * dev.ccr.X. 1448 */ 1449 oid = device_get_sysctl_tree(sc->dev); 1450 children = SYSCTL_CHILDREN(oid); 1451 1452 /* 1453 * dev.ccr.X.stats. 
 */
        oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
            NULL, "statistics");
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
            &sc->stats_hmac, 0, "HMAC requests submitted");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
            &sc->stats_blkcipher_encrypt, 0,
            "Cipher encryption requests submitted");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
            &sc->stats_blkcipher_decrypt, 0,
            "Cipher decryption requests submitted");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
            &sc->stats_authenc_encrypt, 0,
            "Combined AES+HMAC encryption requests submitted");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
            &sc->stats_authenc_decrypt, 0,
            "Combined AES+HMAC decryption requests submitted");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
            &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
            &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
            &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
            &sc->stats_inflight, 0, "Requests currently pending");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
            &sc->stats_mac_error, 0, "MAC errors");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
            &sc->stats_pad_error, 0, "Padding errors");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
            &sc->stats_bad_session, 0, "Requests with invalid session ID");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
            &sc->stats_sglist_error, 0,
            "Requests for which DMA mapping failed");
        SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
            &sc->stats_process_error, 0, "Requests failed during queueing");
}

static int
ccr_attach(device_t dev)
{
        struct ccr_softc *sc;
        int32_t cid;

        /*
         * TODO: Crypto requests will panic if the parent device isn't
         * initialized so that the queues are up and running.  Need to
         * figure out how to handle that correctly, maybe just reject
         * requests if the adapter isn't fully initialized?
         */
        sc = device_get_softc(dev);
        sc->dev = dev;
        sc->adapter = device_get_softc(device_get_parent(dev));
        sc->txq = &sc->adapter->sge.ctrlq[0];
        sc->rxq = &sc->adapter->sge.rxq[0];
        cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
        if (cid < 0) {
                device_printf(dev, "could not get crypto driver id\n");
                return (ENXIO);
        }
        sc->cid = cid;
        sc->adapter->ccr_softc = sc;

        /* XXX: TODO? */
        sc->tx_channel_id = 0;

        mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
        sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
        sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
        sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
        ccr_sysctls(sc);

        crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
        crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
        crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
        crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
        crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
        crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
        crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
        crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
        crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
        crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
        crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
        return (0);
}

static int
ccr_detach(device_t dev)
{
        struct ccr_softc *sc;
        int i;

        sc = device_get_softc(dev);

        mtx_lock(&sc->lock);
        for (i = 0; i < sc->nsessions; i++) {
                if (sc->sessions[i].active || sc->sessions[i].pending != 0) {
                        mtx_unlock(&sc->lock);
                        return (EBUSY);
                }
        }
        sc->detaching = true;
        mtx_unlock(&sc->lock);

        crypto_unregister_all(sc->cid);
        free(sc->sessions, M_CCR);
        mtx_destroy(&sc->lock);
        sglist_free(sc->sg_dsgl);
        sglist_free(sc->sg_ulptx);
        sglist_free(sc->sg_crp);
        sc->adapter->ccr_softc = NULL;
        return (0);
}

static void
ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx)
{
        uint32_t *u32;
        uint64_t *u64;
        u_int i;

        u32 = (uint32_t *)dst;
        u64 = (uint64_t *)dst;
        switch (cri_alg) {
        case CRYPTO_SHA1_HMAC:
                for (i = 0; i < SHA1_HASH_LEN / 4; i++)
                        u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]);
                break;
        case CRYPTO_SHA2_256_HMAC:
                for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
                        u32[i] = htobe32(auth_ctx->sha256ctx.state[i]);
                break;
        case CRYPTO_SHA2_384_HMAC:
                for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
                        u64[i] = htobe64(auth_ctx->sha384ctx.state[i]);
                break;
        case CRYPTO_SHA2_512_HMAC:
                for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
                        u64[i] = htobe64(auth_ctx->sha512ctx.state[i]);
                break;
        }
}

static void
ccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key,
    int klen)
{
        union authctx auth_ctx;
        struct auth_hash *axf;
        u_int i;

        /*
         * If the key is larger than the block size, use the digest of
         * the key as the key instead.
         */
        axf = s->hmac.auth_hash;
        klen /= 8;
        if (klen > axf->blocksize) {
                axf->Init(&auth_ctx);
                axf->Update(&auth_ctx, key, klen);
                axf->Final(s->hmac.ipad, &auth_ctx);
                klen = axf->hashsize;
        } else
                memcpy(s->hmac.ipad, key, klen);

        /* Zero only the remainder of the block, not past the ipad buffer. */
        memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
        memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

        for (i = 0; i < axf->blocksize; i++) {
                s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
                s->hmac.opad[i] ^= HMAC_OPAD_VAL;
        }

        /*
         * Hash the raw ipad and opad and store the partial result in
         * the same buffer.
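         * Only the hash's internal state words are stored (see
         * ccr_copy_partial_hash()); the hardware resumes the computation
         * from this partial state rather than from a finalized digest.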
1631 */ 1632 axf->Init(&auth_ctx); 1633 axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize); 1634 ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx); 1635 1636 axf->Init(&auth_ctx); 1637 axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize); 1638 ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx); 1639 } 1640 1641 /* 1642 * Borrowed from AES_GMAC_Setkey(). 1643 */ 1644 static void 1645 ccr_init_gmac_hash(struct ccr_session *s, char *key, int klen) 1646 { 1647 static char zeroes[GMAC_BLOCK_LEN]; 1648 uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)]; 1649 int rounds; 1650 1651 rounds = rijndaelKeySetupEnc(keysched, key, klen); 1652 rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h); 1653 } 1654 1655 static int 1656 ccr_aes_check_keylen(int alg, int klen) 1657 { 1658 1659 switch (klen) { 1660 case 128: 1661 case 192: 1662 if (alg == CRYPTO_AES_XTS) 1663 return (EINVAL); 1664 break; 1665 case 256: 1666 break; 1667 case 512: 1668 if (alg != CRYPTO_AES_XTS) 1669 return (EINVAL); 1670 break; 1671 default: 1672 return (EINVAL); 1673 } 1674 return (0); 1675 } 1676 1677 /* 1678 * Borrowed from cesa_prep_aes_key(). We should perhaps have a public 1679 * function to generate this instead. 1680 * 1681 * NB: The crypto engine wants the words in the decryption key in reverse 1682 * order. 1683 */ 1684 static void 1685 ccr_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits) 1686 { 1687 uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; 1688 uint32_t *dkey; 1689 int i; 1690 1691 rijndaelKeySetupEnc(ek, enc_key, kbits); 1692 dkey = dec_key; 1693 dkey += (kbits / 8) / 4; 1694 1695 switch (kbits) { 1696 case 128: 1697 for (i = 0; i < 4; i++) 1698 *--dkey = htobe32(ek[4 * 10 + i]); 1699 break; 1700 case 192: 1701 for (i = 0; i < 2; i++) 1702 *--dkey = htobe32(ek[4 * 11 + 2 + i]); 1703 for (i = 0; i < 4; i++) 1704 *--dkey = htobe32(ek[4 * 12 + i]); 1705 break; 1706 case 256: 1707 for (i = 0; i < 4; i++) 1708 *--dkey = htobe32(ek[4 * 13 + i]); 1709 for (i = 0; i < 4; i++) 1710 *--dkey = htobe32(ek[4 * 14 + i]); 1711 break; 1712 } 1713 MPASS(dkey == dec_key); 1714 } 1715 1716 static void 1717 ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen) 1718 { 1719 unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size; 1720 unsigned int opad_present; 1721 1722 if (alg == CRYPTO_AES_XTS) 1723 kbits = klen / 2; 1724 else 1725 kbits = klen; 1726 switch (kbits) { 1727 case 128: 1728 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 1729 break; 1730 case 192: 1731 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 1732 break; 1733 case 256: 1734 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 1735 break; 1736 default: 1737 panic("should not get here"); 1738 } 1739 1740 s->blkcipher.key_len = klen / 8; 1741 memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); 1742 switch (alg) { 1743 case CRYPTO_AES_CBC: 1744 case CRYPTO_AES_XTS: 1745 ccr_aes_getdeckey(s->blkcipher.deckey, key, kbits); 1746 break; 1747 } 1748 1749 kctx_len = roundup2(s->blkcipher.key_len, 16); 1750 switch (s->mode) { 1751 case AUTHENC: 1752 mk_size = s->hmac.mk_size; 1753 opad_present = 1; 1754 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 1755 kctx_len += iopad_size * 2; 1756 break; 1757 case GCM: 1758 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 1759 opad_present = 0; 1760 kctx_len += GMAC_BLOCK_LEN; 1761 break; 1762 default: 1763 mk_size = CHCR_KEYCTX_NO_KEY; 1764 opad_present = 0; 1765 break; 1766 } 1767 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; 1768 s->blkcipher.key_ctx_hdr = 
	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
}

static int
ccr_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct auth_hash *auth_hash;
	struct cryptoini *c, *hash, *cipher;
	unsigned int auth_mode, cipher_mode, iv_len, mk_size;
	unsigned int partial_digest_len;
	int error, i, sess;
	bool gcm_hash;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	gcm_hash = false;
	cipher = NULL;
	hash = NULL;
	auth_hash = NULL;
	auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	cipher_mode = CHCR_SCMD_CIPHER_MODE_NOP;
	iv_len = 0;
	mk_size = 0;
	partial_digest_len = 0;
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			if (hash)
				return (EINVAL);
			hash = c;
			switch (c->cri_alg) {
			case CRYPTO_SHA1_HMAC:
				auth_hash = &auth_hash_hmac_sha1;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
				partial_digest_len = SHA1_HASH_LEN;
				break;
			case CRYPTO_SHA2_256_HMAC:
				auth_hash = &auth_hash_hmac_sha2_256;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_384_HMAC:
				auth_hash = &auth_hash_hmac_sha2_384;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_SHA2_512_HMAC:
				auth_hash = &auth_hash_hmac_sha2_512;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_AES_128_NIST_GMAC:
			case CRYPTO_AES_192_NIST_GMAC:
			case CRYPTO_AES_256_NIST_GMAC:
				gcm_hash = true;
				auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
				break;
			}
			break;
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_XTS:
			if (cipher)
				return (EINVAL);
			cipher = c;
			switch (c->cri_alg) {
			case CRYPTO_AES_CBC:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_ICM:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_NIST_GCM_16:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_GCM;
				iv_len = AES_GCM_IV_LEN;
				break;
			case CRYPTO_AES_XTS:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
				iv_len = AES_BLOCK_LEN;
				break;
			}
			if (c->cri_key != NULL) {
				error = ccr_aes_check_keylen(c->cri_alg,
				    c->cri_klen);
				if (error)
					return (error);
			}
			break;
		default:
			return (EINVAL);
		}
	}
	if (gcm_hash != (cipher_mode == CHCR_SCMD_CIPHER_MODE_AES_GCM))
		return (EINVAL);
	if (hash == NULL && cipher == NULL)
		return (EINVAL);
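	/*
	 * A hash key must be supplied up front so that the HMAC
	 * partial digests or the GHASH key can be precomputed when
	 * the session is created.
	 */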
	if (hash != NULL && hash->cri_key == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	sess = -1;
	for (i = 0; i < sc->nsessions; i++) {
		if (!sc->sessions[i].active && sc->sessions[i].pending == 0) {
			sess = i;
			break;
		}
	}
	if (sess == -1) {
		s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCR,
		    M_NOWAIT | M_ZERO);
		if (s == NULL) {
			mtx_unlock(&sc->lock);
			return (ENOMEM);
		}
		if (sc->sessions != NULL)
			memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions);
		sess = sc->nsessions;
		free(sc->sessions, M_CCR);
		sc->sessions = s;
		sc->nsessions++;
	}

	s = &sc->sessions[sess];

	if (gcm_hash)
		s->mode = GCM;
	else if (hash != NULL && cipher != NULL)
		s->mode = AUTHENC;
	else if (hash != NULL)
		s->mode = HMAC;
	else {
		MPASS(cipher != NULL);
		s->mode = BLKCIPHER;
	}
	if (gcm_hash) {
		if (hash->cri_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = hash->cri_mlen;
		ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen);
	} else if (hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.mk_size = mk_size;
		s->hmac.partial_digest_len = partial_digest_len;
		if (hash->cri_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = hash->cri_mlen;
		ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
		    hash->cri_klen);
	}
	if (cipher != NULL) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = iv_len;
		if (cipher->cri_key != NULL)
			ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
			    cipher->cri_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	*sidp = sess;
	return (0);
}

static int
ccr_freesession(device_t dev, uint64_t tid)
{
	struct ccr_softc *sc;
	uint32_t sid;
	int error;

	sc = device_get_softc(dev);
	sid = CRYPTO_SESID2LID(tid);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active)
		error = EINVAL;
	else {
		if (sc->sessions[sid].pending != 0)
			device_printf(dev,
			    "session %d freed with %d pending requests\n", sid,
			    sc->sessions[sid].pending);
		sc->sessions[sid].active = false;
		error = 0;
	}
	mtx_unlock(&sc->lock);
	return (error);
}

static int
ccr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct cryptodesc *crd, *crda, *crde;
	uint32_t sid;
	int error;

	if (crp == NULL)
		return (EINVAL);

	crd = crp->crp_desc;
	sid = CRYPTO_SESID2LID(crp->crp_sid);
	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active) {
		sc->stats_bad_session++;
		error = EINVAL;
		goto out;
	}

	error = ccr_populate_sglist(sc->sg_crp, crp);
	if (error) {
		sc->stats_sglist_error++;
		goto out;
	}

	s = &sc->sessions[sid];
	switch (s->mode) {
	case HMAC:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		error = ccr_hmac(sc, sid, s, crp);
		if (error == 0)
			sc->stats_hmac++;
		break;
	case BLKCIPHER:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crd->crd_alg,
			    crd->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		}
		error = ccr_blkcipher(sc, sid, s, crp);
		if (error == 0) {
			if (crd->crd_flags & CRD_F_ENCRYPT)
				sc->stats_blkcipher_encrypt++;
			else
				sc->stats_blkcipher_decrypt++;
		}
		break;
	case AUTHENC:
		error = 0;
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			/* Only encrypt-then-authenticate supported. */
			crde = crd;
			crda = crd->crd_next;
			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
				error = EINVAL;
				break;
			}
			break;
		default:
			/* Only authenticate-then-decrypt supported. */
			crda = crd;
			crde = crd->crd_next;
			if (crde->crd_flags & CRD_F_ENCRYPT) {
				error = EINVAL;
				break;
			}
			break;
		}
		if (error)
			break;
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
			    crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		error = ccr_authenc(sc, sid, s, crp, crda, crde);
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_authenc_encrypt++;
			else
				sc->stats_authenc_decrypt++;
		}
		break;
	case GCM:
		error = 0;
		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
			crde = crd;
			crda = crd->crd_next;
		} else {
			crda = crd;
			crde = crd->crd_next;
		}
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		if (crde->crd_len == 0) {
			/*
			 * Requests with no payload are handled in
			 * software via ccr_gcm_soft() rather than by
			 * the engine.
			 */
			mtx_unlock(&sc->lock);
			ccr_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		error = ccr_gcm(sc, sid, s, crp, crda, crde);
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_gcm_encrypt++;
			else
				sc->stats_gcm_decrypt++;
		}
		break;
	}

	if (error == 0) {
		s->pending++;
		sc->stats_inflight++;
	} else
		sc->stats_process_error++;

out:
	mtx_unlock(&sc->lock);

	if (error) {
		crp->crp_etype = error;
		crypto_done(crp);
	}

	return (0);
}

static int
do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct ccr_softc *sc = iq->adapter->ccr_softc;
	struct ccr_session *s;
	const struct cpl_fw6_pld *cpl;
	struct cryptop *crp;
	uint32_t sid, status;
	int error;

	if (m != NULL)
		cpl = mtod(m, const void *);
	else
		cpl = (const void *)(rss + 1);

	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
	sid = CRYPTO_SESID2LID(crp->crp_sid);
	status = be64toh(cpl->data[0]);
	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
		error = EBADMSG;
	else
		error = 0;

	mtx_lock(&sc->lock);
	MPASS(sid < sc->nsessions);
	s = &sc->sessions[sid];
	s->pending--;
	sc->stats_inflight--;

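	/*
	 * Hand the reply to the mode-specific completion handler,
	 * which may refine the error status before the request is
	 * returned to the crypto framework.
	 */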
	switch (s->mode) {
	case HMAC:
		error = ccr_hmac_done(sc, s, crp, cpl, error);
		break;
	case BLKCIPHER:
		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
		break;
	case AUTHENC:
		error = ccr_authenc_done(sc, s, crp, cpl, error);
		break;
	case GCM:
		error = ccr_gcm_done(sc, s, crp, cpl, error);
		break;
	}

	if (error == EBADMSG) {
		if (CHK_MAC_ERR_BIT(status))
			sc->stats_mac_error++;
		if (CHK_PAD_ERR_BIT(status))
			sc->stats_pad_error++;
	}
	mtx_unlock(&sc->lock);
	crp->crp_etype = error;
	crypto_done(crp);
	m_freem(m);
	return (0);
}

static int
ccr_modevent(module_t mod, int cmd, void *arg)
{

	switch (cmd) {
	case MOD_LOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
		return (0);
	case MOD_UNLOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static device_method_t ccr_methods[] = {
	DEVMETHOD(device_identify, ccr_identify),
	DEVMETHOD(device_probe, ccr_probe),
	DEVMETHOD(device_attach, ccr_attach),
	DEVMETHOD(device_detach, ccr_detach),

	DEVMETHOD(cryptodev_newsession, ccr_newsession),
	DEVMETHOD(cryptodev_freesession, ccr_freesession),
	DEVMETHOD(cryptodev_process, ccr_process),

	DEVMETHOD_END
};

static driver_t ccr_driver = {
	"ccr",
	ccr_methods,
	sizeof(struct ccr_softc)
};

static devclass_t ccr_devclass;

DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);