1 /*- 2 * Copyright (c) 2017 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: John Baldwin <jhb@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include <sys/types.h> 32 #include <sys/bus.h> 33 #include <sys/lock.h> 34 #include <sys/malloc.h> 35 #include <sys/mutex.h> 36 #include <sys/module.h> 37 #include <sys/sglist.h> 38 39 #include <opencrypto/cryptodev.h> 40 #include <opencrypto/xform.h> 41 42 #include "cryptodev_if.h" 43 44 #include "common/common.h" 45 #include "crypto/t4_crypto.h" 46 47 /* 48 * Requests consist of: 49 * 50 * +-------------------------------+ 51 * | struct fw_crypto_lookaside_wr | 52 * +-------------------------------+ 53 * | struct ulp_txpkt | 54 * +-------------------------------+ 55 * | struct ulptx_idata | 56 * +-------------------------------+ 57 * | struct cpl_tx_sec_pdu | 58 * +-------------------------------+ 59 * | struct cpl_tls_tx_scmd_fmt | 60 * +-------------------------------+ 61 * | key context header | 62 * +-------------------------------+ 63 * | AES key | ----- For requests with AES 64 * +-------------------------------+ 65 * | Hash state | ----- For hash-only requests 66 * +-------------------------------+ - 67 * | IPAD (16-byte aligned) | \ 68 * +-------------------------------+ +---- For requests with HMAC 69 * | OPAD (16-byte aligned) | / 70 * +-------------------------------+ - 71 * | GMAC H | ----- For AES-GCM 72 * +-------------------------------+ - 73 * | struct cpl_rx_phys_dsgl | \ 74 * +-------------------------------+ +---- Destination buffer for 75 * | PHYS_DSGL entries | / non-hash-only requests 76 * +-------------------------------+ - 77 * | 16 dummy bytes | ----- Only for HMAC/hash-only requests 78 * +-------------------------------+ 79 * | IV | ----- If immediate IV 80 * +-------------------------------+ 81 * | Payload | ----- If immediate Payload 82 * +-------------------------------+ - 83 * | struct ulptx_sgl | \ 84 * +-------------------------------+ +---- If payload via SGL 85 * | SGL entries | / 86 * +-------------------------------+ - 87 * 88 * Note that the key context must be padded to ensure 16-byte alignment. 89 * For HMAC requests, the key consists of the partial hash of the IPAD 90 * followed by the partial hash of the OPAD. 91 * 92 * Replies consist of: 93 * 94 * +-------------------------------+ 95 * | struct cpl_fw6_pld | 96 * +-------------------------------+ 97 * | hash digest | ----- For HMAC request with 98 * +-------------------------------+ 'hash_size' set in work request 99 * 100 * A 32-bit big-endian error status word is supplied in the last 4 101 * bytes of data[0] in the CPL_FW6_PLD message. bit 0 indicates a 102 * "MAC" error and bit 1 indicates a "PAD" error. 103 * 104 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message 105 * in the request is returned in data[1] of the CPL_FW6_PLD message. 106 * 107 * For block cipher replies, the updated IV is supplied in data[2] and 108 * data[3] of the CPL_FW6_PLD message. 109 * 110 * For hash replies where the work request set 'hash_size' to request 111 * a copy of the hash in the reply, the hash digest is supplied 112 * immediately following the CPL_FW6_PLD message. 113 */ 114 115 /* 116 * The crypto engine supports a maximum AAD size of 511 bytes. 117 */ 118 #define MAX_AAD_LEN 511 119 120 /* 121 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG 122 * entries. While the CPL includes a 16-bit length field, the T6 can 123 * sometimes hang if an error occurs while processing a request with a 124 * single DSGL entry larger than 2k. 125 */ 126 #define MAX_RX_PHYS_DSGL_SGE 32 127 #define DSGL_SGE_MAXLEN 2048 128 129 /* 130 * The adapter only supports requests with a total input or output 131 * length of 64k-1 or smaller. Longer requests either result in hung 132 * requests or incorrect results. 133 */ 134 #define MAX_REQUEST_SIZE 65535 135 136 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto"); 137 138 struct ccr_session_hmac { 139 struct auth_hash *auth_hash; 140 int hash_len; 141 unsigned int partial_digest_len; 142 unsigned int auth_mode; 143 unsigned int mk_size; 144 char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2]; 145 }; 146 147 struct ccr_session_gmac { 148 int hash_len; 149 char ghash_h[GMAC_BLOCK_LEN]; 150 }; 151 152 struct ccr_session_ccm_mac { 153 int hash_len; 154 }; 155 156 struct ccr_session_blkcipher { 157 unsigned int cipher_mode; 158 unsigned int key_len; 159 unsigned int iv_len; 160 __be32 key_ctx_hdr; 161 char enckey[CHCR_AES_MAX_KEY_LEN]; 162 char deckey[CHCR_AES_MAX_KEY_LEN]; 163 }; 164 165 struct ccr_port { 166 struct sge_wrq *txq; 167 struct sge_rxq *rxq; 168 int tx_channel_id; 169 u_int active_sessions; 170 }; 171 172 struct ccr_session { 173 bool active; 174 int pending; 175 enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode; 176 struct ccr_port *port; 177 union { 178 struct ccr_session_hmac hmac; 179 struct ccr_session_gmac gmac; 180 struct ccr_session_ccm_mac ccm_mac; 181 }; 182 struct ccr_session_blkcipher blkcipher; 183 }; 184 185 struct ccr_softc { 186 struct adapter *adapter; 187 device_t dev; 188 uint32_t cid; 189 struct mtx lock; 190 bool detaching; 191 struct ccr_port ports[MAX_NPORTS]; 192 u_int port_mask; 193 194 /* 195 * Pre-allocate S/G lists used when preparing a work request. 196 * 'sg_input' contains an sglist describing the entire input 197 * buffer for a 'struct cryptop'. 'sg_output' contains an 198 * sglist describing the entire output buffer. 'sg_ulptx' is 199 * used to describe the data the engine should DMA as input 200 * via ULPTX_SGL. 'sg_dsgl' is used to describe the 201 * destination that cipher text and a tag should be written 202 * to. 203 */ 204 struct sglist *sg_input; 205 struct sglist *sg_output; 206 struct sglist *sg_ulptx; 207 struct sglist *sg_dsgl; 208 209 /* 210 * Pre-allocate a dummy output buffer for the IV and AAD for 211 * AEAD requests. 212 */ 213 char *iv_aad_buf; 214 struct sglist *sg_iv_aad; 215 216 /* Statistics. */ 217 uint64_t stats_blkcipher_encrypt; 218 uint64_t stats_blkcipher_decrypt; 219 uint64_t stats_hash; 220 uint64_t stats_hmac; 221 uint64_t stats_eta_encrypt; 222 uint64_t stats_eta_decrypt; 223 uint64_t stats_gcm_encrypt; 224 uint64_t stats_gcm_decrypt; 225 uint64_t stats_ccm_encrypt; 226 uint64_t stats_ccm_decrypt; 227 uint64_t stats_wr_nomem; 228 uint64_t stats_inflight; 229 uint64_t stats_mac_error; 230 uint64_t stats_pad_error; 231 uint64_t stats_bad_session; 232 uint64_t stats_sglist_error; 233 uint64_t stats_process_error; 234 uint64_t stats_sw_fallback; 235 }; 236 237 /* 238 * Crypto requests involve two kind of scatter/gather lists. 239 * 240 * Non-hash-only requests require a PHYS_DSGL that describes the 241 * location to store the results of the encryption or decryption 242 * operation. This SGL uses a different format (PHYS_DSGL) and should 243 * exclude the skip bytes at the start of the data as well as any AAD 244 * or IV. For authenticated encryption requests it should include the 245 * destination of the hash or tag. 246 * 247 * The input payload may either be supplied inline as immediate data, 248 * or via a standard ULP_TX SGL. This SGL should include AAD, 249 * ciphertext, and the hash or tag for authenticated decryption 250 * requests. 251 * 252 * These scatter/gather lists can describe different subsets of the 253 * buffers described by the crypto operation. ccr_populate_sglist() 254 * generates a scatter/gather list that covers an entire crypto 255 * operation buffer that is then used to construct the other 256 * scatter/gather lists. 257 */ 258 static int 259 ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb) 260 { 261 int error; 262 263 sglist_reset(sg); 264 switch (cb->cb_type) { 265 case CRYPTO_BUF_MBUF: 266 error = sglist_append_mbuf(sg, cb->cb_mbuf); 267 break; 268 case CRYPTO_BUF_UIO: 269 error = sglist_append_uio(sg, cb->cb_uio); 270 break; 271 case CRYPTO_BUF_CONTIG: 272 error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len); 273 break; 274 default: 275 error = EINVAL; 276 } 277 return (error); 278 } 279 280 /* 281 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple 282 * segments. 283 */ 284 static int 285 ccr_count_sgl(struct sglist *sg, int maxsegsize) 286 { 287 int i, nsegs; 288 289 nsegs = 0; 290 for (i = 0; i < sg->sg_nseg; i++) 291 nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize); 292 return (nsegs); 293 } 294 295 /* These functions deal with PHYS_DSGL for the reply buffer. */ 296 static inline int 297 ccr_phys_dsgl_len(int nsegs) 298 { 299 int len; 300 301 len = (nsegs / 8) * sizeof(struct phys_sge_pairs); 302 if ((nsegs % 8) != 0) { 303 len += sizeof(uint16_t) * 8; 304 len += roundup2(nsegs % 8, 2) * sizeof(uint64_t); 305 } 306 return (len); 307 } 308 309 static void 310 ccr_write_phys_dsgl(struct ccr_softc *sc, struct ccr_session *s, void *dst, 311 int nsegs) 312 { 313 struct sglist *sg; 314 struct cpl_rx_phys_dsgl *cpl; 315 struct phys_sge_pairs *sgl; 316 vm_paddr_t paddr; 317 size_t seglen; 318 u_int i, j; 319 320 sg = sc->sg_dsgl; 321 cpl = dst; 322 cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) | 323 V_CPL_RX_PHYS_DSGL_ISRDMA(0)); 324 cpl->pcirlxorder_to_noofsgentr = htobe32( 325 V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) | 326 V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) | 327 V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) | 328 V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs)); 329 cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; 330 cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id); 331 cpl->rss_hdr_int.hash_val = 0; 332 sgl = (struct phys_sge_pairs *)(cpl + 1); 333 j = 0; 334 for (i = 0; i < sg->sg_nseg; i++) { 335 seglen = sg->sg_segs[i].ss_len; 336 paddr = sg->sg_segs[i].ss_paddr; 337 do { 338 sgl->addr[j] = htobe64(paddr); 339 if (seglen > DSGL_SGE_MAXLEN) { 340 sgl->len[j] = htobe16(DSGL_SGE_MAXLEN); 341 paddr += DSGL_SGE_MAXLEN; 342 seglen -= DSGL_SGE_MAXLEN; 343 } else { 344 sgl->len[j] = htobe16(seglen); 345 seglen = 0; 346 } 347 j++; 348 if (j == 8) { 349 sgl++; 350 j = 0; 351 } 352 } while (seglen != 0); 353 } 354 MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs); 355 } 356 357 /* These functions deal with the ULPTX_SGL for input payload. */ 358 static inline int 359 ccr_ulptx_sgl_len(int nsegs) 360 { 361 u_int n; 362 363 nsegs--; /* first segment is part of ulptx_sgl */ 364 n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 365 return (roundup2(n, 16)); 366 } 367 368 static void 369 ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs) 370 { 371 struct ulptx_sgl *usgl; 372 struct sglist *sg; 373 struct sglist_seg *ss; 374 int i; 375 376 sg = sc->sg_ulptx; 377 MPASS(nsegs == sg->sg_nseg); 378 ss = &sg->sg_segs[0]; 379 usgl = dst; 380 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 381 V_ULPTX_NSGE(nsegs)); 382 usgl->len0 = htobe32(ss->ss_len); 383 usgl->addr0 = htobe64(ss->ss_paddr); 384 ss++; 385 for (i = 0; i < sg->sg_nseg - 1; i++) { 386 usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len); 387 usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr); 388 ss++; 389 } 390 391 } 392 393 static bool 394 ccr_use_imm_data(u_int transhdr_len, u_int input_len) 395 { 396 397 if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN) 398 return (false); 399 if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) > 400 SGE_MAX_WR_LEN) 401 return (false); 402 return (true); 403 } 404 405 static void 406 ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s, 407 struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len, 408 u_int sgl_len, u_int hash_size, struct cryptop *crp) 409 { 410 u_int cctx_size, idata_len; 411 412 cctx_size = sizeof(struct _key_ctx) + kctx_len; 413 crwr->wreq.op_to_cctx_size = htobe32( 414 V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) | 415 V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) | 416 V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) | 417 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) | 418 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4)); 419 crwr->wreq.len16_pkd = htobe32( 420 V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16)); 421 crwr->wreq.session_id = 0; 422 crwr->wreq.rx_chid_to_rx_q_id = htobe32( 423 V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->tx_channel_id) | 424 V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) | 425 V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) | 426 V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) | 427 V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) | 428 V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) | 429 V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id)); 430 crwr->wreq.key_addr = 0; 431 crwr->wreq.pld_size_hash_size = htobe32( 432 V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) | 433 V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size)); 434 crwr->wreq.cookie = htobe64((uintptr_t)crp); 435 436 crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 437 V_ULP_TXPKT_DATAMODIFY(0) | 438 V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) | 439 V_ULP_TXPKT_DEST(0) | 440 V_ULP_TXPKT_FID(s->port->rxq->iq.abs_id) | V_ULP_TXPKT_RO(1)); 441 crwr->ulptx.len = htobe32( 442 ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16)); 443 444 crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 445 V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0)); 446 idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len; 447 if (imm_len % 16 != 0) 448 idata_len -= 16 - imm_len % 16; 449 crwr->sc_imm.len = htobe32(idata_len); 450 } 451 452 static int 453 ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 454 { 455 struct chcr_wr *crwr; 456 struct wrqe *wr; 457 struct auth_hash *axf; 458 char *dst; 459 u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len; 460 u_int hmac_ctrl, imm_len, iopad_size; 461 int error, sgl_nsegs, sgl_len, use_opad; 462 463 /* Reject requests with too large of an input buffer. */ 464 if (crp->crp_payload_length > MAX_REQUEST_SIZE) 465 return (EFBIG); 466 467 axf = s->hmac.auth_hash; 468 469 if (s->mode == HMAC) { 470 use_opad = 1; 471 hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC; 472 } else { 473 use_opad = 0; 474 hmac_ctrl = SCMD_HMAC_CTRL_NOP; 475 } 476 477 /* PADs must be 128-bit aligned. */ 478 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 479 480 /* 481 * The 'key' part of the context includes the aligned IPAD and 482 * OPAD. 483 */ 484 kctx_len = iopad_size; 485 if (use_opad) 486 kctx_len += iopad_size; 487 hash_size_in_response = axf->hashsize; 488 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); 489 490 if (crp->crp_payload_length == 0) { 491 imm_len = axf->blocksize; 492 sgl_nsegs = 0; 493 sgl_len = 0; 494 } else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) { 495 imm_len = crp->crp_payload_length; 496 sgl_nsegs = 0; 497 sgl_len = 0; 498 } else { 499 imm_len = 0; 500 sglist_reset(sc->sg_ulptx); 501 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 502 crp->crp_payload_start, crp->crp_payload_length); 503 if (error) 504 return (error); 505 sgl_nsegs = sc->sg_ulptx->sg_nseg; 506 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 507 } 508 509 wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; 510 if (wr_len > SGE_MAX_WR_LEN) 511 return (EFBIG); 512 wr = alloc_wrqe(wr_len, s->port->txq); 513 if (wr == NULL) { 514 sc->stats_wr_nomem++; 515 return (ENOMEM); 516 } 517 crwr = wrtod(wr); 518 memset(crwr, 0, wr_len); 519 520 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 521 hash_size_in_response, crp); 522 523 crwr->sec_cpl.op_ivinsrtofst = htobe32( 524 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 525 V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | 526 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 527 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 528 V_CPL_TX_SEC_PDU_IVINSRTOFST(0)); 529 530 crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ? 531 axf->blocksize : crp->crp_payload_length); 532 533 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 534 V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0)); 535 536 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 537 crwr->sec_cpl.seqno_numivs = htobe32( 538 V_SCMD_SEQ_NO_CTRL(0) | 539 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 540 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) | 541 V_SCMD_AUTH_MODE(s->hmac.auth_mode) | 542 V_SCMD_HMAC_CTRL(hmac_ctrl)); 543 crwr->sec_cpl.ivgen_hdrlen = htobe32( 544 V_SCMD_LAST_FRAG(0) | 545 V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) | 546 V_SCMD_MAC_ONLY(1)); 547 548 memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len); 549 550 /* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */ 551 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; 552 crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | 553 V_KEY_CONTEXT_OPAD_PRESENT(use_opad) | 554 V_KEY_CONTEXT_SALT_PRESENT(1) | 555 V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) | 556 V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1)); 557 558 dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES; 559 if (crp->crp_payload_length == 0) { 560 dst[0] = 0x80; 561 if (s->mode == HMAC) 562 *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) = 563 htobe64(axf->blocksize << 3); 564 } else if (imm_len != 0) 565 crypto_copydata(crp, crp->crp_payload_start, 566 crp->crp_payload_length, dst); 567 else 568 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 569 570 /* XXX: TODO backpressure */ 571 t4_wrq_tx(sc->adapter, wr); 572 573 return (0); 574 } 575 576 static int 577 ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, 578 const struct cpl_fw6_pld *cpl, int error) 579 { 580 uint8_t hash[HASH_MAX_LEN]; 581 582 if (error) 583 return (error); 584 585 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { 586 crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len, 587 hash); 588 if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0) 589 return (EBADMSG); 590 } else 591 crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len, 592 (cpl + 1)); 593 return (0); 594 } 595 596 static int 597 ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 598 { 599 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 600 struct chcr_wr *crwr; 601 struct wrqe *wr; 602 char *dst; 603 u_int kctx_len, key_half, op_type, transhdr_len, wr_len; 604 u_int imm_len, iv_len; 605 int dsgl_nsegs, dsgl_len; 606 int sgl_nsegs, sgl_len; 607 int error; 608 609 if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) 610 return (EINVAL); 611 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC && 612 (crp->crp_payload_length % AES_BLOCK_LEN) != 0) 613 return (EINVAL); 614 615 /* Reject requests with too large of an input buffer. */ 616 if (crp->crp_payload_length > MAX_REQUEST_SIZE) 617 return (EFBIG); 618 619 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 620 op_type = CHCR_ENCRYPT_OP; 621 else 622 op_type = CHCR_DECRYPT_OP; 623 624 sglist_reset(sc->sg_dsgl); 625 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 626 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, 627 crp->crp_payload_output_start, crp->crp_payload_length); 628 else 629 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, 630 crp->crp_payload_start, crp->crp_payload_length); 631 if (error) 632 return (error); 633 dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); 634 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 635 return (EFBIG); 636 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 637 638 /* The 'key' must be 128-bit aligned. */ 639 kctx_len = roundup2(s->blkcipher.key_len, 16); 640 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 641 642 /* For AES-XTS we send a 16-byte IV in the work request. */ 643 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 644 iv_len = AES_BLOCK_LEN; 645 else 646 iv_len = s->blkcipher.iv_len; 647 648 if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) { 649 imm_len = crp->crp_payload_length; 650 sgl_nsegs = 0; 651 sgl_len = 0; 652 } else { 653 imm_len = 0; 654 sglist_reset(sc->sg_ulptx); 655 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 656 crp->crp_payload_start, crp->crp_payload_length); 657 if (error) 658 return (error); 659 sgl_nsegs = sc->sg_ulptx->sg_nseg; 660 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 661 } 662 663 wr_len = roundup2(transhdr_len, 16) + iv_len + 664 roundup2(imm_len, 16) + sgl_len; 665 if (wr_len > SGE_MAX_WR_LEN) 666 return (EFBIG); 667 wr = alloc_wrqe(wr_len, s->port->txq); 668 if (wr == NULL) { 669 sc->stats_wr_nomem++; 670 return (ENOMEM); 671 } 672 crwr = wrtod(wr); 673 memset(crwr, 0, wr_len); 674 675 crypto_read_iv(crp, iv); 676 677 /* Zero the remainder of the IV for AES-XTS. */ 678 memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len); 679 680 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, 681 crp); 682 683 crwr->sec_cpl.op_ivinsrtofst = htobe32( 684 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 685 V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | 686 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 687 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 688 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 689 690 crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length); 691 692 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 693 V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) | 694 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 695 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 696 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0)); 697 698 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 699 crwr->sec_cpl.seqno_numivs = htobe32( 700 V_SCMD_SEQ_NO_CTRL(0) | 701 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 702 V_SCMD_ENC_DEC_CTRL(op_type) | 703 V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | 704 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) | 705 V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) | 706 V_SCMD_IV_SIZE(iv_len / 2) | 707 V_SCMD_NUM_IVS(0)); 708 crwr->sec_cpl.ivgen_hdrlen = htobe32( 709 V_SCMD_IV_GEN_CTRL(0) | 710 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 711 V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); 712 713 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 714 switch (s->blkcipher.cipher_mode) { 715 case SCMD_CIPH_MODE_AES_CBC: 716 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 717 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 718 s->blkcipher.key_len); 719 else 720 memcpy(crwr->key_ctx.key, s->blkcipher.deckey, 721 s->blkcipher.key_len); 722 break; 723 case SCMD_CIPH_MODE_AES_CTR: 724 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 725 s->blkcipher.key_len); 726 break; 727 case SCMD_CIPH_MODE_AES_XTS: 728 key_half = s->blkcipher.key_len / 2; 729 memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, 730 key_half); 731 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 732 memcpy(crwr->key_ctx.key + key_half, 733 s->blkcipher.enckey, key_half); 734 else 735 memcpy(crwr->key_ctx.key + key_half, 736 s->blkcipher.deckey, key_half); 737 break; 738 } 739 740 dst = (char *)(crwr + 1) + kctx_len; 741 ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs); 742 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 743 memcpy(dst, iv, iv_len); 744 dst += iv_len; 745 if (imm_len != 0) 746 crypto_copydata(crp, crp->crp_payload_start, 747 crp->crp_payload_length, dst); 748 else 749 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 750 751 /* XXX: TODO backpressure */ 752 t4_wrq_tx(sc->adapter, wr); 753 754 return (0); 755 } 756 757 static int 758 ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s, 759 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 760 { 761 762 /* 763 * The updated IV to permit chained requests is at 764 * cpl->data[2], but OCF doesn't permit chained requests. 765 */ 766 return (error); 767 } 768 769 /* 770 * 'hashsize' is the length of a full digest. 'authsize' is the 771 * requested digest length for this operation which may be less 772 * than 'hashsize'. 773 */ 774 static int 775 ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize) 776 { 777 778 if (authsize == 10) 779 return (SCMD_HMAC_CTRL_TRUNC_RFC4366); 780 if (authsize == 12) 781 return (SCMD_HMAC_CTRL_IPSEC_96BIT); 782 if (authsize == hashsize / 2) 783 return (SCMD_HMAC_CTRL_DIV2); 784 return (SCMD_HMAC_CTRL_NO_TRUNC); 785 } 786 787 static int 788 ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 789 { 790 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 791 struct chcr_wr *crwr; 792 struct wrqe *wr; 793 struct auth_hash *axf; 794 char *dst; 795 u_int kctx_len, key_half, op_type, transhdr_len, wr_len; 796 u_int hash_size_in_response, imm_len, iopad_size, iv_len; 797 u_int aad_start, aad_stop; 798 u_int auth_insert; 799 u_int cipher_start, cipher_stop; 800 u_int hmac_ctrl, input_len; 801 int dsgl_nsegs, dsgl_len; 802 int sgl_nsegs, sgl_len; 803 int error; 804 805 /* 806 * If there is a need in the future, requests with an empty 807 * payload could be supported as HMAC-only requests. 808 */ 809 if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) 810 return (EINVAL); 811 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC && 812 (crp->crp_payload_length % AES_BLOCK_LEN) != 0) 813 return (EINVAL); 814 815 /* For AES-XTS we send a 16-byte IV in the work request. */ 816 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 817 iv_len = AES_BLOCK_LEN; 818 else 819 iv_len = s->blkcipher.iv_len; 820 821 if (crp->crp_aad_length + iv_len > MAX_AAD_LEN) 822 return (EINVAL); 823 824 axf = s->hmac.auth_hash; 825 hash_size_in_response = s->hmac.hash_len; 826 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 827 op_type = CHCR_ENCRYPT_OP; 828 else 829 op_type = CHCR_DECRYPT_OP; 830 831 /* 832 * The output buffer consists of the cipher text followed by 833 * the hash when encrypting. For decryption it only contains 834 * the plain text. 835 * 836 * Due to a firmware bug, the output buffer must include a 837 * dummy output buffer for the IV and AAD prior to the real 838 * output buffer. 839 */ 840 if (op_type == CHCR_ENCRYPT_OP) { 841 if (iv_len + crp->crp_aad_length + crp->crp_payload_length + 842 hash_size_in_response > MAX_REQUEST_SIZE) 843 return (EFBIG); 844 } else { 845 if (iv_len + crp->crp_aad_length + crp->crp_payload_length > 846 MAX_REQUEST_SIZE) 847 return (EFBIG); 848 } 849 sglist_reset(sc->sg_dsgl); 850 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, 851 iv_len + crp->crp_aad_length); 852 if (error) 853 return (error); 854 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 855 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, 856 crp->crp_payload_output_start, crp->crp_payload_length); 857 else 858 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, 859 crp->crp_payload_start, crp->crp_payload_length); 860 if (error) 861 return (error); 862 if (op_type == CHCR_ENCRYPT_OP) { 863 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 864 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, 865 crp->crp_digest_start, hash_size_in_response); 866 else 867 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, 868 crp->crp_digest_start, hash_size_in_response); 869 if (error) 870 return (error); 871 } 872 dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); 873 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 874 return (EFBIG); 875 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 876 877 /* PADs must be 128-bit aligned. */ 878 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 879 880 /* 881 * The 'key' part of the key context consists of the key followed 882 * by the IPAD and OPAD. 883 */ 884 kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2; 885 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 886 887 /* 888 * The input buffer consists of the IV, any AAD, and then the 889 * cipher/plain text. For decryption requests the hash is 890 * appended after the cipher text. 891 * 892 * The IV is always stored at the start of the input buffer 893 * even though it may be duplicated in the payload. The 894 * crypto engine doesn't work properly if the IV offset points 895 * inside of the AAD region, so a second copy is always 896 * required. 897 */ 898 input_len = crp->crp_aad_length + crp->crp_payload_length; 899 900 /* 901 * The firmware hangs if sent a request which is a 902 * bit smaller than MAX_REQUEST_SIZE. In particular, the 903 * firmware appears to require 512 - 16 bytes of spare room 904 * along with the size of the hash even if the hash isn't 905 * included in the input buffer. 906 */ 907 if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) > 908 MAX_REQUEST_SIZE) 909 return (EFBIG); 910 if (op_type == CHCR_DECRYPT_OP) 911 input_len += hash_size_in_response; 912 913 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { 914 imm_len = input_len; 915 sgl_nsegs = 0; 916 sgl_len = 0; 917 } else { 918 imm_len = 0; 919 sglist_reset(sc->sg_ulptx); 920 if (crp->crp_aad_length != 0) { 921 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 922 crp->crp_aad_start, crp->crp_aad_length); 923 if (error) 924 return (error); 925 } 926 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 927 crp->crp_payload_start, crp->crp_payload_length); 928 if (error) 929 return (error); 930 if (op_type == CHCR_DECRYPT_OP) { 931 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 932 crp->crp_digest_start, hash_size_in_response); 933 if (error) 934 return (error); 935 } 936 sgl_nsegs = sc->sg_ulptx->sg_nseg; 937 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 938 } 939 940 /* 941 * Any auth-only data before the cipher region is marked as AAD. 942 * Auth-data that overlaps with the cipher region is placed in 943 * the auth section. 944 */ 945 if (crp->crp_aad_length != 0) { 946 aad_start = iv_len + 1; 947 aad_stop = aad_start + crp->crp_aad_length - 1; 948 } else { 949 aad_start = 0; 950 aad_stop = 0; 951 } 952 cipher_start = iv_len + crp->crp_aad_length + 1; 953 if (op_type == CHCR_DECRYPT_OP) 954 cipher_stop = hash_size_in_response; 955 else 956 cipher_stop = 0; 957 if (op_type == CHCR_DECRYPT_OP) 958 auth_insert = hash_size_in_response; 959 else 960 auth_insert = 0; 961 962 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + 963 sgl_len; 964 if (wr_len > SGE_MAX_WR_LEN) 965 return (EFBIG); 966 wr = alloc_wrqe(wr_len, s->port->txq); 967 if (wr == NULL) { 968 sc->stats_wr_nomem++; 969 return (ENOMEM); 970 } 971 crwr = wrtod(wr); 972 memset(crwr, 0, wr_len); 973 974 crypto_read_iv(crp, iv); 975 976 /* Zero the remainder of the IV for AES-XTS. */ 977 memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len); 978 979 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 980 op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp); 981 982 crwr->sec_cpl.op_ivinsrtofst = htobe32( 983 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 984 V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | 985 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 986 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 987 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 988 989 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); 990 991 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 992 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 993 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 994 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 995 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4)); 996 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 997 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) | 998 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 999 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1000 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1001 1002 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1003 hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response); 1004 crwr->sec_cpl.seqno_numivs = htobe32( 1005 V_SCMD_SEQ_NO_CTRL(0) | 1006 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 1007 V_SCMD_ENC_DEC_CTRL(op_type) | 1008 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) | 1009 V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | 1010 V_SCMD_AUTH_MODE(s->hmac.auth_mode) | 1011 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1012 V_SCMD_IV_SIZE(iv_len / 2) | 1013 V_SCMD_NUM_IVS(0)); 1014 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1015 V_SCMD_IV_GEN_CTRL(0) | 1016 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1017 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1018 1019 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1020 switch (s->blkcipher.cipher_mode) { 1021 case SCMD_CIPH_MODE_AES_CBC: 1022 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1023 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 1024 s->blkcipher.key_len); 1025 else 1026 memcpy(crwr->key_ctx.key, s->blkcipher.deckey, 1027 s->blkcipher.key_len); 1028 break; 1029 case SCMD_CIPH_MODE_AES_CTR: 1030 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 1031 s->blkcipher.key_len); 1032 break; 1033 case SCMD_CIPH_MODE_AES_XTS: 1034 key_half = s->blkcipher.key_len / 2; 1035 memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, 1036 key_half); 1037 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1038 memcpy(crwr->key_ctx.key + key_half, 1039 s->blkcipher.enckey, key_half); 1040 else 1041 memcpy(crwr->key_ctx.key + key_half, 1042 s->blkcipher.deckey, key_half); 1043 break; 1044 } 1045 1046 dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); 1047 memcpy(dst, s->hmac.pads, iopad_size * 2); 1048 1049 dst = (char *)(crwr + 1) + kctx_len; 1050 ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs); 1051 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1052 memcpy(dst, iv, iv_len); 1053 dst += iv_len; 1054 if (imm_len != 0) { 1055 if (crp->crp_aad_length != 0) { 1056 crypto_copydata(crp, crp->crp_aad_start, 1057 crp->crp_aad_length, dst); 1058 dst += crp->crp_aad_length; 1059 } 1060 crypto_copydata(crp, crp->crp_payload_start, 1061 crp->crp_payload_length, dst); 1062 dst += crp->crp_payload_length; 1063 if (op_type == CHCR_DECRYPT_OP) 1064 crypto_copydata(crp, crp->crp_digest_start, 1065 hash_size_in_response, dst); 1066 } else 1067 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 1068 1069 /* XXX: TODO backpressure */ 1070 t4_wrq_tx(sc->adapter, wr); 1071 1072 return (0); 1073 } 1074 1075 static int 1076 ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s, 1077 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1078 { 1079 1080 /* 1081 * The updated IV to permit chained requests is at 1082 * cpl->data[2], but OCF doesn't permit chained requests. 1083 */ 1084 return (error); 1085 } 1086 1087 static int 1088 ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 1089 { 1090 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 1091 struct chcr_wr *crwr; 1092 struct wrqe *wr; 1093 char *dst; 1094 u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; 1095 u_int hash_size_in_response, imm_len; 1096 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; 1097 u_int hmac_ctrl, input_len; 1098 int dsgl_nsegs, dsgl_len; 1099 int sgl_nsegs, sgl_len; 1100 int error; 1101 1102 if (s->blkcipher.key_len == 0) 1103 return (EINVAL); 1104 1105 /* 1106 * The crypto engine doesn't handle GCM requests with an empty 1107 * payload, so handle those in software instead. 1108 */ 1109 if (crp->crp_payload_length == 0) 1110 return (EMSGSIZE); 1111 1112 if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN) 1113 return (EMSGSIZE); 1114 1115 hash_size_in_response = s->gmac.hash_len; 1116 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1117 op_type = CHCR_ENCRYPT_OP; 1118 else 1119 op_type = CHCR_DECRYPT_OP; 1120 1121 /* 1122 * The IV handling for GCM in OCF is a bit more complicated in 1123 * that IPSec provides a full 16-byte IV (including the 1124 * counter), whereas the /dev/crypto interface sometimes 1125 * provides a full 16-byte IV (if no IV is provided in the 1126 * ioctl) and sometimes a 12-byte IV (if the IV was explicit). 1127 * 1128 * When provided a 12-byte IV, assume the IV is really 16 bytes 1129 * with a counter in the last 4 bytes initialized to 1. 1130 * 1131 * While iv_len is checked below, the value is currently 1132 * always set to 12 when creating a GCM session in this driver 1133 * due to limitations in OCF (there is no way to know what the 1134 * IV length of a given request will be). This means that the 1135 * driver always assumes as 12-byte IV for now. 1136 */ 1137 if (s->blkcipher.iv_len == 12) 1138 iv_len = AES_BLOCK_LEN; 1139 else 1140 iv_len = s->blkcipher.iv_len; 1141 1142 /* 1143 * GCM requests should always provide an explicit IV. 1144 */ 1145 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) 1146 return (EINVAL); 1147 1148 /* 1149 * The output buffer consists of the cipher text followed by 1150 * the tag when encrypting. For decryption it only contains 1151 * the plain text. 1152 * 1153 * Due to a firmware bug, the output buffer must include a 1154 * dummy output buffer for the IV and AAD prior to the real 1155 * output buffer. 1156 */ 1157 if (op_type == CHCR_ENCRYPT_OP) { 1158 if (iv_len + crp->crp_aad_length + crp->crp_payload_length + 1159 hash_size_in_response > MAX_REQUEST_SIZE) 1160 return (EFBIG); 1161 } else { 1162 if (iv_len + crp->crp_aad_length + crp->crp_payload_length > 1163 MAX_REQUEST_SIZE) 1164 return (EFBIG); 1165 } 1166 sglist_reset(sc->sg_dsgl); 1167 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len + 1168 crp->crp_aad_length); 1169 if (error) 1170 return (error); 1171 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1172 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, 1173 crp->crp_payload_output_start, crp->crp_payload_length); 1174 else 1175 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, 1176 crp->crp_payload_start, crp->crp_payload_length); 1177 if (error) 1178 return (error); 1179 if (op_type == CHCR_ENCRYPT_OP) { 1180 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1181 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, 1182 crp->crp_digest_start, hash_size_in_response); 1183 else 1184 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, 1185 crp->crp_digest_start, hash_size_in_response); 1186 if (error) 1187 return (error); 1188 } 1189 dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); 1190 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 1191 return (EFBIG); 1192 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 1193 1194 /* 1195 * The 'key' part of the key context consists of the key followed 1196 * by the Galois hash key. 1197 */ 1198 kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN; 1199 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 1200 1201 /* 1202 * The input buffer consists of the IV, any AAD, and then the 1203 * cipher/plain text. For decryption requests the hash is 1204 * appended after the cipher text. 1205 * 1206 * The IV is always stored at the start of the input buffer 1207 * even though it may be duplicated in the payload. The 1208 * crypto engine doesn't work properly if the IV offset points 1209 * inside of the AAD region, so a second copy is always 1210 * required. 1211 */ 1212 input_len = crp->crp_aad_length + crp->crp_payload_length; 1213 if (op_type == CHCR_DECRYPT_OP) 1214 input_len += hash_size_in_response; 1215 if (input_len > MAX_REQUEST_SIZE) 1216 return (EFBIG); 1217 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { 1218 imm_len = input_len; 1219 sgl_nsegs = 0; 1220 sgl_len = 0; 1221 } else { 1222 imm_len = 0; 1223 sglist_reset(sc->sg_ulptx); 1224 if (crp->crp_aad_length != 0) { 1225 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 1226 crp->crp_aad_start, crp->crp_aad_length); 1227 if (error) 1228 return (error); 1229 } 1230 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 1231 crp->crp_payload_start, crp->crp_payload_length); 1232 if (error) 1233 return (error); 1234 if (op_type == CHCR_DECRYPT_OP) { 1235 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 1236 crp->crp_digest_start, hash_size_in_response); 1237 if (error) 1238 return (error); 1239 } 1240 sgl_nsegs = sc->sg_ulptx->sg_nseg; 1241 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 1242 } 1243 1244 if (crp->crp_aad_length != 0) { 1245 aad_start = iv_len + 1; 1246 aad_stop = aad_start + crp->crp_aad_length - 1; 1247 } else { 1248 aad_start = 0; 1249 aad_stop = 0; 1250 } 1251 cipher_start = iv_len + crp->crp_aad_length + 1; 1252 if (op_type == CHCR_DECRYPT_OP) 1253 cipher_stop = hash_size_in_response; 1254 else 1255 cipher_stop = 0; 1256 if (op_type == CHCR_DECRYPT_OP) 1257 auth_insert = hash_size_in_response; 1258 else 1259 auth_insert = 0; 1260 1261 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + 1262 sgl_len; 1263 if (wr_len > SGE_MAX_WR_LEN) 1264 return (EFBIG); 1265 wr = alloc_wrqe(wr_len, s->port->txq); 1266 if (wr == NULL) { 1267 sc->stats_wr_nomem++; 1268 return (ENOMEM); 1269 } 1270 crwr = wrtod(wr); 1271 memset(crwr, 0, wr_len); 1272 1273 memcpy(iv, crp->crp_iv, s->blkcipher.iv_len); 1274 if (s->blkcipher.iv_len == 12) 1275 *(uint32_t *)&iv[12] = htobe32(1); 1276 1277 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, 1278 crp); 1279 1280 crwr->sec_cpl.op_ivinsrtofst = htobe32( 1281 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 1282 V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | 1283 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 1284 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 1285 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 1286 1287 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); 1288 1289 /* 1290 * NB: cipherstop is explicitly set to 0. On encrypt it 1291 * should normally be set to 0 anyway. However, for decrypt 1292 * the cipher ends before the tag in the ETA case (and 1293 * authstop is set to stop before the tag), but for GCM the 1294 * cipher still runs to the end of the buffer. Not sure if 1295 * this is intentional or a firmware quirk, but it is required 1296 * for working tag validation with GCM decryption. 1297 */ 1298 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 1299 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 1300 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 1301 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1302 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 1303 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 1304 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | 1305 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 1306 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1307 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1308 1309 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1310 hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response); 1311 crwr->sec_cpl.seqno_numivs = htobe32( 1312 V_SCMD_SEQ_NO_CTRL(0) | 1313 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 1314 V_SCMD_ENC_DEC_CTRL(op_type) | 1315 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) | 1316 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) | 1317 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) | 1318 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1319 V_SCMD_IV_SIZE(iv_len / 2) | 1320 V_SCMD_NUM_IVS(0)); 1321 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1322 V_SCMD_IV_GEN_CTRL(0) | 1323 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1324 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1325 1326 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1327 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); 1328 dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); 1329 memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN); 1330 1331 dst = (char *)(crwr + 1) + kctx_len; 1332 ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs); 1333 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1334 memcpy(dst, iv, iv_len); 1335 dst += iv_len; 1336 if (imm_len != 0) { 1337 if (crp->crp_aad_length != 0) { 1338 crypto_copydata(crp, crp->crp_aad_start, 1339 crp->crp_aad_length, dst); 1340 dst += crp->crp_aad_length; 1341 } 1342 crypto_copydata(crp, crp->crp_payload_start, 1343 crp->crp_payload_length, dst); 1344 dst += crp->crp_payload_length; 1345 if (op_type == CHCR_DECRYPT_OP) 1346 crypto_copydata(crp, crp->crp_digest_start, 1347 hash_size_in_response, dst); 1348 } else 1349 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 1350 1351 /* XXX: TODO backpressure */ 1352 t4_wrq_tx(sc->adapter, wr); 1353 1354 return (0); 1355 } 1356 1357 static int 1358 ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s, 1359 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1360 { 1361 1362 /* 1363 * The updated IV to permit chained requests is at 1364 * cpl->data[2], but OCF doesn't permit chained requests. 1365 * 1366 * Note that the hardware should always verify the GMAC hash. 1367 */ 1368 return (error); 1369 } 1370 1371 /* 1372 * Handle a GCM request that is not supported by the crypto engine by 1373 * performing the operation in software. Derived from swcr_authenc(). 1374 */ 1375 static void 1376 ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp) 1377 { 1378 struct auth_hash *axf; 1379 struct enc_xform *exf; 1380 void *auth_ctx, *kschedule; 1381 char block[GMAC_BLOCK_LEN]; 1382 char digest[GMAC_DIGEST_LEN]; 1383 char iv[AES_BLOCK_LEN]; 1384 int error, i, len; 1385 1386 auth_ctx = NULL; 1387 kschedule = NULL; 1388 1389 /* Initialize the MAC. */ 1390 switch (s->blkcipher.key_len) { 1391 case 16: 1392 axf = &auth_hash_nist_gmac_aes_128; 1393 break; 1394 case 24: 1395 axf = &auth_hash_nist_gmac_aes_192; 1396 break; 1397 case 32: 1398 axf = &auth_hash_nist_gmac_aes_256; 1399 break; 1400 default: 1401 error = EINVAL; 1402 goto out; 1403 } 1404 auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); 1405 if (auth_ctx == NULL) { 1406 error = ENOMEM; 1407 goto out; 1408 } 1409 axf->Init(auth_ctx); 1410 axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); 1411 1412 /* Initialize the cipher. */ 1413 exf = &enc_xform_aes_nist_gcm; 1414 kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT); 1415 if (kschedule == NULL) { 1416 error = ENOMEM; 1417 goto out; 1418 } 1419 error = exf->setkey(kschedule, s->blkcipher.enckey, 1420 s->blkcipher.key_len); 1421 if (error) 1422 goto out; 1423 1424 /* 1425 * This assumes a 12-byte IV from the crp. See longer comment 1426 * above in ccr_gcm() for more details. 1427 */ 1428 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { 1429 error = EINVAL; 1430 goto out; 1431 } 1432 memcpy(iv, crp->crp_iv, 12); 1433 *(uint32_t *)&iv[12] = htobe32(1); 1434 1435 axf->Reinit(auth_ctx, iv, sizeof(iv)); 1436 1437 /* MAC the AAD. */ 1438 for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) { 1439 len = imin(crp->crp_aad_length - i, sizeof(block)); 1440 crypto_copydata(crp, crp->crp_aad_start + i, len, block); 1441 bzero(block + len, sizeof(block) - len); 1442 axf->Update(auth_ctx, block, sizeof(block)); 1443 } 1444 1445 exf->reinit(kschedule, iv); 1446 1447 /* Do encryption with MAC */ 1448 for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { 1449 len = imin(crp->crp_payload_length - i, sizeof(block)); 1450 crypto_copydata(crp, crp->crp_payload_start + i, len, block); 1451 bzero(block + len, sizeof(block) - len); 1452 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1453 exf->encrypt(kschedule, block, block); 1454 axf->Update(auth_ctx, block, len); 1455 crypto_copyback(crp, crp->crp_payload_start + i, len, 1456 block); 1457 } else { 1458 axf->Update(auth_ctx, block, len); 1459 } 1460 } 1461 1462 /* Length block. */ 1463 bzero(block, sizeof(block)); 1464 ((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8); 1465 ((uint32_t *)block)[3] = htobe32(crp->crp_payload_length * 8); 1466 axf->Update(auth_ctx, block, sizeof(block)); 1467 1468 /* Finalize MAC. */ 1469 axf->Final(digest, auth_ctx); 1470 1471 /* Inject or validate tag. */ 1472 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1473 crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), 1474 digest); 1475 error = 0; 1476 } else { 1477 char digest2[GMAC_DIGEST_LEN]; 1478 1479 crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), 1480 digest2); 1481 if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { 1482 error = 0; 1483 1484 /* Tag matches, decrypt data. */ 1485 for (i = 0; i < crp->crp_payload_length; 1486 i += sizeof(block)) { 1487 len = imin(crp->crp_payload_length - i, 1488 sizeof(block)); 1489 crypto_copydata(crp, crp->crp_payload_start + i, 1490 len, block); 1491 bzero(block + len, sizeof(block) - len); 1492 exf->decrypt(kschedule, block, block); 1493 crypto_copyback(crp, crp->crp_payload_start + i, 1494 len, block); 1495 } 1496 } else 1497 error = EBADMSG; 1498 } 1499 1500 out: 1501 zfree(kschedule, M_CCR); 1502 zfree(auth_ctx, M_CCR); 1503 crp->crp_etype = error; 1504 crypto_done(crp); 1505 } 1506 1507 static void 1508 generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response, 1509 const char *iv, char *b0) 1510 { 1511 u_int i, payload_len; 1512 1513 /* NB: L is already set in the first byte of the IV. */ 1514 memcpy(b0, iv, CCM_B0_SIZE); 1515 1516 /* Set length of hash in bits 3 - 5. */ 1517 b0[0] |= (((hash_size_in_response - 2) / 2) << 3); 1518 1519 /* Store the payload length as a big-endian value. */ 1520 payload_len = crp->crp_payload_length; 1521 for (i = 0; i < iv[0]; i++) { 1522 b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len; 1523 payload_len >>= 8; 1524 } 1525 1526 /* 1527 * If there is AAD in the request, set bit 6 in the flags 1528 * field and store the AAD length as a big-endian value at the 1529 * start of block 1. This only assumes a 16-bit AAD length 1530 * since T6 doesn't support large AAD sizes. 1531 */ 1532 if (crp->crp_aad_length != 0) { 1533 b0[0] |= (1 << 6); 1534 *(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length); 1535 } 1536 } 1537 1538 static int 1539 ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 1540 { 1541 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 1542 struct ulptx_idata *idata; 1543 struct chcr_wr *crwr; 1544 struct wrqe *wr; 1545 char *dst; 1546 u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; 1547 u_int aad_len, b0_len, hash_size_in_response, imm_len; 1548 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; 1549 u_int hmac_ctrl, input_len; 1550 int dsgl_nsegs, dsgl_len; 1551 int sgl_nsegs, sgl_len; 1552 int error; 1553 1554 if (s->blkcipher.key_len == 0) 1555 return (EINVAL); 1556 1557 /* 1558 * The crypto engine doesn't handle CCM requests with an empty 1559 * payload, so handle those in software instead. 1560 */ 1561 if (crp->crp_payload_length == 0) 1562 return (EMSGSIZE); 1563 1564 /* 1565 * CCM always includes block 0 in the AAD before AAD from the 1566 * request. 1567 */ 1568 b0_len = CCM_B0_SIZE; 1569 if (crp->crp_aad_length != 0) 1570 b0_len += CCM_AAD_FIELD_SIZE; 1571 aad_len = b0_len + crp->crp_aad_length; 1572 1573 /* 1574 * CCM requests should always provide an explicit IV (really 1575 * the nonce). 1576 */ 1577 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) 1578 return (EINVAL); 1579 1580 /* 1581 * Always assume a 12 byte input nonce for now since that is 1582 * what OCF always generates. The full IV in the work request 1583 * is 16 bytes. 1584 */ 1585 iv_len = AES_BLOCK_LEN; 1586 1587 if (iv_len + aad_len > MAX_AAD_LEN) 1588 return (EMSGSIZE); 1589 1590 hash_size_in_response = s->ccm_mac.hash_len; 1591 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1592 op_type = CHCR_ENCRYPT_OP; 1593 else 1594 op_type = CHCR_DECRYPT_OP; 1595 1596 /* 1597 * The output buffer consists of the cipher text followed by 1598 * the tag when encrypting. For decryption it only contains 1599 * the plain text. 1600 * 1601 * Due to a firmware bug, the output buffer must include a 1602 * dummy output buffer for the IV and AAD prior to the real 1603 * output buffer. 1604 */ 1605 if (op_type == CHCR_ENCRYPT_OP) { 1606 if (iv_len + aad_len + crp->crp_payload_length + 1607 hash_size_in_response > MAX_REQUEST_SIZE) 1608 return (EFBIG); 1609 } else { 1610 if (iv_len + aad_len + crp->crp_payload_length > 1611 MAX_REQUEST_SIZE) 1612 return (EFBIG); 1613 } 1614 sglist_reset(sc->sg_dsgl); 1615 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len + 1616 aad_len); 1617 if (error) 1618 return (error); 1619 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1620 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, 1621 crp->crp_payload_output_start, crp->crp_payload_length); 1622 else 1623 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, 1624 crp->crp_payload_start, crp->crp_payload_length); 1625 if (error) 1626 return (error); 1627 if (op_type == CHCR_ENCRYPT_OP) { 1628 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1629 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, 1630 crp->crp_digest_start, hash_size_in_response); 1631 else 1632 error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, 1633 crp->crp_digest_start, hash_size_in_response); 1634 if (error) 1635 return (error); 1636 } 1637 dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); 1638 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 1639 return (EFBIG); 1640 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 1641 1642 /* 1643 * The 'key' part of the key context consists of two copies of 1644 * the AES key. 1645 */ 1646 kctx_len = roundup2(s->blkcipher.key_len, 16) * 2; 1647 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 1648 1649 /* 1650 * The input buffer consists of the IV, AAD (including block 1651 * 0), and then the cipher/plain text. For decryption 1652 * requests the hash is appended after the cipher text. 1653 * 1654 * The IV is always stored at the start of the input buffer 1655 * even though it may be duplicated in the payload. The 1656 * crypto engine doesn't work properly if the IV offset points 1657 * inside of the AAD region, so a second copy is always 1658 * required. 1659 */ 1660 input_len = aad_len + crp->crp_payload_length; 1661 if (op_type == CHCR_DECRYPT_OP) 1662 input_len += hash_size_in_response; 1663 if (input_len > MAX_REQUEST_SIZE) 1664 return (EFBIG); 1665 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { 1666 imm_len = input_len; 1667 sgl_nsegs = 0; 1668 sgl_len = 0; 1669 } else { 1670 /* Block 0 is passed as immediate data. */ 1671 imm_len = b0_len; 1672 1673 sglist_reset(sc->sg_ulptx); 1674 if (crp->crp_aad_length != 0) { 1675 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 1676 crp->crp_aad_start, crp->crp_aad_length); 1677 if (error) 1678 return (error); 1679 } 1680 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 1681 crp->crp_payload_start, crp->crp_payload_length); 1682 if (error) 1683 return (error); 1684 if (op_type == CHCR_DECRYPT_OP) { 1685 error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, 1686 crp->crp_digest_start, hash_size_in_response); 1687 if (error) 1688 return (error); 1689 } 1690 sgl_nsegs = sc->sg_ulptx->sg_nseg; 1691 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 1692 } 1693 1694 aad_start = iv_len + 1; 1695 aad_stop = aad_start + aad_len - 1; 1696 cipher_start = aad_stop + 1; 1697 if (op_type == CHCR_DECRYPT_OP) 1698 cipher_stop = hash_size_in_response; 1699 else 1700 cipher_stop = 0; 1701 if (op_type == CHCR_DECRYPT_OP) 1702 auth_insert = hash_size_in_response; 1703 else 1704 auth_insert = 0; 1705 1706 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + 1707 sgl_len; 1708 if (wr_len > SGE_MAX_WR_LEN) 1709 return (EFBIG); 1710 wr = alloc_wrqe(wr_len, s->port->txq); 1711 if (wr == NULL) { 1712 sc->stats_wr_nomem++; 1713 return (ENOMEM); 1714 } 1715 crwr = wrtod(wr); 1716 memset(crwr, 0, wr_len); 1717 1718 /* 1719 * Read the nonce from the request. Use the nonce to generate 1720 * the full IV with the counter set to 0. 1721 */ 1722 memset(iv, 0, iv_len); 1723 iv[0] = (15 - AES_CCM_IV_LEN) - 1; 1724 memcpy(iv + 1, crp->crp_iv, AES_CCM_IV_LEN); 1725 1726 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, 1727 crp); 1728 1729 crwr->sec_cpl.op_ivinsrtofst = htobe32( 1730 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 1731 V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | 1732 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 1733 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 1734 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 1735 1736 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); 1737 1738 /* 1739 * NB: cipherstop is explicitly set to 0. See comments above 1740 * in ccr_gcm(). 1741 */ 1742 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 1743 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 1744 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 1745 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1746 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 1747 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 1748 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | 1749 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 1750 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1751 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1752 1753 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1754 hmac_ctrl = ccr_hmac_ctrl(AES_CBC_MAC_HASH_LEN, hash_size_in_response); 1755 crwr->sec_cpl.seqno_numivs = htobe32( 1756 V_SCMD_SEQ_NO_CTRL(0) | 1757 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 1758 V_SCMD_ENC_DEC_CTRL(op_type) | 1759 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 0 : 1) | 1760 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) | 1761 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) | 1762 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1763 V_SCMD_IV_SIZE(iv_len / 2) | 1764 V_SCMD_NUM_IVS(0)); 1765 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1766 V_SCMD_IV_GEN_CTRL(0) | 1767 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1768 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1769 1770 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1771 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); 1772 memcpy(crwr->key_ctx.key + roundup(s->blkcipher.key_len, 16), 1773 s->blkcipher.enckey, s->blkcipher.key_len); 1774 1775 dst = (char *)(crwr + 1) + kctx_len; 1776 ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs); 1777 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1778 memcpy(dst, iv, iv_len); 1779 dst += iv_len; 1780 generate_ccm_b0(crp, hash_size_in_response, iv, dst); 1781 if (sgl_nsegs == 0) { 1782 dst += b0_len; 1783 if (crp->crp_aad_length != 0) { 1784 crypto_copydata(crp, crp->crp_aad_start, 1785 crp->crp_aad_length, dst); 1786 dst += crp->crp_aad_length; 1787 } 1788 crypto_copydata(crp, crp->crp_payload_start, 1789 crp->crp_payload_length, dst); 1790 dst += crp->crp_payload_length; 1791 if (op_type == CHCR_DECRYPT_OP) 1792 crypto_copydata(crp, crp->crp_digest_start, 1793 hash_size_in_response, dst); 1794 } else { 1795 dst += CCM_B0_SIZE; 1796 if (b0_len > CCM_B0_SIZE) { 1797 /* 1798 * If there is AAD, insert padding including a 1799 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL 1800 * is 16-byte aligned. 1801 */ 1802 KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE, 1803 ("b0_len mismatch")); 1804 memset(dst + CCM_AAD_FIELD_SIZE, 0, 1805 8 - CCM_AAD_FIELD_SIZE); 1806 idata = (void *)(dst + 8); 1807 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 1808 idata->len = htobe32(0); 1809 dst = (void *)(idata + 1); 1810 } 1811 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 1812 } 1813 1814 /* XXX: TODO backpressure */ 1815 t4_wrq_tx(sc->adapter, wr); 1816 1817 return (0); 1818 } 1819 1820 static int 1821 ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s, 1822 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1823 { 1824 1825 /* 1826 * The updated IV to permit chained requests is at 1827 * cpl->data[2], but OCF doesn't permit chained requests. 1828 * 1829 * Note that the hardware should always verify the CBC MAC 1830 * hash. 1831 */ 1832 return (error); 1833 } 1834 1835 /* 1836 * Handle a CCM request that is not supported by the crypto engine by 1837 * performing the operation in software. Derived from swcr_authenc(). 1838 */ 1839 static void 1840 ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp) 1841 { 1842 struct auth_hash *axf; 1843 struct enc_xform *exf; 1844 union authctx *auth_ctx; 1845 void *kschedule; 1846 char block[CCM_CBC_BLOCK_LEN]; 1847 char digest[AES_CBC_MAC_HASH_LEN]; 1848 char iv[AES_CCM_IV_LEN]; 1849 int error, i, len; 1850 1851 auth_ctx = NULL; 1852 kschedule = NULL; 1853 1854 /* Initialize the MAC. */ 1855 switch (s->blkcipher.key_len) { 1856 case 16: 1857 axf = &auth_hash_ccm_cbc_mac_128; 1858 break; 1859 case 24: 1860 axf = &auth_hash_ccm_cbc_mac_192; 1861 break; 1862 case 32: 1863 axf = &auth_hash_ccm_cbc_mac_256; 1864 break; 1865 default: 1866 error = EINVAL; 1867 goto out; 1868 } 1869 auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); 1870 if (auth_ctx == NULL) { 1871 error = ENOMEM; 1872 goto out; 1873 } 1874 axf->Init(auth_ctx); 1875 axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); 1876 1877 /* Initialize the cipher. */ 1878 exf = &enc_xform_ccm; 1879 kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT); 1880 if (kschedule == NULL) { 1881 error = ENOMEM; 1882 goto out; 1883 } 1884 error = exf->setkey(kschedule, s->blkcipher.enckey, 1885 s->blkcipher.key_len); 1886 if (error) 1887 goto out; 1888 1889 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { 1890 error = EINVAL; 1891 goto out; 1892 } 1893 memcpy(iv, crp->crp_iv, AES_CCM_IV_LEN); 1894 1895 auth_ctx->aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length; 1896 auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length; 1897 axf->Reinit(auth_ctx, iv, sizeof(iv)); 1898 1899 /* MAC the AAD. */ 1900 for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) { 1901 len = imin(crp->crp_aad_length - i, sizeof(block)); 1902 crypto_copydata(crp, crp->crp_aad_start + i, len, block); 1903 bzero(block + len, sizeof(block) - len); 1904 axf->Update(auth_ctx, block, sizeof(block)); 1905 } 1906 1907 exf->reinit(kschedule, iv); 1908 1909 /* Do encryption/decryption with MAC */ 1910 for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { 1911 len = imin(crp->crp_payload_length - i, sizeof(block)); 1912 crypto_copydata(crp, crp->crp_payload_start + i, len, block); 1913 bzero(block + len, sizeof(block) - len); 1914 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1915 axf->Update(auth_ctx, block, len); 1916 exf->encrypt(kschedule, block, block); 1917 crypto_copyback(crp, crp->crp_payload_start + i, len, 1918 block); 1919 } else { 1920 exf->decrypt(kschedule, block, block); 1921 axf->Update(auth_ctx, block, len); 1922 } 1923 } 1924 1925 /* Finalize MAC. */ 1926 axf->Final(digest, auth_ctx); 1927 1928 /* Inject or validate tag. */ 1929 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1930 crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), 1931 digest); 1932 error = 0; 1933 } else { 1934 char digest2[AES_CBC_MAC_HASH_LEN]; 1935 1936 crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), 1937 digest2); 1938 if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { 1939 error = 0; 1940 1941 /* Tag matches, decrypt data. */ 1942 exf->reinit(kschedule, iv); 1943 for (i = 0; i < crp->crp_payload_length; 1944 i += sizeof(block)) { 1945 len = imin(crp->crp_payload_length - i, 1946 sizeof(block)); 1947 crypto_copydata(crp, crp->crp_payload_start + i, 1948 len, block); 1949 bzero(block + len, sizeof(block) - len); 1950 exf->decrypt(kschedule, block, block); 1951 crypto_copyback(crp, crp->crp_payload_start + i, 1952 len, block); 1953 } 1954 } else 1955 error = EBADMSG; 1956 } 1957 1958 out: 1959 zfree(kschedule, M_CCR); 1960 zfree(auth_ctx, M_CCR); 1961 crp->crp_etype = error; 1962 crypto_done(crp); 1963 } 1964 1965 static void 1966 ccr_identify(driver_t *driver, device_t parent) 1967 { 1968 struct adapter *sc; 1969 1970 sc = device_get_softc(parent); 1971 if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE && 1972 device_find_child(parent, "ccr", -1) == NULL) 1973 device_add_child(parent, "ccr", -1); 1974 } 1975 1976 static int 1977 ccr_probe(device_t dev) 1978 { 1979 1980 device_set_desc(dev, "Chelsio Crypto Accelerator"); 1981 return (BUS_PROBE_DEFAULT); 1982 } 1983 1984 static void 1985 ccr_sysctls(struct ccr_softc *sc) 1986 { 1987 struct sysctl_ctx_list *ctx; 1988 struct sysctl_oid *oid, *port_oid; 1989 struct sysctl_oid_list *children; 1990 char buf[16]; 1991 int i; 1992 1993 ctx = device_get_sysctl_ctx(sc->dev); 1994 1995 /* 1996 * dev.ccr.X. 1997 */ 1998 oid = device_get_sysctl_tree(sc->dev); 1999 children = SYSCTL_CHILDREN(oid); 2000 2001 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW, 2002 &sc->port_mask, 0, "Mask of enabled ports"); 2003 2004 /* 2005 * dev.ccr.X.stats. 2006 */ 2007 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", 2008 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics"); 2009 children = SYSCTL_CHILDREN(oid); 2010 2011 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD, 2012 &sc->stats_hash, 0, "Hash requests submitted"); 2013 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD, 2014 &sc->stats_hmac, 0, "HMAC requests submitted"); 2015 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD, 2016 &sc->stats_blkcipher_encrypt, 0, 2017 "Cipher encryption requests submitted"); 2018 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD, 2019 &sc->stats_blkcipher_decrypt, 0, 2020 "Cipher decryption requests submitted"); 2021 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_encrypt", CTLFLAG_RD, 2022 &sc->stats_eta_encrypt, 0, 2023 "Combined AES+HMAC encryption requests submitted"); 2024 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_decrypt", CTLFLAG_RD, 2025 &sc->stats_eta_decrypt, 0, 2026 "Combined AES+HMAC decryption requests submitted"); 2027 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD, 2028 &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted"); 2029 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD, 2030 &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted"); 2031 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_encrypt", CTLFLAG_RD, 2032 &sc->stats_ccm_encrypt, 0, "AES-CCM encryption requests submitted"); 2033 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_decrypt", CTLFLAG_RD, 2034 &sc->stats_ccm_decrypt, 0, "AES-CCM decryption requests submitted"); 2035 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD, 2036 &sc->stats_wr_nomem, 0, "Work request memory allocation failures"); 2037 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD, 2038 &sc->stats_inflight, 0, "Requests currently pending"); 2039 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD, 2040 &sc->stats_mac_error, 0, "MAC errors"); 2041 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD, 2042 &sc->stats_pad_error, 0, "Padding errors"); 2043 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD, 2044 &sc->stats_bad_session, 0, "Requests with invalid session ID"); 2045 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD, 2046 &sc->stats_sglist_error, 0, 2047 "Requests for which DMA mapping failed"); 2048 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD, 2049 &sc->stats_process_error, 0, "Requests failed during queueing"); 2050 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD, 2051 &sc->stats_sw_fallback, 0, 2052 "Requests processed by falling back to software"); 2053 2054 /* 2055 * dev.ccr.X.stats.port 2056 */ 2057 port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port", 2058 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics"); 2059 2060 for (i = 0; i < nitems(sc->ports); i++) { 2061 if (sc->ports[i].rxq == NULL) 2062 continue; 2063 2064 /* 2065 * dev.ccr.X.stats.port.Y 2066 */ 2067 snprintf(buf, sizeof(buf), "%d", i); 2068 oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO, 2069 buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf); 2070 children = SYSCTL_CHILDREN(oid); 2071 2072 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions", 2073 CTLFLAG_RD, &sc->ports[i].active_sessions, 0, 2074 "Count of active sessions"); 2075 } 2076 } 2077 2078 static void 2079 ccr_init_port(struct ccr_softc *sc, int port) 2080 { 2081 2082 sc->ports[port].txq = &sc->adapter->sge.ctrlq[port]; 2083 sc->ports[port].rxq = 2084 &sc->adapter->sge.rxq[sc->adapter->port[port]->vi->first_rxq]; 2085 sc->ports[port].tx_channel_id = port; 2086 _Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS - 1, 2087 "Too many ports to fit in port_mask"); 2088 sc->port_mask |= 1u << port; 2089 } 2090 2091 static int 2092 ccr_attach(device_t dev) 2093 { 2094 struct ccr_softc *sc; 2095 int32_t cid; 2096 int i; 2097 2098 sc = device_get_softc(dev); 2099 sc->dev = dev; 2100 sc->adapter = device_get_softc(device_get_parent(dev)); 2101 for_each_port(sc->adapter, i) { 2102 ccr_init_port(sc, i); 2103 } 2104 cid = crypto_get_driverid(dev, sizeof(struct ccr_session), 2105 CRYPTOCAP_F_HARDWARE); 2106 if (cid < 0) { 2107 device_printf(dev, "could not get crypto driver id\n"); 2108 return (ENXIO); 2109 } 2110 sc->cid = cid; 2111 sc->adapter->ccr_softc = sc; 2112 2113 mtx_init(&sc->lock, "ccr", NULL, MTX_DEF); 2114 sc->sg_input = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 2115 sc->sg_output = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 2116 sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 2117 sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK); 2118 sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK); 2119 sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK); 2120 ccr_sysctls(sc); 2121 2122 return (0); 2123 } 2124 2125 static int 2126 ccr_detach(device_t dev) 2127 { 2128 struct ccr_softc *sc; 2129 2130 sc = device_get_softc(dev); 2131 2132 mtx_lock(&sc->lock); 2133 sc->detaching = true; 2134 mtx_unlock(&sc->lock); 2135 2136 crypto_unregister_all(sc->cid); 2137 2138 mtx_destroy(&sc->lock); 2139 sglist_free(sc->sg_iv_aad); 2140 free(sc->iv_aad_buf, M_CCR); 2141 sglist_free(sc->sg_dsgl); 2142 sglist_free(sc->sg_ulptx); 2143 sglist_free(sc->sg_output); 2144 sglist_free(sc->sg_input); 2145 sc->adapter->ccr_softc = NULL; 2146 return (0); 2147 } 2148 2149 static void 2150 ccr_init_hash_digest(struct ccr_session *s) 2151 { 2152 union authctx auth_ctx; 2153 struct auth_hash *axf; 2154 2155 axf = s->hmac.auth_hash; 2156 axf->Init(&auth_ctx); 2157 t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads); 2158 } 2159 2160 static bool 2161 ccr_aes_check_keylen(int alg, int klen) 2162 { 2163 2164 switch (klen * 8) { 2165 case 128: 2166 case 192: 2167 if (alg == CRYPTO_AES_XTS) 2168 return (false); 2169 break; 2170 case 256: 2171 break; 2172 case 512: 2173 if (alg != CRYPTO_AES_XTS) 2174 return (false); 2175 break; 2176 default: 2177 return (false); 2178 } 2179 return (true); 2180 } 2181 2182 static void 2183 ccr_aes_setkey(struct ccr_session *s, const void *key, int klen) 2184 { 2185 unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size; 2186 unsigned int opad_present; 2187 2188 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 2189 kbits = (klen / 2) * 8; 2190 else 2191 kbits = klen * 8; 2192 switch (kbits) { 2193 case 128: 2194 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 2195 break; 2196 case 192: 2197 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 2198 break; 2199 case 256: 2200 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 2201 break; 2202 default: 2203 panic("should not get here"); 2204 } 2205 2206 s->blkcipher.key_len = klen; 2207 memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); 2208 switch (s->blkcipher.cipher_mode) { 2209 case SCMD_CIPH_MODE_AES_CBC: 2210 case SCMD_CIPH_MODE_AES_XTS: 2211 t4_aes_getdeckey(s->blkcipher.deckey, key, kbits); 2212 break; 2213 } 2214 2215 kctx_len = roundup2(s->blkcipher.key_len, 16); 2216 switch (s->mode) { 2217 case ETA: 2218 mk_size = s->hmac.mk_size; 2219 opad_present = 1; 2220 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 2221 kctx_len += iopad_size * 2; 2222 break; 2223 case GCM: 2224 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 2225 opad_present = 0; 2226 kctx_len += GMAC_BLOCK_LEN; 2227 break; 2228 case CCM: 2229 switch (kbits) { 2230 case 128: 2231 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 2232 break; 2233 case 192: 2234 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; 2235 break; 2236 case 256: 2237 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2238 break; 2239 default: 2240 panic("should not get here"); 2241 } 2242 opad_present = 0; 2243 kctx_len *= 2; 2244 break; 2245 default: 2246 mk_size = CHCR_KEYCTX_NO_KEY; 2247 opad_present = 0; 2248 break; 2249 } 2250 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; 2251 s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | 2252 V_KEY_CONTEXT_DUAL_CK(s->blkcipher.cipher_mode == 2253 SCMD_CIPH_MODE_AES_XTS) | 2254 V_KEY_CONTEXT_OPAD_PRESENT(opad_present) | 2255 V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) | 2256 V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1)); 2257 } 2258 2259 static bool 2260 ccr_auth_supported(const struct crypto_session_params *csp) 2261 { 2262 2263 switch (csp->csp_auth_alg) { 2264 case CRYPTO_SHA1: 2265 case CRYPTO_SHA2_224: 2266 case CRYPTO_SHA2_256: 2267 case CRYPTO_SHA2_384: 2268 case CRYPTO_SHA2_512: 2269 case CRYPTO_SHA1_HMAC: 2270 case CRYPTO_SHA2_224_HMAC: 2271 case CRYPTO_SHA2_256_HMAC: 2272 case CRYPTO_SHA2_384_HMAC: 2273 case CRYPTO_SHA2_512_HMAC: 2274 break; 2275 default: 2276 return (false); 2277 } 2278 return (true); 2279 } 2280 2281 static bool 2282 ccr_cipher_supported(const struct crypto_session_params *csp) 2283 { 2284 2285 switch (csp->csp_cipher_alg) { 2286 case CRYPTO_AES_CBC: 2287 if (csp->csp_ivlen != AES_BLOCK_LEN) 2288 return (false); 2289 break; 2290 case CRYPTO_AES_ICM: 2291 if (csp->csp_ivlen != AES_BLOCK_LEN) 2292 return (false); 2293 break; 2294 case CRYPTO_AES_XTS: 2295 if (csp->csp_ivlen != AES_XTS_IV_LEN) 2296 return (false); 2297 break; 2298 default: 2299 return (false); 2300 } 2301 return (ccr_aes_check_keylen(csp->csp_cipher_alg, 2302 csp->csp_cipher_klen)); 2303 } 2304 2305 static int 2306 ccr_cipher_mode(const struct crypto_session_params *csp) 2307 { 2308 2309 switch (csp->csp_cipher_alg) { 2310 case CRYPTO_AES_CBC: 2311 return (SCMD_CIPH_MODE_AES_CBC); 2312 case CRYPTO_AES_ICM: 2313 return (SCMD_CIPH_MODE_AES_CTR); 2314 case CRYPTO_AES_NIST_GCM_16: 2315 return (SCMD_CIPH_MODE_AES_GCM); 2316 case CRYPTO_AES_XTS: 2317 return (SCMD_CIPH_MODE_AES_XTS); 2318 case CRYPTO_AES_CCM_16: 2319 return (SCMD_CIPH_MODE_AES_CCM); 2320 default: 2321 return (SCMD_CIPH_MODE_NOP); 2322 } 2323 } 2324 2325 static int 2326 ccr_probesession(device_t dev, const struct crypto_session_params *csp) 2327 { 2328 unsigned int cipher_mode; 2329 2330 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT)) != 0) 2331 return (EINVAL); 2332 switch (csp->csp_mode) { 2333 case CSP_MODE_DIGEST: 2334 if (!ccr_auth_supported(csp)) 2335 return (EINVAL); 2336 break; 2337 case CSP_MODE_CIPHER: 2338 if (!ccr_cipher_supported(csp)) 2339 return (EINVAL); 2340 break; 2341 case CSP_MODE_AEAD: 2342 switch (csp->csp_cipher_alg) { 2343 case CRYPTO_AES_NIST_GCM_16: 2344 if (csp->csp_ivlen != AES_GCM_IV_LEN) 2345 return (EINVAL); 2346 if (csp->csp_auth_mlen < 0 || 2347 csp->csp_auth_mlen > AES_GMAC_HASH_LEN) 2348 return (EINVAL); 2349 break; 2350 case CRYPTO_AES_CCM_16: 2351 if (csp->csp_ivlen != AES_CCM_IV_LEN) 2352 return (EINVAL); 2353 if (csp->csp_auth_mlen < 0 || 2354 csp->csp_auth_mlen > AES_CBC_MAC_HASH_LEN) 2355 return (EINVAL); 2356 break; 2357 default: 2358 return (EINVAL); 2359 } 2360 break; 2361 case CSP_MODE_ETA: 2362 if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp)) 2363 return (EINVAL); 2364 break; 2365 default: 2366 return (EINVAL); 2367 } 2368 2369 if (csp->csp_cipher_klen != 0) { 2370 cipher_mode = ccr_cipher_mode(csp); 2371 if (cipher_mode == SCMD_CIPH_MODE_NOP) 2372 return (EINVAL); 2373 } 2374 2375 return (CRYPTODEV_PROBE_HARDWARE); 2376 } 2377 2378 /* 2379 * Select an available port with the lowest number of active sessions. 2380 */ 2381 static struct ccr_port * 2382 ccr_choose_port(struct ccr_softc *sc) 2383 { 2384 struct ccr_port *best, *p; 2385 int i; 2386 2387 mtx_assert(&sc->lock, MA_OWNED); 2388 best = NULL; 2389 for (i = 0; i < nitems(sc->ports); i++) { 2390 p = &sc->ports[i]; 2391 2392 /* Ignore non-existent ports. */ 2393 if (p->rxq == NULL) 2394 continue; 2395 2396 /* 2397 * XXX: Ignore ports whose queues aren't initialized. 2398 * This is racy as the rxq can be destroyed by the 2399 * associated VI detaching. Eventually ccr should use 2400 * dedicated queues. 2401 */ 2402 if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL) 2403 continue; 2404 2405 if ((sc->port_mask & (1u << i)) == 0) 2406 continue; 2407 2408 if (best == NULL || 2409 p->active_sessions < best->active_sessions) 2410 best = p; 2411 } 2412 return (best); 2413 } 2414 2415 static int 2416 ccr_newsession(device_t dev, crypto_session_t cses, 2417 const struct crypto_session_params *csp) 2418 { 2419 struct ccr_softc *sc; 2420 struct ccr_session *s; 2421 struct auth_hash *auth_hash; 2422 unsigned int auth_mode, cipher_mode, mk_size; 2423 unsigned int partial_digest_len; 2424 2425 switch (csp->csp_auth_alg) { 2426 case CRYPTO_SHA1: 2427 case CRYPTO_SHA1_HMAC: 2428 auth_hash = &auth_hash_hmac_sha1; 2429 auth_mode = SCMD_AUTH_MODE_SHA1; 2430 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; 2431 partial_digest_len = SHA1_HASH_LEN; 2432 break; 2433 case CRYPTO_SHA2_224: 2434 case CRYPTO_SHA2_224_HMAC: 2435 auth_hash = &auth_hash_hmac_sha2_224; 2436 auth_mode = SCMD_AUTH_MODE_SHA224; 2437 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2438 partial_digest_len = SHA2_256_HASH_LEN; 2439 break; 2440 case CRYPTO_SHA2_256: 2441 case CRYPTO_SHA2_256_HMAC: 2442 auth_hash = &auth_hash_hmac_sha2_256; 2443 auth_mode = SCMD_AUTH_MODE_SHA256; 2444 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2445 partial_digest_len = SHA2_256_HASH_LEN; 2446 break; 2447 case CRYPTO_SHA2_384: 2448 case CRYPTO_SHA2_384_HMAC: 2449 auth_hash = &auth_hash_hmac_sha2_384; 2450 auth_mode = SCMD_AUTH_MODE_SHA512_384; 2451 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 2452 partial_digest_len = SHA2_512_HASH_LEN; 2453 break; 2454 case CRYPTO_SHA2_512: 2455 case CRYPTO_SHA2_512_HMAC: 2456 auth_hash = &auth_hash_hmac_sha2_512; 2457 auth_mode = SCMD_AUTH_MODE_SHA512_512; 2458 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 2459 partial_digest_len = SHA2_512_HASH_LEN; 2460 break; 2461 default: 2462 auth_hash = NULL; 2463 auth_mode = SCMD_AUTH_MODE_NOP; 2464 mk_size = 0; 2465 partial_digest_len = 0; 2466 break; 2467 } 2468 2469 cipher_mode = ccr_cipher_mode(csp); 2470 2471 #ifdef INVARIANTS 2472 switch (csp->csp_mode) { 2473 case CSP_MODE_CIPHER: 2474 if (cipher_mode == SCMD_CIPH_MODE_NOP || 2475 cipher_mode == SCMD_CIPH_MODE_AES_GCM || 2476 cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2477 panic("invalid cipher algo"); 2478 break; 2479 case CSP_MODE_DIGEST: 2480 if (auth_mode == SCMD_AUTH_MODE_NOP) 2481 panic("invalid auth algo"); 2482 break; 2483 case CSP_MODE_AEAD: 2484 if (cipher_mode != SCMD_CIPH_MODE_AES_GCM && 2485 cipher_mode != SCMD_CIPH_MODE_AES_CCM) 2486 panic("invalid aead cipher algo"); 2487 if (auth_mode != SCMD_AUTH_MODE_NOP) 2488 panic("invalid aead auth aglo"); 2489 break; 2490 case CSP_MODE_ETA: 2491 if (cipher_mode == SCMD_CIPH_MODE_NOP || 2492 cipher_mode == SCMD_CIPH_MODE_AES_GCM || 2493 cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2494 panic("invalid cipher algo"); 2495 if (auth_mode == SCMD_AUTH_MODE_NOP) 2496 panic("invalid auth algo"); 2497 break; 2498 default: 2499 panic("invalid csp mode"); 2500 } 2501 #endif 2502 2503 sc = device_get_softc(dev); 2504 2505 mtx_lock(&sc->lock); 2506 if (sc->detaching) { 2507 mtx_unlock(&sc->lock); 2508 return (ENXIO); 2509 } 2510 2511 s = crypto_get_driver_session(cses); 2512 s->port = ccr_choose_port(sc); 2513 if (s->port == NULL) { 2514 mtx_unlock(&sc->lock); 2515 return (ENXIO); 2516 } 2517 2518 switch (csp->csp_mode) { 2519 case CSP_MODE_AEAD: 2520 if (cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2521 s->mode = CCM; 2522 else 2523 s->mode = GCM; 2524 break; 2525 case CSP_MODE_ETA: 2526 s->mode = ETA; 2527 break; 2528 case CSP_MODE_DIGEST: 2529 if (csp->csp_auth_klen != 0) 2530 s->mode = HMAC; 2531 else 2532 s->mode = HASH; 2533 break; 2534 case CSP_MODE_CIPHER: 2535 s->mode = BLKCIPHER; 2536 break; 2537 } 2538 2539 if (s->mode == GCM) { 2540 if (csp->csp_auth_mlen == 0) 2541 s->gmac.hash_len = AES_GMAC_HASH_LEN; 2542 else 2543 s->gmac.hash_len = csp->csp_auth_mlen; 2544 t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen, 2545 s->gmac.ghash_h); 2546 } else if (s->mode == CCM) { 2547 if (csp->csp_auth_mlen == 0) 2548 s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN; 2549 else 2550 s->ccm_mac.hash_len = csp->csp_auth_mlen; 2551 } else if (auth_mode != SCMD_AUTH_MODE_NOP) { 2552 s->hmac.auth_hash = auth_hash; 2553 s->hmac.auth_mode = auth_mode; 2554 s->hmac.mk_size = mk_size; 2555 s->hmac.partial_digest_len = partial_digest_len; 2556 if (csp->csp_auth_mlen == 0) 2557 s->hmac.hash_len = auth_hash->hashsize; 2558 else 2559 s->hmac.hash_len = csp->csp_auth_mlen; 2560 if (csp->csp_auth_key != NULL) 2561 t4_init_hmac_digest(auth_hash, partial_digest_len, 2562 csp->csp_auth_key, csp->csp_auth_klen, 2563 s->hmac.pads); 2564 else 2565 ccr_init_hash_digest(s); 2566 } 2567 if (cipher_mode != SCMD_CIPH_MODE_NOP) { 2568 s->blkcipher.cipher_mode = cipher_mode; 2569 s->blkcipher.iv_len = csp->csp_ivlen; 2570 if (csp->csp_cipher_key != NULL) 2571 ccr_aes_setkey(s, csp->csp_cipher_key, 2572 csp->csp_cipher_klen); 2573 } 2574 2575 s->active = true; 2576 s->port->active_sessions++; 2577 mtx_unlock(&sc->lock); 2578 return (0); 2579 } 2580 2581 static void 2582 ccr_freesession(device_t dev, crypto_session_t cses) 2583 { 2584 struct ccr_softc *sc; 2585 struct ccr_session *s; 2586 2587 sc = device_get_softc(dev); 2588 s = crypto_get_driver_session(cses); 2589 mtx_lock(&sc->lock); 2590 if (s->pending != 0) 2591 device_printf(dev, 2592 "session %p freed with %d pending requests\n", s, 2593 s->pending); 2594 s->active = false; 2595 s->port->active_sessions--; 2596 mtx_unlock(&sc->lock); 2597 } 2598 2599 static int 2600 ccr_process(device_t dev, struct cryptop *crp, int hint) 2601 { 2602 const struct crypto_session_params *csp; 2603 struct ccr_softc *sc; 2604 struct ccr_session *s; 2605 int error; 2606 2607 csp = crypto_get_params(crp->crp_session); 2608 s = crypto_get_driver_session(crp->crp_session); 2609 sc = device_get_softc(dev); 2610 2611 mtx_lock(&sc->lock); 2612 error = ccr_populate_sglist(sc->sg_input, &crp->crp_buf); 2613 if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) 2614 error = ccr_populate_sglist(sc->sg_output, &crp->crp_obuf); 2615 if (error) { 2616 sc->stats_sglist_error++; 2617 goto out; 2618 } 2619 2620 switch (s->mode) { 2621 case HASH: 2622 error = ccr_hash(sc, s, crp); 2623 if (error == 0) 2624 sc->stats_hash++; 2625 break; 2626 case HMAC: 2627 if (crp->crp_auth_key != NULL) 2628 t4_init_hmac_digest(s->hmac.auth_hash, 2629 s->hmac.partial_digest_len, crp->crp_auth_key, 2630 csp->csp_auth_klen, s->hmac.pads); 2631 error = ccr_hash(sc, s, crp); 2632 if (error == 0) 2633 sc->stats_hmac++; 2634 break; 2635 case BLKCIPHER: 2636 if (crp->crp_cipher_key != NULL) 2637 ccr_aes_setkey(s, crp->crp_cipher_key, 2638 csp->csp_cipher_klen); 2639 error = ccr_blkcipher(sc, s, crp); 2640 if (error == 0) { 2641 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2642 sc->stats_blkcipher_encrypt++; 2643 else 2644 sc->stats_blkcipher_decrypt++; 2645 } 2646 break; 2647 case ETA: 2648 if (crp->crp_auth_key != NULL) 2649 t4_init_hmac_digest(s->hmac.auth_hash, 2650 s->hmac.partial_digest_len, crp->crp_auth_key, 2651 csp->csp_auth_klen, s->hmac.pads); 2652 if (crp->crp_cipher_key != NULL) 2653 ccr_aes_setkey(s, crp->crp_cipher_key, 2654 csp->csp_cipher_klen); 2655 error = ccr_eta(sc, s, crp); 2656 if (error == 0) { 2657 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2658 sc->stats_eta_encrypt++; 2659 else 2660 sc->stats_eta_decrypt++; 2661 } 2662 break; 2663 case GCM: 2664 if (crp->crp_cipher_key != NULL) { 2665 t4_init_gmac_hash(crp->crp_cipher_key, 2666 csp->csp_cipher_klen, s->gmac.ghash_h); 2667 ccr_aes_setkey(s, crp->crp_cipher_key, 2668 csp->csp_cipher_klen); 2669 } 2670 if (crp->crp_payload_length == 0) { 2671 mtx_unlock(&sc->lock); 2672 ccr_gcm_soft(s, crp); 2673 return (0); 2674 } 2675 error = ccr_gcm(sc, s, crp); 2676 if (error == EMSGSIZE) { 2677 sc->stats_sw_fallback++; 2678 mtx_unlock(&sc->lock); 2679 ccr_gcm_soft(s, crp); 2680 return (0); 2681 } 2682 if (error == 0) { 2683 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2684 sc->stats_gcm_encrypt++; 2685 else 2686 sc->stats_gcm_decrypt++; 2687 } 2688 break; 2689 case CCM: 2690 if (crp->crp_cipher_key != NULL) { 2691 ccr_aes_setkey(s, crp->crp_cipher_key, 2692 csp->csp_cipher_klen); 2693 } 2694 error = ccr_ccm(sc, s, crp); 2695 if (error == EMSGSIZE) { 2696 sc->stats_sw_fallback++; 2697 mtx_unlock(&sc->lock); 2698 ccr_ccm_soft(s, crp); 2699 return (0); 2700 } 2701 if (error == 0) { 2702 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2703 sc->stats_ccm_encrypt++; 2704 else 2705 sc->stats_ccm_decrypt++; 2706 } 2707 break; 2708 } 2709 2710 if (error == 0) { 2711 s->pending++; 2712 sc->stats_inflight++; 2713 } else 2714 sc->stats_process_error++; 2715 2716 out: 2717 mtx_unlock(&sc->lock); 2718 2719 if (error) { 2720 crp->crp_etype = error; 2721 crypto_done(crp); 2722 } 2723 2724 return (0); 2725 } 2726 2727 static int 2728 do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss, 2729 struct mbuf *m) 2730 { 2731 struct ccr_softc *sc = iq->adapter->ccr_softc; 2732 struct ccr_session *s; 2733 const struct cpl_fw6_pld *cpl; 2734 struct cryptop *crp; 2735 uint32_t status; 2736 int error; 2737 2738 if (m != NULL) 2739 cpl = mtod(m, const void *); 2740 else 2741 cpl = (const void *)(rss + 1); 2742 2743 crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]); 2744 s = crypto_get_driver_session(crp->crp_session); 2745 status = be64toh(cpl->data[0]); 2746 if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status)) 2747 error = EBADMSG; 2748 else 2749 error = 0; 2750 2751 mtx_lock(&sc->lock); 2752 s->pending--; 2753 sc->stats_inflight--; 2754 2755 switch (s->mode) { 2756 case HASH: 2757 case HMAC: 2758 error = ccr_hash_done(sc, s, crp, cpl, error); 2759 break; 2760 case BLKCIPHER: 2761 error = ccr_blkcipher_done(sc, s, crp, cpl, error); 2762 break; 2763 case ETA: 2764 error = ccr_eta_done(sc, s, crp, cpl, error); 2765 break; 2766 case GCM: 2767 error = ccr_gcm_done(sc, s, crp, cpl, error); 2768 break; 2769 case CCM: 2770 error = ccr_ccm_done(sc, s, crp, cpl, error); 2771 break; 2772 } 2773 2774 if (error == EBADMSG) { 2775 if (CHK_MAC_ERR_BIT(status)) 2776 sc->stats_mac_error++; 2777 if (CHK_PAD_ERR_BIT(status)) 2778 sc->stats_pad_error++; 2779 } 2780 mtx_unlock(&sc->lock); 2781 crp->crp_etype = error; 2782 crypto_done(crp); 2783 m_freem(m); 2784 return (0); 2785 } 2786 2787 static int 2788 ccr_modevent(module_t mod, int cmd, void *arg) 2789 { 2790 2791 switch (cmd) { 2792 case MOD_LOAD: 2793 t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld); 2794 return (0); 2795 case MOD_UNLOAD: 2796 t4_register_cpl_handler(CPL_FW6_PLD, NULL); 2797 return (0); 2798 default: 2799 return (EOPNOTSUPP); 2800 } 2801 } 2802 2803 static device_method_t ccr_methods[] = { 2804 DEVMETHOD(device_identify, ccr_identify), 2805 DEVMETHOD(device_probe, ccr_probe), 2806 DEVMETHOD(device_attach, ccr_attach), 2807 DEVMETHOD(device_detach, ccr_detach), 2808 2809 DEVMETHOD(cryptodev_probesession, ccr_probesession), 2810 DEVMETHOD(cryptodev_newsession, ccr_newsession), 2811 DEVMETHOD(cryptodev_freesession, ccr_freesession), 2812 DEVMETHOD(cryptodev_process, ccr_process), 2813 2814 DEVMETHOD_END 2815 }; 2816 2817 static driver_t ccr_driver = { 2818 "ccr", 2819 ccr_methods, 2820 sizeof(struct ccr_softc) 2821 }; 2822 2823 static devclass_t ccr_devclass; 2824 2825 DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL); 2826 MODULE_VERSION(ccr, 1); 2827 MODULE_DEPEND(ccr, crypto, 1, 1, 1); 2828 MODULE_DEPEND(ccr, t6nex, 1, 1, 1); 2829