1 /*- 2 * Copyright (c) 2017 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: John Baldwin <jhb@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include <sys/types.h> 32 #include <sys/bus.h> 33 #include <sys/lock.h> 34 #include <sys/malloc.h> 35 #include <sys/mutex.h> 36 #include <sys/module.h> 37 #include <sys/sglist.h> 38 39 #include <opencrypto/cryptodev.h> 40 #include <opencrypto/xform.h> 41 42 #include "cryptodev_if.h" 43 44 #include "common/common.h" 45 #include "crypto/t4_crypto.h" 46 47 /* 48 * Requests consist of: 49 * 50 * +-------------------------------+ 51 * | struct fw_crypto_lookaside_wr | 52 * +-------------------------------+ 53 * | struct ulp_txpkt | 54 * +-------------------------------+ 55 * | struct ulptx_idata | 56 * +-------------------------------+ 57 * | struct cpl_tx_sec_pdu | 58 * +-------------------------------+ 59 * | struct cpl_tls_tx_scmd_fmt | 60 * +-------------------------------+ 61 * | key context header | 62 * +-------------------------------+ 63 * | AES key | ----- For requests with AES 64 * +-------------------------------+ 65 * | Hash state | ----- For hash-only requests 66 * +-------------------------------+ - 67 * | IPAD (16-byte aligned) | \ 68 * +-------------------------------+ +---- For requests with HMAC 69 * | OPAD (16-byte aligned) | / 70 * +-------------------------------+ - 71 * | GMAC H | ----- For AES-GCM 72 * +-------------------------------+ - 73 * | struct cpl_rx_phys_dsgl | \ 74 * +-------------------------------+ +---- Destination buffer for 75 * | PHYS_DSGL entries | / non-hash-only requests 76 * +-------------------------------+ - 77 * | 16 dummy bytes | ----- Only for HMAC/hash-only requests 78 * +-------------------------------+ 79 * | IV | ----- If immediate IV 80 * +-------------------------------+ 81 * | Payload | ----- If immediate Payload 82 * +-------------------------------+ - 83 * | struct ulptx_sgl | \ 84 * +-------------------------------+ +---- If payload via SGL 85 * | SGL entries | / 86 * +-------------------------------+ - 87 * 88 * Note that the key context must be padded to ensure 16-byte alignment. 89 * For HMAC requests, the key consists of the partial hash of the IPAD 90 * followed by the partial hash of the OPAD. 91 * 92 * Replies consist of: 93 * 94 * +-------------------------------+ 95 * | struct cpl_fw6_pld | 96 * +-------------------------------+ 97 * | hash digest | ----- For HMAC request with 98 * +-------------------------------+ 'hash_size' set in work request 99 * 100 * A 32-bit big-endian error status word is supplied in the last 4 101 * bytes of data[0] in the CPL_FW6_PLD message. bit 0 indicates a 102 * "MAC" error and bit 1 indicates a "PAD" error. 103 * 104 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message 105 * in the request is returned in data[1] of the CPL_FW6_PLD message. 106 * 107 * For block cipher replies, the updated IV is supplied in data[2] and 108 * data[3] of the CPL_FW6_PLD message. 109 * 110 * For hash replies where the work request set 'hash_size' to request 111 * a copy of the hash in the reply, the hash digest is supplied 112 * immediately following the CPL_FW6_PLD message. 113 */ 114 115 /* 116 * The crypto engine supports a maximum AAD size of 511 bytes. 117 */ 118 #define MAX_AAD_LEN 511 119 120 /* 121 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG 122 * entries. While the CPL includes a 16-bit length field, the T6 can 123 * sometimes hang if an error occurs while processing a request with a 124 * single DSGL entry larger than 2k. 125 */ 126 #define MAX_RX_PHYS_DSGL_SGE 32 127 #define DSGL_SGE_MAXLEN 2048 128 129 /* 130 * The adapter only supports requests with a total input or output 131 * length of 64k-1 or smaller. Longer requests either result in hung 132 * requests or incorrect results. 133 */ 134 #define MAX_REQUEST_SIZE 65535 135 136 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto"); 137 138 struct ccr_session_hmac { 139 struct auth_hash *auth_hash; 140 int hash_len; 141 unsigned int partial_digest_len; 142 unsigned int auth_mode; 143 unsigned int mk_size; 144 char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2]; 145 }; 146 147 struct ccr_session_gmac { 148 int hash_len; 149 char ghash_h[GMAC_BLOCK_LEN]; 150 }; 151 152 struct ccr_session_ccm_mac { 153 int hash_len; 154 }; 155 156 struct ccr_session_blkcipher { 157 unsigned int cipher_mode; 158 unsigned int key_len; 159 unsigned int iv_len; 160 __be32 key_ctx_hdr; 161 char enckey[CHCR_AES_MAX_KEY_LEN]; 162 char deckey[CHCR_AES_MAX_KEY_LEN]; 163 }; 164 165 struct ccr_port { 166 struct sge_wrq *txq; 167 struct sge_rxq *rxq; 168 int tx_channel_id; 169 u_int active_sessions; 170 }; 171 172 struct ccr_session { 173 #ifdef INVARIANTS 174 int pending; 175 #endif 176 enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode; 177 struct ccr_port *port; 178 union { 179 struct ccr_session_hmac hmac; 180 struct ccr_session_gmac gmac; 181 struct ccr_session_ccm_mac ccm_mac; 182 }; 183 struct ccr_session_blkcipher blkcipher; 184 struct mtx lock; 185 186 /* 187 * Pre-allocate S/G lists used when preparing a work request. 188 * 'sg_input' contains an sglist describing the entire input 189 * buffer for a 'struct cryptop'. 'sg_output' contains an 190 * sglist describing the entire output buffer. 'sg_ulptx' is 191 * used to describe the data the engine should DMA as input 192 * via ULPTX_SGL. 'sg_dsgl' is used to describe the 193 * destination that cipher text and a tag should be written 194 * to. 195 */ 196 struct sglist *sg_input; 197 struct sglist *sg_output; 198 struct sglist *sg_ulptx; 199 struct sglist *sg_dsgl; 200 }; 201 202 struct ccr_softc { 203 struct adapter *adapter; 204 device_t dev; 205 uint32_t cid; 206 struct mtx lock; 207 bool detaching; 208 struct ccr_port ports[MAX_NPORTS]; 209 u_int port_mask; 210 211 /* 212 * Pre-allocate a dummy output buffer for the IV and AAD for 213 * AEAD requests. 214 */ 215 char *iv_aad_buf; 216 struct sglist *sg_iv_aad; 217 218 /* Statistics. */ 219 counter_u64_t stats_blkcipher_encrypt; 220 counter_u64_t stats_blkcipher_decrypt; 221 counter_u64_t stats_hash; 222 counter_u64_t stats_hmac; 223 counter_u64_t stats_eta_encrypt; 224 counter_u64_t stats_eta_decrypt; 225 counter_u64_t stats_gcm_encrypt; 226 counter_u64_t stats_gcm_decrypt; 227 counter_u64_t stats_ccm_encrypt; 228 counter_u64_t stats_ccm_decrypt; 229 counter_u64_t stats_wr_nomem; 230 counter_u64_t stats_inflight; 231 counter_u64_t stats_mac_error; 232 counter_u64_t stats_pad_error; 233 counter_u64_t stats_sglist_error; 234 counter_u64_t stats_process_error; 235 counter_u64_t stats_sw_fallback; 236 }; 237 238 /* 239 * Crypto requests involve two kind of scatter/gather lists. 240 * 241 * Non-hash-only requests require a PHYS_DSGL that describes the 242 * location to store the results of the encryption or decryption 243 * operation. This SGL uses a different format (PHYS_DSGL) and should 244 * exclude the skip bytes at the start of the data as well as any AAD 245 * or IV. For authenticated encryption requests it should include the 246 * destination of the hash or tag. 247 * 248 * The input payload may either be supplied inline as immediate data, 249 * or via a standard ULP_TX SGL. This SGL should include AAD, 250 * ciphertext, and the hash or tag for authenticated decryption 251 * requests. 252 * 253 * These scatter/gather lists can describe different subsets of the 254 * buffers described by the crypto operation. ccr_populate_sglist() 255 * generates a scatter/gather list that covers an entire crypto 256 * operation buffer that is then used to construct the other 257 * scatter/gather lists. 258 */ 259 static int 260 ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb) 261 { 262 int error; 263 264 sglist_reset(sg); 265 switch (cb->cb_type) { 266 case CRYPTO_BUF_MBUF: 267 error = sglist_append_mbuf(sg, cb->cb_mbuf); 268 break; 269 case CRYPTO_BUF_UIO: 270 error = sglist_append_uio(sg, cb->cb_uio); 271 break; 272 case CRYPTO_BUF_CONTIG: 273 error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len); 274 break; 275 case CRYPTO_BUF_VMPAGE: 276 error = sglist_append_vmpages(sg, cb->cb_vm_page, 277 cb->cb_vm_page_len, cb->cb_vm_page_offset); 278 break; 279 default: 280 error = EINVAL; 281 } 282 return (error); 283 } 284 285 /* 286 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple 287 * segments. 288 */ 289 static int 290 ccr_count_sgl(struct sglist *sg, int maxsegsize) 291 { 292 int i, nsegs; 293 294 nsegs = 0; 295 for (i = 0; i < sg->sg_nseg; i++) 296 nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize); 297 return (nsegs); 298 } 299 300 /* These functions deal with PHYS_DSGL for the reply buffer. */ 301 static inline int 302 ccr_phys_dsgl_len(int nsegs) 303 { 304 int len; 305 306 len = (nsegs / 8) * sizeof(struct phys_sge_pairs); 307 if ((nsegs % 8) != 0) { 308 len += sizeof(uint16_t) * 8; 309 len += roundup2(nsegs % 8, 2) * sizeof(uint64_t); 310 } 311 return (len); 312 } 313 314 static void 315 ccr_write_phys_dsgl(struct ccr_session *s, void *dst, int nsegs) 316 { 317 struct sglist *sg; 318 struct cpl_rx_phys_dsgl *cpl; 319 struct phys_sge_pairs *sgl; 320 vm_paddr_t paddr; 321 size_t seglen; 322 u_int i, j; 323 324 sg = s->sg_dsgl; 325 cpl = dst; 326 cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) | 327 V_CPL_RX_PHYS_DSGL_ISRDMA(0)); 328 cpl->pcirlxorder_to_noofsgentr = htobe32( 329 V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) | 330 V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) | 331 V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) | 332 V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs)); 333 cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; 334 cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id); 335 cpl->rss_hdr_int.hash_val = 0; 336 sgl = (struct phys_sge_pairs *)(cpl + 1); 337 j = 0; 338 for (i = 0; i < sg->sg_nseg; i++) { 339 seglen = sg->sg_segs[i].ss_len; 340 paddr = sg->sg_segs[i].ss_paddr; 341 do { 342 sgl->addr[j] = htobe64(paddr); 343 if (seglen > DSGL_SGE_MAXLEN) { 344 sgl->len[j] = htobe16(DSGL_SGE_MAXLEN); 345 paddr += DSGL_SGE_MAXLEN; 346 seglen -= DSGL_SGE_MAXLEN; 347 } else { 348 sgl->len[j] = htobe16(seglen); 349 seglen = 0; 350 } 351 j++; 352 if (j == 8) { 353 sgl++; 354 j = 0; 355 } 356 } while (seglen != 0); 357 } 358 MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs); 359 } 360 361 /* These functions deal with the ULPTX_SGL for input payload. */ 362 static inline int 363 ccr_ulptx_sgl_len(int nsegs) 364 { 365 u_int n; 366 367 nsegs--; /* first segment is part of ulptx_sgl */ 368 n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 369 return (roundup2(n, 16)); 370 } 371 372 static void 373 ccr_write_ulptx_sgl(struct ccr_session *s, void *dst, int nsegs) 374 { 375 struct ulptx_sgl *usgl; 376 struct sglist *sg; 377 struct sglist_seg *ss; 378 int i; 379 380 sg = s->sg_ulptx; 381 MPASS(nsegs == sg->sg_nseg); 382 ss = &sg->sg_segs[0]; 383 usgl = dst; 384 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 385 V_ULPTX_NSGE(nsegs)); 386 usgl->len0 = htobe32(ss->ss_len); 387 usgl->addr0 = htobe64(ss->ss_paddr); 388 ss++; 389 for (i = 0; i < sg->sg_nseg - 1; i++) { 390 usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len); 391 usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr); 392 ss++; 393 } 394 } 395 396 static bool 397 ccr_use_imm_data(u_int transhdr_len, u_int input_len) 398 { 399 400 if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN) 401 return (false); 402 if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) > 403 SGE_MAX_WR_LEN) 404 return (false); 405 return (true); 406 } 407 408 static void 409 ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s, 410 struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len, 411 u_int sgl_len, u_int hash_size, struct cryptop *crp) 412 { 413 u_int cctx_size, idata_len; 414 415 cctx_size = sizeof(struct _key_ctx) + kctx_len; 416 crwr->wreq.op_to_cctx_size = htobe32( 417 V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) | 418 V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) | 419 V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) | 420 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) | 421 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4)); 422 crwr->wreq.len16_pkd = htobe32( 423 V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16)); 424 crwr->wreq.session_id = 0; 425 crwr->wreq.rx_chid_to_rx_q_id = htobe32( 426 V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->tx_channel_id) | 427 V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) | 428 V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) | 429 V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) | 430 V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) | 431 V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) | 432 V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id)); 433 crwr->wreq.key_addr = 0; 434 crwr->wreq.pld_size_hash_size = htobe32( 435 V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) | 436 V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size)); 437 crwr->wreq.cookie = htobe64((uintptr_t)crp); 438 439 crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 440 V_ULP_TXPKT_DATAMODIFY(0) | 441 V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) | 442 V_ULP_TXPKT_DEST(0) | 443 V_ULP_TXPKT_FID(s->port->rxq->iq.abs_id) | V_ULP_TXPKT_RO(1)); 444 crwr->ulptx.len = htobe32( 445 ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16)); 446 447 crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 448 V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0)); 449 idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len; 450 if (imm_len % 16 != 0) 451 idata_len -= 16 - imm_len % 16; 452 crwr->sc_imm.len = htobe32(idata_len); 453 } 454 455 static int 456 ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 457 { 458 struct chcr_wr *crwr; 459 struct wrqe *wr; 460 struct auth_hash *axf; 461 char *dst; 462 u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len; 463 u_int hmac_ctrl, imm_len, iopad_size; 464 int error, sgl_nsegs, sgl_len, use_opad; 465 466 /* Reject requests with too large of an input buffer. */ 467 if (crp->crp_payload_length > MAX_REQUEST_SIZE) 468 return (EFBIG); 469 470 axf = s->hmac.auth_hash; 471 472 if (s->mode == HMAC) { 473 use_opad = 1; 474 hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC; 475 } else { 476 use_opad = 0; 477 hmac_ctrl = SCMD_HMAC_CTRL_NOP; 478 } 479 480 /* PADs must be 128-bit aligned. */ 481 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 482 483 /* 484 * The 'key' part of the context includes the aligned IPAD and 485 * OPAD. 486 */ 487 kctx_len = iopad_size; 488 if (use_opad) 489 kctx_len += iopad_size; 490 hash_size_in_response = axf->hashsize; 491 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); 492 493 if (crp->crp_payload_length == 0) { 494 imm_len = axf->blocksize; 495 sgl_nsegs = 0; 496 sgl_len = 0; 497 } else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) { 498 imm_len = crp->crp_payload_length; 499 sgl_nsegs = 0; 500 sgl_len = 0; 501 } else { 502 imm_len = 0; 503 sglist_reset(s->sg_ulptx); 504 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 505 crp->crp_payload_start, crp->crp_payload_length); 506 if (error) 507 return (error); 508 sgl_nsegs = s->sg_ulptx->sg_nseg; 509 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 510 } 511 512 wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; 513 if (wr_len > SGE_MAX_WR_LEN) 514 return (EFBIG); 515 wr = alloc_wrqe(wr_len, s->port->txq); 516 if (wr == NULL) { 517 counter_u64_add(sc->stats_wr_nomem, 1); 518 return (ENOMEM); 519 } 520 crwr = wrtod(wr); 521 memset(crwr, 0, wr_len); 522 523 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 524 hash_size_in_response, crp); 525 526 crwr->sec_cpl.op_ivinsrtofst = htobe32( 527 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 528 V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | 529 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 530 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 531 V_CPL_TX_SEC_PDU_IVINSRTOFST(0)); 532 533 crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ? 534 axf->blocksize : crp->crp_payload_length); 535 536 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 537 V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0)); 538 539 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 540 crwr->sec_cpl.seqno_numivs = htobe32( 541 V_SCMD_SEQ_NO_CTRL(0) | 542 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 543 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) | 544 V_SCMD_AUTH_MODE(s->hmac.auth_mode) | 545 V_SCMD_HMAC_CTRL(hmac_ctrl)); 546 crwr->sec_cpl.ivgen_hdrlen = htobe32( 547 V_SCMD_LAST_FRAG(0) | 548 V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) | 549 V_SCMD_MAC_ONLY(1)); 550 551 memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len); 552 553 /* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */ 554 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; 555 crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | 556 V_KEY_CONTEXT_OPAD_PRESENT(use_opad) | 557 V_KEY_CONTEXT_SALT_PRESENT(1) | 558 V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) | 559 V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1)); 560 561 dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES; 562 if (crp->crp_payload_length == 0) { 563 dst[0] = 0x80; 564 if (s->mode == HMAC) 565 *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) = 566 htobe64(axf->blocksize << 3); 567 } else if (imm_len != 0) 568 crypto_copydata(crp, crp->crp_payload_start, 569 crp->crp_payload_length, dst); 570 else 571 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 572 573 /* XXX: TODO backpressure */ 574 t4_wrq_tx(sc->adapter, wr); 575 576 return (0); 577 } 578 579 static int 580 ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, 581 const struct cpl_fw6_pld *cpl, int error) 582 { 583 uint8_t hash[HASH_MAX_LEN]; 584 585 if (error) 586 return (error); 587 588 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { 589 crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len, 590 hash); 591 if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0) 592 return (EBADMSG); 593 } else 594 crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len, 595 (cpl + 1)); 596 return (0); 597 } 598 599 static int 600 ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 601 { 602 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 603 struct chcr_wr *crwr; 604 struct wrqe *wr; 605 char *dst; 606 u_int kctx_len, key_half, op_type, transhdr_len, wr_len; 607 u_int imm_len, iv_len; 608 int dsgl_nsegs, dsgl_len; 609 int sgl_nsegs, sgl_len; 610 int error; 611 612 if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) 613 return (EINVAL); 614 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC && 615 (crp->crp_payload_length % AES_BLOCK_LEN) != 0) 616 return (EINVAL); 617 618 /* Reject requests with too large of an input buffer. */ 619 if (crp->crp_payload_length > MAX_REQUEST_SIZE) 620 return (EFBIG); 621 622 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 623 op_type = CHCR_ENCRYPT_OP; 624 else 625 op_type = CHCR_DECRYPT_OP; 626 627 sglist_reset(s->sg_dsgl); 628 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 629 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 630 crp->crp_payload_output_start, crp->crp_payload_length); 631 else 632 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 633 crp->crp_payload_start, crp->crp_payload_length); 634 if (error) 635 return (error); 636 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); 637 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 638 return (EFBIG); 639 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 640 641 /* The 'key' must be 128-bit aligned. */ 642 kctx_len = roundup2(s->blkcipher.key_len, 16); 643 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 644 645 /* For AES-XTS we send a 16-byte IV in the work request. */ 646 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 647 iv_len = AES_BLOCK_LEN; 648 else 649 iv_len = s->blkcipher.iv_len; 650 651 if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) { 652 imm_len = crp->crp_payload_length; 653 sgl_nsegs = 0; 654 sgl_len = 0; 655 } else { 656 imm_len = 0; 657 sglist_reset(s->sg_ulptx); 658 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 659 crp->crp_payload_start, crp->crp_payload_length); 660 if (error) 661 return (error); 662 sgl_nsegs = s->sg_ulptx->sg_nseg; 663 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 664 } 665 666 wr_len = roundup2(transhdr_len, 16) + iv_len + 667 roundup2(imm_len, 16) + sgl_len; 668 if (wr_len > SGE_MAX_WR_LEN) 669 return (EFBIG); 670 wr = alloc_wrqe(wr_len, s->port->txq); 671 if (wr == NULL) { 672 counter_u64_add(sc->stats_wr_nomem, 1); 673 return (ENOMEM); 674 } 675 crwr = wrtod(wr); 676 memset(crwr, 0, wr_len); 677 678 crypto_read_iv(crp, iv); 679 680 /* Zero the remainder of the IV for AES-XTS. */ 681 memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len); 682 683 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, 684 crp); 685 686 crwr->sec_cpl.op_ivinsrtofst = htobe32( 687 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 688 V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | 689 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 690 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 691 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 692 693 crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length); 694 695 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 696 V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) | 697 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 698 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 699 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0)); 700 701 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 702 crwr->sec_cpl.seqno_numivs = htobe32( 703 V_SCMD_SEQ_NO_CTRL(0) | 704 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 705 V_SCMD_ENC_DEC_CTRL(op_type) | 706 V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | 707 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) | 708 V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) | 709 V_SCMD_IV_SIZE(iv_len / 2) | 710 V_SCMD_NUM_IVS(0)); 711 crwr->sec_cpl.ivgen_hdrlen = htobe32( 712 V_SCMD_IV_GEN_CTRL(0) | 713 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 714 V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); 715 716 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 717 switch (s->blkcipher.cipher_mode) { 718 case SCMD_CIPH_MODE_AES_CBC: 719 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 720 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 721 s->blkcipher.key_len); 722 else 723 memcpy(crwr->key_ctx.key, s->blkcipher.deckey, 724 s->blkcipher.key_len); 725 break; 726 case SCMD_CIPH_MODE_AES_CTR: 727 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 728 s->blkcipher.key_len); 729 break; 730 case SCMD_CIPH_MODE_AES_XTS: 731 key_half = s->blkcipher.key_len / 2; 732 memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, 733 key_half); 734 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 735 memcpy(crwr->key_ctx.key + key_half, 736 s->blkcipher.enckey, key_half); 737 else 738 memcpy(crwr->key_ctx.key + key_half, 739 s->blkcipher.deckey, key_half); 740 break; 741 } 742 743 dst = (char *)(crwr + 1) + kctx_len; 744 ccr_write_phys_dsgl(s, dst, dsgl_nsegs); 745 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 746 memcpy(dst, iv, iv_len); 747 dst += iv_len; 748 if (imm_len != 0) 749 crypto_copydata(crp, crp->crp_payload_start, 750 crp->crp_payload_length, dst); 751 else 752 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 753 754 /* XXX: TODO backpressure */ 755 t4_wrq_tx(sc->adapter, wr); 756 757 explicit_bzero(iv, sizeof(iv)); 758 return (0); 759 } 760 761 static int 762 ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s, 763 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 764 { 765 766 /* 767 * The updated IV to permit chained requests is at 768 * cpl->data[2], but OCF doesn't permit chained requests. 769 */ 770 return (error); 771 } 772 773 /* 774 * 'hashsize' is the length of a full digest. 'authsize' is the 775 * requested digest length for this operation which may be less 776 * than 'hashsize'. 777 */ 778 static int 779 ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize) 780 { 781 782 if (authsize == 10) 783 return (SCMD_HMAC_CTRL_TRUNC_RFC4366); 784 if (authsize == 12) 785 return (SCMD_HMAC_CTRL_IPSEC_96BIT); 786 if (authsize == hashsize / 2) 787 return (SCMD_HMAC_CTRL_DIV2); 788 return (SCMD_HMAC_CTRL_NO_TRUNC); 789 } 790 791 static int 792 ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 793 { 794 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 795 struct chcr_wr *crwr; 796 struct wrqe *wr; 797 struct auth_hash *axf; 798 char *dst; 799 u_int kctx_len, key_half, op_type, transhdr_len, wr_len; 800 u_int hash_size_in_response, imm_len, iopad_size, iv_len; 801 u_int aad_start, aad_stop; 802 u_int auth_insert; 803 u_int cipher_start, cipher_stop; 804 u_int hmac_ctrl, input_len; 805 int dsgl_nsegs, dsgl_len; 806 int sgl_nsegs, sgl_len; 807 int error; 808 809 /* 810 * If there is a need in the future, requests with an empty 811 * payload could be supported as HMAC-only requests. 812 */ 813 if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) 814 return (EINVAL); 815 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC && 816 (crp->crp_payload_length % AES_BLOCK_LEN) != 0) 817 return (EINVAL); 818 819 /* For AES-XTS we send a 16-byte IV in the work request. */ 820 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 821 iv_len = AES_BLOCK_LEN; 822 else 823 iv_len = s->blkcipher.iv_len; 824 825 if (crp->crp_aad_length + iv_len > MAX_AAD_LEN) 826 return (EINVAL); 827 828 axf = s->hmac.auth_hash; 829 hash_size_in_response = s->hmac.hash_len; 830 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 831 op_type = CHCR_ENCRYPT_OP; 832 else 833 op_type = CHCR_DECRYPT_OP; 834 835 /* 836 * The output buffer consists of the cipher text followed by 837 * the hash when encrypting. For decryption it only contains 838 * the plain text. 839 * 840 * Due to a firmware bug, the output buffer must include a 841 * dummy output buffer for the IV and AAD prior to the real 842 * output buffer. 843 */ 844 if (op_type == CHCR_ENCRYPT_OP) { 845 if (iv_len + crp->crp_aad_length + crp->crp_payload_length + 846 hash_size_in_response > MAX_REQUEST_SIZE) 847 return (EFBIG); 848 } else { 849 if (iv_len + crp->crp_aad_length + crp->crp_payload_length > 850 MAX_REQUEST_SIZE) 851 return (EFBIG); 852 } 853 sglist_reset(s->sg_dsgl); 854 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, 855 iv_len + crp->crp_aad_length); 856 if (error) 857 return (error); 858 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 859 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 860 crp->crp_payload_output_start, crp->crp_payload_length); 861 else 862 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 863 crp->crp_payload_start, crp->crp_payload_length); 864 if (error) 865 return (error); 866 if (op_type == CHCR_ENCRYPT_OP) { 867 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 868 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 869 crp->crp_digest_start, hash_size_in_response); 870 else 871 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 872 crp->crp_digest_start, hash_size_in_response); 873 if (error) 874 return (error); 875 } 876 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); 877 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 878 return (EFBIG); 879 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 880 881 /* PADs must be 128-bit aligned. */ 882 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 883 884 /* 885 * The 'key' part of the key context consists of the key followed 886 * by the IPAD and OPAD. 887 */ 888 kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2; 889 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 890 891 /* 892 * The input buffer consists of the IV, any AAD, and then the 893 * cipher/plain text. For decryption requests the hash is 894 * appended after the cipher text. 895 * 896 * The IV is always stored at the start of the input buffer 897 * even though it may be duplicated in the payload. The 898 * crypto engine doesn't work properly if the IV offset points 899 * inside of the AAD region, so a second copy is always 900 * required. 901 */ 902 input_len = crp->crp_aad_length + crp->crp_payload_length; 903 904 /* 905 * The firmware hangs if sent a request which is a 906 * bit smaller than MAX_REQUEST_SIZE. In particular, the 907 * firmware appears to require 512 - 16 bytes of spare room 908 * along with the size of the hash even if the hash isn't 909 * included in the input buffer. 910 */ 911 if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) > 912 MAX_REQUEST_SIZE) 913 return (EFBIG); 914 if (op_type == CHCR_DECRYPT_OP) 915 input_len += hash_size_in_response; 916 917 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { 918 imm_len = input_len; 919 sgl_nsegs = 0; 920 sgl_len = 0; 921 } else { 922 imm_len = 0; 923 sglist_reset(s->sg_ulptx); 924 if (crp->crp_aad_length != 0) { 925 if (crp->crp_aad != NULL) 926 error = sglist_append(s->sg_ulptx, 927 crp->crp_aad, crp->crp_aad_length); 928 else 929 error = sglist_append_sglist(s->sg_ulptx, 930 s->sg_input, crp->crp_aad_start, 931 crp->crp_aad_length); 932 if (error) 933 return (error); 934 } 935 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 936 crp->crp_payload_start, crp->crp_payload_length); 937 if (error) 938 return (error); 939 if (op_type == CHCR_DECRYPT_OP) { 940 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 941 crp->crp_digest_start, hash_size_in_response); 942 if (error) 943 return (error); 944 } 945 sgl_nsegs = s->sg_ulptx->sg_nseg; 946 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 947 } 948 949 /* Any AAD comes after the IV. */ 950 if (crp->crp_aad_length != 0) { 951 aad_start = iv_len + 1; 952 aad_stop = aad_start + crp->crp_aad_length - 1; 953 } else { 954 aad_start = 0; 955 aad_stop = 0; 956 } 957 cipher_start = iv_len + crp->crp_aad_length + 1; 958 if (op_type == CHCR_DECRYPT_OP) 959 cipher_stop = hash_size_in_response; 960 else 961 cipher_stop = 0; 962 if (op_type == CHCR_DECRYPT_OP) 963 auth_insert = hash_size_in_response; 964 else 965 auth_insert = 0; 966 967 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + 968 sgl_len; 969 if (wr_len > SGE_MAX_WR_LEN) 970 return (EFBIG); 971 wr = alloc_wrqe(wr_len, s->port->txq); 972 if (wr == NULL) { 973 counter_u64_add(sc->stats_wr_nomem, 1); 974 return (ENOMEM); 975 } 976 crwr = wrtod(wr); 977 memset(crwr, 0, wr_len); 978 979 crypto_read_iv(crp, iv); 980 981 /* Zero the remainder of the IV for AES-XTS. */ 982 memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len); 983 984 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 985 op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp); 986 987 crwr->sec_cpl.op_ivinsrtofst = htobe32( 988 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 989 V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | 990 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 991 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 992 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 993 994 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); 995 996 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 997 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 998 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 999 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1000 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4)); 1001 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 1002 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) | 1003 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 1004 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1005 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1006 1007 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1008 hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response); 1009 crwr->sec_cpl.seqno_numivs = htobe32( 1010 V_SCMD_SEQ_NO_CTRL(0) | 1011 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 1012 V_SCMD_ENC_DEC_CTRL(op_type) | 1013 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) | 1014 V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | 1015 V_SCMD_AUTH_MODE(s->hmac.auth_mode) | 1016 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1017 V_SCMD_IV_SIZE(iv_len / 2) | 1018 V_SCMD_NUM_IVS(0)); 1019 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1020 V_SCMD_IV_GEN_CTRL(0) | 1021 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1022 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1023 1024 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1025 switch (s->blkcipher.cipher_mode) { 1026 case SCMD_CIPH_MODE_AES_CBC: 1027 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1028 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 1029 s->blkcipher.key_len); 1030 else 1031 memcpy(crwr->key_ctx.key, s->blkcipher.deckey, 1032 s->blkcipher.key_len); 1033 break; 1034 case SCMD_CIPH_MODE_AES_CTR: 1035 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 1036 s->blkcipher.key_len); 1037 break; 1038 case SCMD_CIPH_MODE_AES_XTS: 1039 key_half = s->blkcipher.key_len / 2; 1040 memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, 1041 key_half); 1042 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1043 memcpy(crwr->key_ctx.key + key_half, 1044 s->blkcipher.enckey, key_half); 1045 else 1046 memcpy(crwr->key_ctx.key + key_half, 1047 s->blkcipher.deckey, key_half); 1048 break; 1049 } 1050 1051 dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); 1052 memcpy(dst, s->hmac.pads, iopad_size * 2); 1053 1054 dst = (char *)(crwr + 1) + kctx_len; 1055 ccr_write_phys_dsgl(s, dst, dsgl_nsegs); 1056 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1057 memcpy(dst, iv, iv_len); 1058 dst += iv_len; 1059 if (imm_len != 0) { 1060 if (crp->crp_aad_length != 0) { 1061 if (crp->crp_aad != NULL) 1062 memcpy(dst, crp->crp_aad, crp->crp_aad_length); 1063 else 1064 crypto_copydata(crp, crp->crp_aad_start, 1065 crp->crp_aad_length, dst); 1066 dst += crp->crp_aad_length; 1067 } 1068 crypto_copydata(crp, crp->crp_payload_start, 1069 crp->crp_payload_length, dst); 1070 dst += crp->crp_payload_length; 1071 if (op_type == CHCR_DECRYPT_OP) 1072 crypto_copydata(crp, crp->crp_digest_start, 1073 hash_size_in_response, dst); 1074 } else 1075 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 1076 1077 /* XXX: TODO backpressure */ 1078 t4_wrq_tx(sc->adapter, wr); 1079 1080 explicit_bzero(iv, sizeof(iv)); 1081 return (0); 1082 } 1083 1084 static int 1085 ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s, 1086 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1087 { 1088 1089 /* 1090 * The updated IV to permit chained requests is at 1091 * cpl->data[2], but OCF doesn't permit chained requests. 1092 */ 1093 return (error); 1094 } 1095 1096 static int 1097 ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 1098 { 1099 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 1100 struct chcr_wr *crwr; 1101 struct wrqe *wr; 1102 char *dst; 1103 u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; 1104 u_int hash_size_in_response, imm_len; 1105 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; 1106 u_int hmac_ctrl, input_len; 1107 int dsgl_nsegs, dsgl_len; 1108 int sgl_nsegs, sgl_len; 1109 int error; 1110 1111 if (s->blkcipher.key_len == 0) 1112 return (EINVAL); 1113 1114 /* 1115 * The crypto engine doesn't handle GCM requests with an empty 1116 * payload, so handle those in software instead. 1117 */ 1118 if (crp->crp_payload_length == 0) 1119 return (EMSGSIZE); 1120 1121 if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN) 1122 return (EMSGSIZE); 1123 1124 hash_size_in_response = s->gmac.hash_len; 1125 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1126 op_type = CHCR_ENCRYPT_OP; 1127 else 1128 op_type = CHCR_DECRYPT_OP; 1129 1130 /* 1131 * The IV handling for GCM in OCF is a bit more complicated in 1132 * that IPSec provides a full 16-byte IV (including the 1133 * counter), whereas the /dev/crypto interface sometimes 1134 * provides a full 16-byte IV (if no IV is provided in the 1135 * ioctl) and sometimes a 12-byte IV (if the IV was explicit). 1136 * 1137 * When provided a 12-byte IV, assume the IV is really 16 bytes 1138 * with a counter in the last 4 bytes initialized to 1. 1139 * 1140 * While iv_len is checked below, the value is currently 1141 * always set to 12 when creating a GCM session in this driver 1142 * due to limitations in OCF (there is no way to know what the 1143 * IV length of a given request will be). This means that the 1144 * driver always assumes as 12-byte IV for now. 1145 */ 1146 if (s->blkcipher.iv_len == 12) 1147 iv_len = AES_BLOCK_LEN; 1148 else 1149 iv_len = s->blkcipher.iv_len; 1150 1151 /* 1152 * GCM requests should always provide an explicit IV. 1153 */ 1154 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) 1155 return (EINVAL); 1156 1157 /* 1158 * The output buffer consists of the cipher text followed by 1159 * the tag when encrypting. For decryption it only contains 1160 * the plain text. 1161 * 1162 * Due to a firmware bug, the output buffer must include a 1163 * dummy output buffer for the IV and AAD prior to the real 1164 * output buffer. 1165 */ 1166 if (op_type == CHCR_ENCRYPT_OP) { 1167 if (iv_len + crp->crp_aad_length + crp->crp_payload_length + 1168 hash_size_in_response > MAX_REQUEST_SIZE) 1169 return (EFBIG); 1170 } else { 1171 if (iv_len + crp->crp_aad_length + crp->crp_payload_length > 1172 MAX_REQUEST_SIZE) 1173 return (EFBIG); 1174 } 1175 sglist_reset(s->sg_dsgl); 1176 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len + 1177 crp->crp_aad_length); 1178 if (error) 1179 return (error); 1180 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1181 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 1182 crp->crp_payload_output_start, crp->crp_payload_length); 1183 else 1184 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 1185 crp->crp_payload_start, crp->crp_payload_length); 1186 if (error) 1187 return (error); 1188 if (op_type == CHCR_ENCRYPT_OP) { 1189 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1190 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 1191 crp->crp_digest_start, hash_size_in_response); 1192 else 1193 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 1194 crp->crp_digest_start, hash_size_in_response); 1195 if (error) 1196 return (error); 1197 } 1198 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); 1199 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 1200 return (EFBIG); 1201 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 1202 1203 /* 1204 * The 'key' part of the key context consists of the key followed 1205 * by the Galois hash key. 1206 */ 1207 kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN; 1208 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 1209 1210 /* 1211 * The input buffer consists of the IV, any AAD, and then the 1212 * cipher/plain text. For decryption requests the hash is 1213 * appended after the cipher text. 1214 * 1215 * The IV is always stored at the start of the input buffer 1216 * even though it may be duplicated in the payload. The 1217 * crypto engine doesn't work properly if the IV offset points 1218 * inside of the AAD region, so a second copy is always 1219 * required. 1220 */ 1221 input_len = crp->crp_aad_length + crp->crp_payload_length; 1222 if (op_type == CHCR_DECRYPT_OP) 1223 input_len += hash_size_in_response; 1224 if (input_len > MAX_REQUEST_SIZE) 1225 return (EFBIG); 1226 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { 1227 imm_len = input_len; 1228 sgl_nsegs = 0; 1229 sgl_len = 0; 1230 } else { 1231 imm_len = 0; 1232 sglist_reset(s->sg_ulptx); 1233 if (crp->crp_aad_length != 0) { 1234 if (crp->crp_aad != NULL) 1235 error = sglist_append(s->sg_ulptx, 1236 crp->crp_aad, crp->crp_aad_length); 1237 else 1238 error = sglist_append_sglist(s->sg_ulptx, 1239 s->sg_input, crp->crp_aad_start, 1240 crp->crp_aad_length); 1241 if (error) 1242 return (error); 1243 } 1244 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 1245 crp->crp_payload_start, crp->crp_payload_length); 1246 if (error) 1247 return (error); 1248 if (op_type == CHCR_DECRYPT_OP) { 1249 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 1250 crp->crp_digest_start, hash_size_in_response); 1251 if (error) 1252 return (error); 1253 } 1254 sgl_nsegs = s->sg_ulptx->sg_nseg; 1255 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 1256 } 1257 1258 if (crp->crp_aad_length != 0) { 1259 aad_start = iv_len + 1; 1260 aad_stop = aad_start + crp->crp_aad_length - 1; 1261 } else { 1262 aad_start = 0; 1263 aad_stop = 0; 1264 } 1265 cipher_start = iv_len + crp->crp_aad_length + 1; 1266 if (op_type == CHCR_DECRYPT_OP) 1267 cipher_stop = hash_size_in_response; 1268 else 1269 cipher_stop = 0; 1270 if (op_type == CHCR_DECRYPT_OP) 1271 auth_insert = hash_size_in_response; 1272 else 1273 auth_insert = 0; 1274 1275 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + 1276 sgl_len; 1277 if (wr_len > SGE_MAX_WR_LEN) 1278 return (EFBIG); 1279 wr = alloc_wrqe(wr_len, s->port->txq); 1280 if (wr == NULL) { 1281 counter_u64_add(sc->stats_wr_nomem, 1); 1282 return (ENOMEM); 1283 } 1284 crwr = wrtod(wr); 1285 memset(crwr, 0, wr_len); 1286 1287 memcpy(iv, crp->crp_iv, s->blkcipher.iv_len); 1288 if (s->blkcipher.iv_len == 12) 1289 *(uint32_t *)&iv[12] = htobe32(1); 1290 1291 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, 1292 crp); 1293 1294 crwr->sec_cpl.op_ivinsrtofst = htobe32( 1295 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 1296 V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | 1297 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 1298 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 1299 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 1300 1301 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); 1302 1303 /* 1304 * NB: cipherstop is explicitly set to 0. On encrypt it 1305 * should normally be set to 0 anyway. However, for decrypt 1306 * the cipher ends before the tag in the ETA case (and 1307 * authstop is set to stop before the tag), but for GCM the 1308 * cipher still runs to the end of the buffer. Not sure if 1309 * this is intentional or a firmware quirk, but it is required 1310 * for working tag validation with GCM decryption. 1311 */ 1312 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 1313 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 1314 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 1315 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1316 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 1317 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 1318 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | 1319 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 1320 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1321 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1322 1323 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1324 hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response); 1325 crwr->sec_cpl.seqno_numivs = htobe32( 1326 V_SCMD_SEQ_NO_CTRL(0) | 1327 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 1328 V_SCMD_ENC_DEC_CTRL(op_type) | 1329 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) | 1330 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) | 1331 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) | 1332 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1333 V_SCMD_IV_SIZE(iv_len / 2) | 1334 V_SCMD_NUM_IVS(0)); 1335 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1336 V_SCMD_IV_GEN_CTRL(0) | 1337 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1338 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1339 1340 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1341 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); 1342 dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); 1343 memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN); 1344 1345 dst = (char *)(crwr + 1) + kctx_len; 1346 ccr_write_phys_dsgl(s, dst, dsgl_nsegs); 1347 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1348 memcpy(dst, iv, iv_len); 1349 dst += iv_len; 1350 if (imm_len != 0) { 1351 if (crp->crp_aad_length != 0) { 1352 if (crp->crp_aad != NULL) 1353 memcpy(dst, crp->crp_aad, crp->crp_aad_length); 1354 else 1355 crypto_copydata(crp, crp->crp_aad_start, 1356 crp->crp_aad_length, dst); 1357 dst += crp->crp_aad_length; 1358 } 1359 crypto_copydata(crp, crp->crp_payload_start, 1360 crp->crp_payload_length, dst); 1361 dst += crp->crp_payload_length; 1362 if (op_type == CHCR_DECRYPT_OP) 1363 crypto_copydata(crp, crp->crp_digest_start, 1364 hash_size_in_response, dst); 1365 } else 1366 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 1367 1368 /* XXX: TODO backpressure */ 1369 t4_wrq_tx(sc->adapter, wr); 1370 1371 explicit_bzero(iv, sizeof(iv)); 1372 return (0); 1373 } 1374 1375 static int 1376 ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s, 1377 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1378 { 1379 1380 /* 1381 * The updated IV to permit chained requests is at 1382 * cpl->data[2], but OCF doesn't permit chained requests. 1383 * 1384 * Note that the hardware should always verify the GMAC hash. 1385 */ 1386 return (error); 1387 } 1388 1389 /* 1390 * Handle a GCM request that is not supported by the crypto engine by 1391 * performing the operation in software. Derived from swcr_authenc(). 1392 */ 1393 static void 1394 ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp) 1395 { 1396 struct auth_hash *axf; 1397 struct enc_xform *exf; 1398 void *auth_ctx, *kschedule; 1399 char block[GMAC_BLOCK_LEN]; 1400 char digest[GMAC_DIGEST_LEN]; 1401 char iv[AES_BLOCK_LEN]; 1402 int error, i, len; 1403 1404 auth_ctx = NULL; 1405 kschedule = NULL; 1406 1407 /* Initialize the MAC. */ 1408 switch (s->blkcipher.key_len) { 1409 case 16: 1410 axf = &auth_hash_nist_gmac_aes_128; 1411 break; 1412 case 24: 1413 axf = &auth_hash_nist_gmac_aes_192; 1414 break; 1415 case 32: 1416 axf = &auth_hash_nist_gmac_aes_256; 1417 break; 1418 default: 1419 error = EINVAL; 1420 goto out; 1421 } 1422 auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); 1423 if (auth_ctx == NULL) { 1424 error = ENOMEM; 1425 goto out; 1426 } 1427 axf->Init(auth_ctx); 1428 axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); 1429 1430 /* Initialize the cipher. */ 1431 exf = &enc_xform_aes_nist_gcm; 1432 kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT); 1433 if (kschedule == NULL) { 1434 error = ENOMEM; 1435 goto out; 1436 } 1437 error = exf->setkey(kschedule, s->blkcipher.enckey, 1438 s->blkcipher.key_len); 1439 if (error) 1440 goto out; 1441 1442 /* 1443 * This assumes a 12-byte IV from the crp. See longer comment 1444 * above in ccr_gcm() for more details. 1445 */ 1446 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { 1447 error = EINVAL; 1448 goto out; 1449 } 1450 memcpy(iv, crp->crp_iv, 12); 1451 *(uint32_t *)&iv[12] = htobe32(1); 1452 1453 axf->Reinit(auth_ctx, iv, sizeof(iv)); 1454 1455 /* MAC the AAD. */ 1456 if (crp->crp_aad != NULL) { 1457 len = rounddown(crp->crp_aad_length, sizeof(block)); 1458 if (len != 0) 1459 axf->Update(auth_ctx, crp->crp_aad, len); 1460 if (crp->crp_aad_length != len) { 1461 memset(block, 0, sizeof(block)); 1462 memcpy(block, (char *)crp->crp_aad + len, 1463 crp->crp_aad_length - len); 1464 axf->Update(auth_ctx, block, sizeof(block)); 1465 } 1466 } else { 1467 for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) { 1468 len = imin(crp->crp_aad_length - i, sizeof(block)); 1469 crypto_copydata(crp, crp->crp_aad_start + i, len, 1470 block); 1471 bzero(block + len, sizeof(block) - len); 1472 axf->Update(auth_ctx, block, sizeof(block)); 1473 } 1474 } 1475 1476 exf->reinit(kschedule, iv); 1477 1478 /* Do encryption with MAC */ 1479 for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { 1480 len = imin(crp->crp_payload_length - i, sizeof(block)); 1481 crypto_copydata(crp, crp->crp_payload_start + i, len, block); 1482 bzero(block + len, sizeof(block) - len); 1483 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1484 exf->encrypt(kschedule, block, block); 1485 axf->Update(auth_ctx, block, len); 1486 crypto_copyback(crp, crp->crp_payload_start + i, len, 1487 block); 1488 } else { 1489 axf->Update(auth_ctx, block, len); 1490 } 1491 } 1492 1493 /* Length block. */ 1494 bzero(block, sizeof(block)); 1495 ((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8); 1496 ((uint32_t *)block)[3] = htobe32(crp->crp_payload_length * 8); 1497 axf->Update(auth_ctx, block, sizeof(block)); 1498 1499 /* Finalize MAC. */ 1500 axf->Final(digest, auth_ctx); 1501 1502 /* Inject or validate tag. */ 1503 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1504 crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), 1505 digest); 1506 error = 0; 1507 } else { 1508 char digest2[GMAC_DIGEST_LEN]; 1509 1510 crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), 1511 digest2); 1512 if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { 1513 error = 0; 1514 1515 /* Tag matches, decrypt data. */ 1516 for (i = 0; i < crp->crp_payload_length; 1517 i += sizeof(block)) { 1518 len = imin(crp->crp_payload_length - i, 1519 sizeof(block)); 1520 crypto_copydata(crp, crp->crp_payload_start + i, 1521 len, block); 1522 bzero(block + len, sizeof(block) - len); 1523 exf->decrypt(kschedule, block, block); 1524 crypto_copyback(crp, crp->crp_payload_start + i, 1525 len, block); 1526 } 1527 } else 1528 error = EBADMSG; 1529 explicit_bzero(digest2, sizeof(digest2)); 1530 } 1531 1532 out: 1533 zfree(kschedule, M_CCR); 1534 zfree(auth_ctx, M_CCR); 1535 explicit_bzero(block, sizeof(block)); 1536 explicit_bzero(iv, sizeof(iv)); 1537 explicit_bzero(digest, sizeof(digest)); 1538 crp->crp_etype = error; 1539 crypto_done(crp); 1540 } 1541 1542 static void 1543 generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response, 1544 const char *iv, char *b0) 1545 { 1546 u_int i, payload_len; 1547 1548 /* NB: L is already set in the first byte of the IV. */ 1549 memcpy(b0, iv, CCM_B0_SIZE); 1550 1551 /* Set length of hash in bits 3 - 5. */ 1552 b0[0] |= (((hash_size_in_response - 2) / 2) << 3); 1553 1554 /* Store the payload length as a big-endian value. */ 1555 payload_len = crp->crp_payload_length; 1556 for (i = 0; i < iv[0]; i++) { 1557 b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len; 1558 payload_len >>= 8; 1559 } 1560 1561 /* 1562 * If there is AAD in the request, set bit 6 in the flags 1563 * field and store the AAD length as a big-endian value at the 1564 * start of block 1. This only assumes a 16-bit AAD length 1565 * since T6 doesn't support large AAD sizes. 1566 */ 1567 if (crp->crp_aad_length != 0) { 1568 b0[0] |= (1 << 6); 1569 *(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length); 1570 } 1571 } 1572 1573 static int 1574 ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 1575 { 1576 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 1577 struct ulptx_idata *idata; 1578 struct chcr_wr *crwr; 1579 struct wrqe *wr; 1580 char *dst; 1581 u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; 1582 u_int aad_len, b0_len, hash_size_in_response, imm_len; 1583 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; 1584 u_int hmac_ctrl, input_len; 1585 int dsgl_nsegs, dsgl_len; 1586 int sgl_nsegs, sgl_len; 1587 int error; 1588 1589 if (s->blkcipher.key_len == 0) 1590 return (EINVAL); 1591 1592 /* 1593 * The crypto engine doesn't handle CCM requests with an empty 1594 * payload, so handle those in software instead. 1595 */ 1596 if (crp->crp_payload_length == 0) 1597 return (EMSGSIZE); 1598 1599 /* 1600 * CCM always includes block 0 in the AAD before AAD from the 1601 * request. 1602 */ 1603 b0_len = CCM_B0_SIZE; 1604 if (crp->crp_aad_length != 0) 1605 b0_len += CCM_AAD_FIELD_SIZE; 1606 aad_len = b0_len + crp->crp_aad_length; 1607 1608 /* 1609 * CCM requests should always provide an explicit IV (really 1610 * the nonce). 1611 */ 1612 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) 1613 return (EINVAL); 1614 1615 /* 1616 * Always assume a 12 byte input nonce for now since that is 1617 * what OCF always generates. The full IV in the work request 1618 * is 16 bytes. 1619 */ 1620 iv_len = AES_BLOCK_LEN; 1621 1622 if (iv_len + aad_len > MAX_AAD_LEN) 1623 return (EMSGSIZE); 1624 1625 hash_size_in_response = s->ccm_mac.hash_len; 1626 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1627 op_type = CHCR_ENCRYPT_OP; 1628 else 1629 op_type = CHCR_DECRYPT_OP; 1630 1631 /* 1632 * The output buffer consists of the cipher text followed by 1633 * the tag when encrypting. For decryption it only contains 1634 * the plain text. 1635 * 1636 * Due to a firmware bug, the output buffer must include a 1637 * dummy output buffer for the IV and AAD prior to the real 1638 * output buffer. 1639 */ 1640 if (op_type == CHCR_ENCRYPT_OP) { 1641 if (iv_len + aad_len + crp->crp_payload_length + 1642 hash_size_in_response > MAX_REQUEST_SIZE) 1643 return (EFBIG); 1644 } else { 1645 if (iv_len + aad_len + crp->crp_payload_length > 1646 MAX_REQUEST_SIZE) 1647 return (EFBIG); 1648 } 1649 sglist_reset(s->sg_dsgl); 1650 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len + 1651 aad_len); 1652 if (error) 1653 return (error); 1654 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1655 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 1656 crp->crp_payload_output_start, crp->crp_payload_length); 1657 else 1658 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 1659 crp->crp_payload_start, crp->crp_payload_length); 1660 if (error) 1661 return (error); 1662 if (op_type == CHCR_ENCRYPT_OP) { 1663 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1664 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 1665 crp->crp_digest_start, hash_size_in_response); 1666 else 1667 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 1668 crp->crp_digest_start, hash_size_in_response); 1669 if (error) 1670 return (error); 1671 } 1672 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); 1673 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 1674 return (EFBIG); 1675 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 1676 1677 /* 1678 * The 'key' part of the key context consists of two copies of 1679 * the AES key. 1680 */ 1681 kctx_len = roundup2(s->blkcipher.key_len, 16) * 2; 1682 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 1683 1684 /* 1685 * The input buffer consists of the IV, AAD (including block 1686 * 0), and then the cipher/plain text. For decryption 1687 * requests the hash is appended after the cipher text. 1688 * 1689 * The IV is always stored at the start of the input buffer 1690 * even though it may be duplicated in the payload. The 1691 * crypto engine doesn't work properly if the IV offset points 1692 * inside of the AAD region, so a second copy is always 1693 * required. 1694 */ 1695 input_len = aad_len + crp->crp_payload_length; 1696 if (op_type == CHCR_DECRYPT_OP) 1697 input_len += hash_size_in_response; 1698 if (input_len > MAX_REQUEST_SIZE) 1699 return (EFBIG); 1700 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { 1701 imm_len = input_len; 1702 sgl_nsegs = 0; 1703 sgl_len = 0; 1704 } else { 1705 /* Block 0 is passed as immediate data. */ 1706 imm_len = b0_len; 1707 1708 sglist_reset(s->sg_ulptx); 1709 if (crp->crp_aad_length != 0) { 1710 if (crp->crp_aad != NULL) 1711 error = sglist_append(s->sg_ulptx, 1712 crp->crp_aad, crp->crp_aad_length); 1713 else 1714 error = sglist_append_sglist(s->sg_ulptx, 1715 s->sg_input, crp->crp_aad_start, 1716 crp->crp_aad_length); 1717 if (error) 1718 return (error); 1719 } 1720 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 1721 crp->crp_payload_start, crp->crp_payload_length); 1722 if (error) 1723 return (error); 1724 if (op_type == CHCR_DECRYPT_OP) { 1725 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 1726 crp->crp_digest_start, hash_size_in_response); 1727 if (error) 1728 return (error); 1729 } 1730 sgl_nsegs = s->sg_ulptx->sg_nseg; 1731 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 1732 } 1733 1734 aad_start = iv_len + 1; 1735 aad_stop = aad_start + aad_len - 1; 1736 cipher_start = aad_stop + 1; 1737 if (op_type == CHCR_DECRYPT_OP) 1738 cipher_stop = hash_size_in_response; 1739 else 1740 cipher_stop = 0; 1741 if (op_type == CHCR_DECRYPT_OP) 1742 auth_insert = hash_size_in_response; 1743 else 1744 auth_insert = 0; 1745 1746 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + 1747 sgl_len; 1748 if (wr_len > SGE_MAX_WR_LEN) 1749 return (EFBIG); 1750 wr = alloc_wrqe(wr_len, s->port->txq); 1751 if (wr == NULL) { 1752 counter_u64_add(sc->stats_wr_nomem, 1); 1753 return (ENOMEM); 1754 } 1755 crwr = wrtod(wr); 1756 memset(crwr, 0, wr_len); 1757 1758 /* 1759 * Read the nonce from the request. Use the nonce to generate 1760 * the full IV with the counter set to 0. 1761 */ 1762 memset(iv, 0, iv_len); 1763 iv[0] = (15 - AES_CCM_IV_LEN) - 1; 1764 memcpy(iv + 1, crp->crp_iv, AES_CCM_IV_LEN); 1765 1766 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, 1767 crp); 1768 1769 crwr->sec_cpl.op_ivinsrtofst = htobe32( 1770 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 1771 V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | 1772 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 1773 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 1774 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 1775 1776 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); 1777 1778 /* 1779 * NB: cipherstop is explicitly set to 0. See comments above 1780 * in ccr_gcm(). 1781 */ 1782 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 1783 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 1784 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 1785 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1786 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 1787 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 1788 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | 1789 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 1790 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1791 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1792 1793 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1794 hmac_ctrl = ccr_hmac_ctrl(AES_CBC_MAC_HASH_LEN, hash_size_in_response); 1795 crwr->sec_cpl.seqno_numivs = htobe32( 1796 V_SCMD_SEQ_NO_CTRL(0) | 1797 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 1798 V_SCMD_ENC_DEC_CTRL(op_type) | 1799 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 0 : 1) | 1800 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) | 1801 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) | 1802 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1803 V_SCMD_IV_SIZE(iv_len / 2) | 1804 V_SCMD_NUM_IVS(0)); 1805 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1806 V_SCMD_IV_GEN_CTRL(0) | 1807 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1808 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1809 1810 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1811 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); 1812 memcpy(crwr->key_ctx.key + roundup(s->blkcipher.key_len, 16), 1813 s->blkcipher.enckey, s->blkcipher.key_len); 1814 1815 dst = (char *)(crwr + 1) + kctx_len; 1816 ccr_write_phys_dsgl(s, dst, dsgl_nsegs); 1817 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1818 memcpy(dst, iv, iv_len); 1819 dst += iv_len; 1820 generate_ccm_b0(crp, hash_size_in_response, iv, dst); 1821 if (sgl_nsegs == 0) { 1822 dst += b0_len; 1823 if (crp->crp_aad_length != 0) { 1824 if (crp->crp_aad != NULL) 1825 memcpy(dst, crp->crp_aad, crp->crp_aad_length); 1826 else 1827 crypto_copydata(crp, crp->crp_aad_start, 1828 crp->crp_aad_length, dst); 1829 dst += crp->crp_aad_length; 1830 } 1831 crypto_copydata(crp, crp->crp_payload_start, 1832 crp->crp_payload_length, dst); 1833 dst += crp->crp_payload_length; 1834 if (op_type == CHCR_DECRYPT_OP) 1835 crypto_copydata(crp, crp->crp_digest_start, 1836 hash_size_in_response, dst); 1837 } else { 1838 dst += CCM_B0_SIZE; 1839 if (b0_len > CCM_B0_SIZE) { 1840 /* 1841 * If there is AAD, insert padding including a 1842 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL 1843 * is 16-byte aligned. 1844 */ 1845 KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE, 1846 ("b0_len mismatch")); 1847 memset(dst + CCM_AAD_FIELD_SIZE, 0, 1848 8 - CCM_AAD_FIELD_SIZE); 1849 idata = (void *)(dst + 8); 1850 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 1851 idata->len = htobe32(0); 1852 dst = (void *)(idata + 1); 1853 } 1854 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 1855 } 1856 1857 /* XXX: TODO backpressure */ 1858 t4_wrq_tx(sc->adapter, wr); 1859 1860 explicit_bzero(iv, sizeof(iv)); 1861 return (0); 1862 } 1863 1864 static int 1865 ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s, 1866 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1867 { 1868 1869 /* 1870 * The updated IV to permit chained requests is at 1871 * cpl->data[2], but OCF doesn't permit chained requests. 1872 * 1873 * Note that the hardware should always verify the CBC MAC 1874 * hash. 1875 */ 1876 return (error); 1877 } 1878 1879 /* 1880 * Handle a CCM request that is not supported by the crypto engine by 1881 * performing the operation in software. Derived from swcr_authenc(). 1882 */ 1883 static void 1884 ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp) 1885 { 1886 struct auth_hash *axf; 1887 struct enc_xform *exf; 1888 union authctx *auth_ctx; 1889 void *kschedule; 1890 char block[CCM_CBC_BLOCK_LEN]; 1891 char digest[AES_CBC_MAC_HASH_LEN]; 1892 char iv[AES_CCM_IV_LEN]; 1893 int error, i, len; 1894 1895 auth_ctx = NULL; 1896 kschedule = NULL; 1897 1898 /* Initialize the MAC. */ 1899 switch (s->blkcipher.key_len) { 1900 case 16: 1901 axf = &auth_hash_ccm_cbc_mac_128; 1902 break; 1903 case 24: 1904 axf = &auth_hash_ccm_cbc_mac_192; 1905 break; 1906 case 32: 1907 axf = &auth_hash_ccm_cbc_mac_256; 1908 break; 1909 default: 1910 error = EINVAL; 1911 goto out; 1912 } 1913 auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); 1914 if (auth_ctx == NULL) { 1915 error = ENOMEM; 1916 goto out; 1917 } 1918 axf->Init(auth_ctx); 1919 axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); 1920 1921 /* Initialize the cipher. */ 1922 exf = &enc_xform_ccm; 1923 kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT); 1924 if (kschedule == NULL) { 1925 error = ENOMEM; 1926 goto out; 1927 } 1928 error = exf->setkey(kschedule, s->blkcipher.enckey, 1929 s->blkcipher.key_len); 1930 if (error) 1931 goto out; 1932 1933 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { 1934 error = EINVAL; 1935 goto out; 1936 } 1937 memcpy(iv, crp->crp_iv, AES_CCM_IV_LEN); 1938 1939 auth_ctx->aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length; 1940 auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length; 1941 axf->Reinit(auth_ctx, iv, sizeof(iv)); 1942 1943 /* MAC the AAD. */ 1944 if (crp->crp_aad != NULL) 1945 error = axf->Update(auth_ctx, crp->crp_aad, 1946 crp->crp_aad_length); 1947 else 1948 error = crypto_apply(crp, crp->crp_aad_start, 1949 crp->crp_aad_length, axf->Update, auth_ctx); 1950 if (error) 1951 goto out; 1952 1953 exf->reinit(kschedule, iv); 1954 1955 /* Do encryption/decryption with MAC */ 1956 for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { 1957 len = imin(crp->crp_payload_length - i, sizeof(block)); 1958 crypto_copydata(crp, crp->crp_payload_start + i, len, block); 1959 bzero(block + len, sizeof(block) - len); 1960 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1961 axf->Update(auth_ctx, block, len); 1962 exf->encrypt(kschedule, block, block); 1963 crypto_copyback(crp, crp->crp_payload_start + i, len, 1964 block); 1965 } else { 1966 exf->decrypt(kschedule, block, block); 1967 axf->Update(auth_ctx, block, len); 1968 } 1969 } 1970 1971 /* Finalize MAC. */ 1972 axf->Final(digest, auth_ctx); 1973 1974 /* Inject or validate tag. */ 1975 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1976 crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), 1977 digest); 1978 error = 0; 1979 } else { 1980 char digest2[AES_CBC_MAC_HASH_LEN]; 1981 1982 crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), 1983 digest2); 1984 if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { 1985 error = 0; 1986 1987 /* Tag matches, decrypt data. */ 1988 exf->reinit(kschedule, iv); 1989 for (i = 0; i < crp->crp_payload_length; 1990 i += sizeof(block)) { 1991 len = imin(crp->crp_payload_length - i, 1992 sizeof(block)); 1993 crypto_copydata(crp, crp->crp_payload_start + i, 1994 len, block); 1995 bzero(block + len, sizeof(block) - len); 1996 exf->decrypt(kschedule, block, block); 1997 crypto_copyback(crp, crp->crp_payload_start + i, 1998 len, block); 1999 } 2000 } else 2001 error = EBADMSG; 2002 explicit_bzero(digest2, sizeof(digest2)); 2003 } 2004 2005 out: 2006 zfree(kschedule, M_CCR); 2007 zfree(auth_ctx, M_CCR); 2008 explicit_bzero(block, sizeof(block)); 2009 explicit_bzero(iv, sizeof(iv)); 2010 explicit_bzero(digest, sizeof(digest)); 2011 crp->crp_etype = error; 2012 crypto_done(crp); 2013 } 2014 2015 static void 2016 ccr_identify(driver_t *driver, device_t parent) 2017 { 2018 struct adapter *sc; 2019 2020 sc = device_get_softc(parent); 2021 if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE && 2022 device_find_child(parent, "ccr", -1) == NULL) 2023 device_add_child(parent, "ccr", -1); 2024 } 2025 2026 static int 2027 ccr_probe(device_t dev) 2028 { 2029 2030 device_set_desc(dev, "Chelsio Crypto Accelerator"); 2031 return (BUS_PROBE_DEFAULT); 2032 } 2033 2034 static void 2035 ccr_sysctls(struct ccr_softc *sc) 2036 { 2037 struct sysctl_ctx_list *ctx; 2038 struct sysctl_oid *oid, *port_oid; 2039 struct sysctl_oid_list *children; 2040 char buf[16]; 2041 int i; 2042 2043 ctx = device_get_sysctl_ctx(sc->dev); 2044 2045 /* 2046 * dev.ccr.X. 2047 */ 2048 oid = device_get_sysctl_tree(sc->dev); 2049 children = SYSCTL_CHILDREN(oid); 2050 2051 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW, 2052 &sc->port_mask, 0, "Mask of enabled ports"); 2053 2054 /* 2055 * dev.ccr.X.stats. 2056 */ 2057 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", 2058 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics"); 2059 children = SYSCTL_CHILDREN(oid); 2060 2061 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD, 2062 &sc->stats_hash, "Hash requests submitted"); 2063 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD, 2064 &sc->stats_hmac, "HMAC requests submitted"); 2065 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_encrypt", 2066 CTLFLAG_RD, &sc->stats_blkcipher_encrypt, 2067 "Cipher encryption requests submitted"); 2068 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_decrypt", 2069 CTLFLAG_RD, &sc->stats_blkcipher_decrypt, 2070 "Cipher decryption requests submitted"); 2071 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_encrypt", 2072 CTLFLAG_RD, &sc->stats_eta_encrypt, 2073 "Combined AES+HMAC encryption requests submitted"); 2074 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_decrypt", 2075 CTLFLAG_RD, &sc->stats_eta_decrypt, 2076 "Combined AES+HMAC decryption requests submitted"); 2077 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_encrypt", 2078 CTLFLAG_RD, &sc->stats_gcm_encrypt, 2079 "AES-GCM encryption requests submitted"); 2080 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_decrypt", 2081 CTLFLAG_RD, &sc->stats_gcm_decrypt, 2082 "AES-GCM decryption requests submitted"); 2083 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_encrypt", 2084 CTLFLAG_RD, &sc->stats_ccm_encrypt, 2085 "AES-CCM encryption requests submitted"); 2086 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_decrypt", 2087 CTLFLAG_RD, &sc->stats_ccm_decrypt, 2088 "AES-CCM decryption requests submitted"); 2089 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD, 2090 &sc->stats_wr_nomem, "Work request memory allocation failures"); 2091 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD, 2092 &sc->stats_inflight, "Requests currently pending"); 2093 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD, 2094 &sc->stats_mac_error, "MAC errors"); 2095 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD, 2096 &sc->stats_pad_error, "Padding errors"); 2097 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sglist_error", 2098 CTLFLAG_RD, &sc->stats_sglist_error, 2099 "Requests for which DMA mapping failed"); 2100 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error", 2101 CTLFLAG_RD, &sc->stats_process_error, 2102 "Requests failed during queueing"); 2103 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback", 2104 CTLFLAG_RD, &sc->stats_sw_fallback, 2105 "Requests processed by falling back to software"); 2106 2107 /* 2108 * dev.ccr.X.stats.port 2109 */ 2110 port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port", 2111 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics"); 2112 2113 for (i = 0; i < nitems(sc->ports); i++) { 2114 if (sc->ports[i].rxq == NULL) 2115 continue; 2116 2117 /* 2118 * dev.ccr.X.stats.port.Y 2119 */ 2120 snprintf(buf, sizeof(buf), "%d", i); 2121 oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO, 2122 buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf); 2123 children = SYSCTL_CHILDREN(oid); 2124 2125 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions", 2126 CTLFLAG_RD, &sc->ports[i].active_sessions, 0, 2127 "Count of active sessions"); 2128 } 2129 } 2130 2131 static void 2132 ccr_init_port(struct ccr_softc *sc, int port) 2133 { 2134 2135 sc->ports[port].txq = &sc->adapter->sge.ctrlq[port]; 2136 sc->ports[port].rxq = 2137 &sc->adapter->sge.rxq[sc->adapter->port[port]->vi->first_rxq]; 2138 sc->ports[port].tx_channel_id = port; 2139 _Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS - 1, 2140 "Too many ports to fit in port_mask"); 2141 sc->port_mask |= 1u << port; 2142 } 2143 2144 static int 2145 ccr_attach(device_t dev) 2146 { 2147 struct ccr_softc *sc; 2148 int32_t cid; 2149 int i; 2150 2151 sc = device_get_softc(dev); 2152 sc->dev = dev; 2153 sc->adapter = device_get_softc(device_get_parent(dev)); 2154 for_each_port(sc->adapter, i) { 2155 ccr_init_port(sc, i); 2156 } 2157 cid = crypto_get_driverid(dev, sizeof(struct ccr_session), 2158 CRYPTOCAP_F_HARDWARE); 2159 if (cid < 0) { 2160 device_printf(dev, "could not get crypto driver id\n"); 2161 return (ENXIO); 2162 } 2163 sc->cid = cid; 2164 sc->adapter->ccr_softc = sc; 2165 2166 mtx_init(&sc->lock, "ccr", NULL, MTX_DEF); 2167 sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK); 2168 sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK); 2169 sc->stats_blkcipher_encrypt = counter_u64_alloc(M_WAITOK); 2170 sc->stats_blkcipher_decrypt = counter_u64_alloc(M_WAITOK); 2171 sc->stats_hash = counter_u64_alloc(M_WAITOK); 2172 sc->stats_hmac = counter_u64_alloc(M_WAITOK); 2173 sc->stats_eta_encrypt = counter_u64_alloc(M_WAITOK); 2174 sc->stats_eta_decrypt = counter_u64_alloc(M_WAITOK); 2175 sc->stats_gcm_encrypt = counter_u64_alloc(M_WAITOK); 2176 sc->stats_gcm_decrypt = counter_u64_alloc(M_WAITOK); 2177 sc->stats_ccm_encrypt = counter_u64_alloc(M_WAITOK); 2178 sc->stats_ccm_decrypt = counter_u64_alloc(M_WAITOK); 2179 sc->stats_wr_nomem = counter_u64_alloc(M_WAITOK); 2180 sc->stats_inflight = counter_u64_alloc(M_WAITOK); 2181 sc->stats_mac_error = counter_u64_alloc(M_WAITOK); 2182 sc->stats_pad_error = counter_u64_alloc(M_WAITOK); 2183 sc->stats_sglist_error = counter_u64_alloc(M_WAITOK); 2184 sc->stats_process_error = counter_u64_alloc(M_WAITOK); 2185 sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK); 2186 ccr_sysctls(sc); 2187 2188 return (0); 2189 } 2190 2191 static int 2192 ccr_detach(device_t dev) 2193 { 2194 struct ccr_softc *sc; 2195 2196 sc = device_get_softc(dev); 2197 2198 mtx_lock(&sc->lock); 2199 sc->detaching = true; 2200 mtx_unlock(&sc->lock); 2201 2202 crypto_unregister_all(sc->cid); 2203 2204 mtx_destroy(&sc->lock); 2205 counter_u64_free(sc->stats_blkcipher_encrypt); 2206 counter_u64_free(sc->stats_blkcipher_decrypt); 2207 counter_u64_free(sc->stats_hash); 2208 counter_u64_free(sc->stats_hmac); 2209 counter_u64_free(sc->stats_eta_encrypt); 2210 counter_u64_free(sc->stats_eta_decrypt); 2211 counter_u64_free(sc->stats_gcm_encrypt); 2212 counter_u64_free(sc->stats_gcm_decrypt); 2213 counter_u64_free(sc->stats_ccm_encrypt); 2214 counter_u64_free(sc->stats_ccm_decrypt); 2215 counter_u64_free(sc->stats_wr_nomem); 2216 counter_u64_free(sc->stats_inflight); 2217 counter_u64_free(sc->stats_mac_error); 2218 counter_u64_free(sc->stats_pad_error); 2219 counter_u64_free(sc->stats_sglist_error); 2220 counter_u64_free(sc->stats_process_error); 2221 counter_u64_free(sc->stats_sw_fallback); 2222 sglist_free(sc->sg_iv_aad); 2223 free(sc->iv_aad_buf, M_CCR); 2224 sc->adapter->ccr_softc = NULL; 2225 return (0); 2226 } 2227 2228 static void 2229 ccr_init_hash_digest(struct ccr_session *s) 2230 { 2231 union authctx auth_ctx; 2232 struct auth_hash *axf; 2233 2234 axf = s->hmac.auth_hash; 2235 axf->Init(&auth_ctx); 2236 t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads); 2237 } 2238 2239 static bool 2240 ccr_aes_check_keylen(int alg, int klen) 2241 { 2242 2243 switch (klen * 8) { 2244 case 128: 2245 case 192: 2246 if (alg == CRYPTO_AES_XTS) 2247 return (false); 2248 break; 2249 case 256: 2250 break; 2251 case 512: 2252 if (alg != CRYPTO_AES_XTS) 2253 return (false); 2254 break; 2255 default: 2256 return (false); 2257 } 2258 return (true); 2259 } 2260 2261 static void 2262 ccr_aes_setkey(struct ccr_session *s, const void *key, int klen) 2263 { 2264 unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size; 2265 unsigned int opad_present; 2266 2267 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 2268 kbits = (klen / 2) * 8; 2269 else 2270 kbits = klen * 8; 2271 switch (kbits) { 2272 case 128: 2273 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 2274 break; 2275 case 192: 2276 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 2277 break; 2278 case 256: 2279 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 2280 break; 2281 default: 2282 panic("should not get here"); 2283 } 2284 2285 s->blkcipher.key_len = klen; 2286 memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); 2287 switch (s->blkcipher.cipher_mode) { 2288 case SCMD_CIPH_MODE_AES_CBC: 2289 case SCMD_CIPH_MODE_AES_XTS: 2290 t4_aes_getdeckey(s->blkcipher.deckey, key, kbits); 2291 break; 2292 } 2293 2294 kctx_len = roundup2(s->blkcipher.key_len, 16); 2295 switch (s->mode) { 2296 case ETA: 2297 mk_size = s->hmac.mk_size; 2298 opad_present = 1; 2299 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 2300 kctx_len += iopad_size * 2; 2301 break; 2302 case GCM: 2303 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 2304 opad_present = 0; 2305 kctx_len += GMAC_BLOCK_LEN; 2306 break; 2307 case CCM: 2308 switch (kbits) { 2309 case 128: 2310 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 2311 break; 2312 case 192: 2313 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; 2314 break; 2315 case 256: 2316 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2317 break; 2318 default: 2319 panic("should not get here"); 2320 } 2321 opad_present = 0; 2322 kctx_len *= 2; 2323 break; 2324 default: 2325 mk_size = CHCR_KEYCTX_NO_KEY; 2326 opad_present = 0; 2327 break; 2328 } 2329 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; 2330 s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | 2331 V_KEY_CONTEXT_DUAL_CK(s->blkcipher.cipher_mode == 2332 SCMD_CIPH_MODE_AES_XTS) | 2333 V_KEY_CONTEXT_OPAD_PRESENT(opad_present) | 2334 V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) | 2335 V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1)); 2336 } 2337 2338 static bool 2339 ccr_auth_supported(const struct crypto_session_params *csp) 2340 { 2341 2342 switch (csp->csp_auth_alg) { 2343 case CRYPTO_SHA1: 2344 case CRYPTO_SHA2_224: 2345 case CRYPTO_SHA2_256: 2346 case CRYPTO_SHA2_384: 2347 case CRYPTO_SHA2_512: 2348 case CRYPTO_SHA1_HMAC: 2349 case CRYPTO_SHA2_224_HMAC: 2350 case CRYPTO_SHA2_256_HMAC: 2351 case CRYPTO_SHA2_384_HMAC: 2352 case CRYPTO_SHA2_512_HMAC: 2353 break; 2354 default: 2355 return (false); 2356 } 2357 return (true); 2358 } 2359 2360 static bool 2361 ccr_cipher_supported(const struct crypto_session_params *csp) 2362 { 2363 2364 switch (csp->csp_cipher_alg) { 2365 case CRYPTO_AES_CBC: 2366 if (csp->csp_ivlen != AES_BLOCK_LEN) 2367 return (false); 2368 break; 2369 case CRYPTO_AES_ICM: 2370 if (csp->csp_ivlen != AES_BLOCK_LEN) 2371 return (false); 2372 break; 2373 case CRYPTO_AES_XTS: 2374 if (csp->csp_ivlen != AES_XTS_IV_LEN) 2375 return (false); 2376 break; 2377 default: 2378 return (false); 2379 } 2380 return (ccr_aes_check_keylen(csp->csp_cipher_alg, 2381 csp->csp_cipher_klen)); 2382 } 2383 2384 static int 2385 ccr_cipher_mode(const struct crypto_session_params *csp) 2386 { 2387 2388 switch (csp->csp_cipher_alg) { 2389 case CRYPTO_AES_CBC: 2390 return (SCMD_CIPH_MODE_AES_CBC); 2391 case CRYPTO_AES_ICM: 2392 return (SCMD_CIPH_MODE_AES_CTR); 2393 case CRYPTO_AES_NIST_GCM_16: 2394 return (SCMD_CIPH_MODE_AES_GCM); 2395 case CRYPTO_AES_XTS: 2396 return (SCMD_CIPH_MODE_AES_XTS); 2397 case CRYPTO_AES_CCM_16: 2398 return (SCMD_CIPH_MODE_AES_CCM); 2399 default: 2400 return (SCMD_CIPH_MODE_NOP); 2401 } 2402 } 2403 2404 static int 2405 ccr_probesession(device_t dev, const struct crypto_session_params *csp) 2406 { 2407 unsigned int cipher_mode; 2408 2409 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 2410 0) 2411 return (EINVAL); 2412 switch (csp->csp_mode) { 2413 case CSP_MODE_DIGEST: 2414 if (!ccr_auth_supported(csp)) 2415 return (EINVAL); 2416 break; 2417 case CSP_MODE_CIPHER: 2418 if (!ccr_cipher_supported(csp)) 2419 return (EINVAL); 2420 break; 2421 case CSP_MODE_AEAD: 2422 switch (csp->csp_cipher_alg) { 2423 case CRYPTO_AES_NIST_GCM_16: 2424 if (csp->csp_ivlen != AES_GCM_IV_LEN) 2425 return (EINVAL); 2426 if (csp->csp_auth_mlen < 0 || 2427 csp->csp_auth_mlen > AES_GMAC_HASH_LEN) 2428 return (EINVAL); 2429 break; 2430 case CRYPTO_AES_CCM_16: 2431 if (csp->csp_ivlen != AES_CCM_IV_LEN) 2432 return (EINVAL); 2433 if (csp->csp_auth_mlen < 0 || 2434 csp->csp_auth_mlen > AES_CBC_MAC_HASH_LEN) 2435 return (EINVAL); 2436 break; 2437 default: 2438 return (EINVAL); 2439 } 2440 break; 2441 case CSP_MODE_ETA: 2442 if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp)) 2443 return (EINVAL); 2444 break; 2445 default: 2446 return (EINVAL); 2447 } 2448 2449 if (csp->csp_cipher_klen != 0) { 2450 cipher_mode = ccr_cipher_mode(csp); 2451 if (cipher_mode == SCMD_CIPH_MODE_NOP) 2452 return (EINVAL); 2453 } 2454 2455 return (CRYPTODEV_PROBE_HARDWARE); 2456 } 2457 2458 /* 2459 * Select an available port with the lowest number of active sessions. 2460 */ 2461 static struct ccr_port * 2462 ccr_choose_port(struct ccr_softc *sc) 2463 { 2464 struct ccr_port *best, *p; 2465 int i; 2466 2467 mtx_assert(&sc->lock, MA_OWNED); 2468 best = NULL; 2469 for (i = 0; i < nitems(sc->ports); i++) { 2470 p = &sc->ports[i]; 2471 2472 /* Ignore non-existent ports. */ 2473 if (p->rxq == NULL) 2474 continue; 2475 2476 /* 2477 * XXX: Ignore ports whose queues aren't initialized. 2478 * This is racy as the rxq can be destroyed by the 2479 * associated VI detaching. Eventually ccr should use 2480 * dedicated queues. 2481 */ 2482 if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL) 2483 continue; 2484 2485 if ((sc->port_mask & (1u << i)) == 0) 2486 continue; 2487 2488 if (best == NULL || 2489 p->active_sessions < best->active_sessions) 2490 best = p; 2491 } 2492 return (best); 2493 } 2494 2495 static void 2496 ccr_delete_session(struct ccr_session *s) 2497 { 2498 sglist_free(s->sg_input); 2499 sglist_free(s->sg_output); 2500 sglist_free(s->sg_ulptx); 2501 sglist_free(s->sg_dsgl); 2502 mtx_destroy(&s->lock); 2503 } 2504 2505 static int 2506 ccr_newsession(device_t dev, crypto_session_t cses, 2507 const struct crypto_session_params *csp) 2508 { 2509 struct ccr_softc *sc; 2510 struct ccr_session *s; 2511 struct auth_hash *auth_hash; 2512 unsigned int auth_mode, cipher_mode, mk_size; 2513 unsigned int partial_digest_len; 2514 2515 switch (csp->csp_auth_alg) { 2516 case CRYPTO_SHA1: 2517 case CRYPTO_SHA1_HMAC: 2518 auth_hash = &auth_hash_hmac_sha1; 2519 auth_mode = SCMD_AUTH_MODE_SHA1; 2520 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; 2521 partial_digest_len = SHA1_HASH_LEN; 2522 break; 2523 case CRYPTO_SHA2_224: 2524 case CRYPTO_SHA2_224_HMAC: 2525 auth_hash = &auth_hash_hmac_sha2_224; 2526 auth_mode = SCMD_AUTH_MODE_SHA224; 2527 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2528 partial_digest_len = SHA2_256_HASH_LEN; 2529 break; 2530 case CRYPTO_SHA2_256: 2531 case CRYPTO_SHA2_256_HMAC: 2532 auth_hash = &auth_hash_hmac_sha2_256; 2533 auth_mode = SCMD_AUTH_MODE_SHA256; 2534 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2535 partial_digest_len = SHA2_256_HASH_LEN; 2536 break; 2537 case CRYPTO_SHA2_384: 2538 case CRYPTO_SHA2_384_HMAC: 2539 auth_hash = &auth_hash_hmac_sha2_384; 2540 auth_mode = SCMD_AUTH_MODE_SHA512_384; 2541 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 2542 partial_digest_len = SHA2_512_HASH_LEN; 2543 break; 2544 case CRYPTO_SHA2_512: 2545 case CRYPTO_SHA2_512_HMAC: 2546 auth_hash = &auth_hash_hmac_sha2_512; 2547 auth_mode = SCMD_AUTH_MODE_SHA512_512; 2548 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 2549 partial_digest_len = SHA2_512_HASH_LEN; 2550 break; 2551 default: 2552 auth_hash = NULL; 2553 auth_mode = SCMD_AUTH_MODE_NOP; 2554 mk_size = 0; 2555 partial_digest_len = 0; 2556 break; 2557 } 2558 2559 cipher_mode = ccr_cipher_mode(csp); 2560 2561 #ifdef INVARIANTS 2562 switch (csp->csp_mode) { 2563 case CSP_MODE_CIPHER: 2564 if (cipher_mode == SCMD_CIPH_MODE_NOP || 2565 cipher_mode == SCMD_CIPH_MODE_AES_GCM || 2566 cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2567 panic("invalid cipher algo"); 2568 break; 2569 case CSP_MODE_DIGEST: 2570 if (auth_mode == SCMD_AUTH_MODE_NOP) 2571 panic("invalid auth algo"); 2572 break; 2573 case CSP_MODE_AEAD: 2574 if (cipher_mode != SCMD_CIPH_MODE_AES_GCM && 2575 cipher_mode != SCMD_CIPH_MODE_AES_CCM) 2576 panic("invalid aead cipher algo"); 2577 if (auth_mode != SCMD_AUTH_MODE_NOP) 2578 panic("invalid aead auth aglo"); 2579 break; 2580 case CSP_MODE_ETA: 2581 if (cipher_mode == SCMD_CIPH_MODE_NOP || 2582 cipher_mode == SCMD_CIPH_MODE_AES_GCM || 2583 cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2584 panic("invalid cipher algo"); 2585 if (auth_mode == SCMD_AUTH_MODE_NOP) 2586 panic("invalid auth algo"); 2587 break; 2588 default: 2589 panic("invalid csp mode"); 2590 } 2591 #endif 2592 2593 s = crypto_get_driver_session(cses); 2594 mtx_init(&s->lock, "ccr session", NULL, MTX_DEF); 2595 s->sg_input = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); 2596 s->sg_output = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); 2597 s->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); 2598 s->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_NOWAIT); 2599 if (s->sg_input == NULL || s->sg_output == NULL || 2600 s->sg_ulptx == NULL || s->sg_dsgl == NULL) { 2601 ccr_delete_session(s); 2602 return (ENOMEM); 2603 } 2604 2605 sc = device_get_softc(dev); 2606 2607 mtx_lock(&sc->lock); 2608 if (sc->detaching) { 2609 mtx_unlock(&sc->lock); 2610 ccr_delete_session(s); 2611 return (ENXIO); 2612 } 2613 2614 s->port = ccr_choose_port(sc); 2615 if (s->port == NULL) { 2616 mtx_unlock(&sc->lock); 2617 ccr_delete_session(s); 2618 return (ENXIO); 2619 } 2620 2621 switch (csp->csp_mode) { 2622 case CSP_MODE_AEAD: 2623 if (cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2624 s->mode = CCM; 2625 else 2626 s->mode = GCM; 2627 break; 2628 case CSP_MODE_ETA: 2629 s->mode = ETA; 2630 break; 2631 case CSP_MODE_DIGEST: 2632 if (csp->csp_auth_klen != 0) 2633 s->mode = HMAC; 2634 else 2635 s->mode = HASH; 2636 break; 2637 case CSP_MODE_CIPHER: 2638 s->mode = BLKCIPHER; 2639 break; 2640 } 2641 2642 if (s->mode == GCM) { 2643 if (csp->csp_auth_mlen == 0) 2644 s->gmac.hash_len = AES_GMAC_HASH_LEN; 2645 else 2646 s->gmac.hash_len = csp->csp_auth_mlen; 2647 t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen, 2648 s->gmac.ghash_h); 2649 } else if (s->mode == CCM) { 2650 if (csp->csp_auth_mlen == 0) 2651 s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN; 2652 else 2653 s->ccm_mac.hash_len = csp->csp_auth_mlen; 2654 } else if (auth_mode != SCMD_AUTH_MODE_NOP) { 2655 s->hmac.auth_hash = auth_hash; 2656 s->hmac.auth_mode = auth_mode; 2657 s->hmac.mk_size = mk_size; 2658 s->hmac.partial_digest_len = partial_digest_len; 2659 if (csp->csp_auth_mlen == 0) 2660 s->hmac.hash_len = auth_hash->hashsize; 2661 else 2662 s->hmac.hash_len = csp->csp_auth_mlen; 2663 if (csp->csp_auth_key != NULL) 2664 t4_init_hmac_digest(auth_hash, partial_digest_len, 2665 csp->csp_auth_key, csp->csp_auth_klen, 2666 s->hmac.pads); 2667 else 2668 ccr_init_hash_digest(s); 2669 } 2670 if (cipher_mode != SCMD_CIPH_MODE_NOP) { 2671 s->blkcipher.cipher_mode = cipher_mode; 2672 s->blkcipher.iv_len = csp->csp_ivlen; 2673 if (csp->csp_cipher_key != NULL) 2674 ccr_aes_setkey(s, csp->csp_cipher_key, 2675 csp->csp_cipher_klen); 2676 } 2677 2678 s->port->active_sessions++; 2679 mtx_unlock(&sc->lock); 2680 return (0); 2681 } 2682 2683 static void 2684 ccr_freesession(device_t dev, crypto_session_t cses) 2685 { 2686 struct ccr_softc *sc; 2687 struct ccr_session *s; 2688 2689 sc = device_get_softc(dev); 2690 s = crypto_get_driver_session(cses); 2691 #ifdef INVARIANTS 2692 if (s->pending != 0) 2693 device_printf(dev, 2694 "session %p freed with %d pending requests\n", s, 2695 s->pending); 2696 #endif 2697 mtx_lock(&sc->lock); 2698 s->port->active_sessions--; 2699 mtx_unlock(&sc->lock); 2700 ccr_delete_session(s); 2701 } 2702 2703 static int 2704 ccr_process(device_t dev, struct cryptop *crp, int hint) 2705 { 2706 const struct crypto_session_params *csp; 2707 struct ccr_softc *sc; 2708 struct ccr_session *s; 2709 int error; 2710 2711 csp = crypto_get_params(crp->crp_session); 2712 s = crypto_get_driver_session(crp->crp_session); 2713 sc = device_get_softc(dev); 2714 2715 mtx_lock(&s->lock); 2716 error = ccr_populate_sglist(s->sg_input, &crp->crp_buf); 2717 if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) 2718 error = ccr_populate_sglist(s->sg_output, &crp->crp_obuf); 2719 if (error) { 2720 counter_u64_add(sc->stats_sglist_error, 1); 2721 goto out; 2722 } 2723 2724 switch (s->mode) { 2725 case HASH: 2726 error = ccr_hash(sc, s, crp); 2727 if (error == 0) 2728 counter_u64_add(sc->stats_hash, 1); 2729 break; 2730 case HMAC: 2731 if (crp->crp_auth_key != NULL) 2732 t4_init_hmac_digest(s->hmac.auth_hash, 2733 s->hmac.partial_digest_len, crp->crp_auth_key, 2734 csp->csp_auth_klen, s->hmac.pads); 2735 error = ccr_hash(sc, s, crp); 2736 if (error == 0) 2737 counter_u64_add(sc->stats_hmac, 1); 2738 break; 2739 case BLKCIPHER: 2740 if (crp->crp_cipher_key != NULL) 2741 ccr_aes_setkey(s, crp->crp_cipher_key, 2742 csp->csp_cipher_klen); 2743 error = ccr_blkcipher(sc, s, crp); 2744 if (error == 0) { 2745 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2746 counter_u64_add(sc->stats_blkcipher_encrypt, 1); 2747 else 2748 counter_u64_add(sc->stats_blkcipher_decrypt, 1); 2749 } 2750 break; 2751 case ETA: 2752 if (crp->crp_auth_key != NULL) 2753 t4_init_hmac_digest(s->hmac.auth_hash, 2754 s->hmac.partial_digest_len, crp->crp_auth_key, 2755 csp->csp_auth_klen, s->hmac.pads); 2756 if (crp->crp_cipher_key != NULL) 2757 ccr_aes_setkey(s, crp->crp_cipher_key, 2758 csp->csp_cipher_klen); 2759 error = ccr_eta(sc, s, crp); 2760 if (error == 0) { 2761 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2762 counter_u64_add(sc->stats_eta_encrypt, 1); 2763 else 2764 counter_u64_add(sc->stats_eta_decrypt, 1); 2765 } 2766 break; 2767 case GCM: 2768 if (crp->crp_cipher_key != NULL) { 2769 t4_init_gmac_hash(crp->crp_cipher_key, 2770 csp->csp_cipher_klen, s->gmac.ghash_h); 2771 ccr_aes_setkey(s, crp->crp_cipher_key, 2772 csp->csp_cipher_klen); 2773 } 2774 if (crp->crp_payload_length == 0) { 2775 mtx_unlock(&s->lock); 2776 ccr_gcm_soft(s, crp); 2777 return (0); 2778 } 2779 error = ccr_gcm(sc, s, crp); 2780 if (error == EMSGSIZE || error == EFBIG) { 2781 counter_u64_add(sc->stats_sw_fallback, 1); 2782 mtx_unlock(&s->lock); 2783 ccr_gcm_soft(s, crp); 2784 return (0); 2785 } 2786 if (error == 0) { 2787 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2788 counter_u64_add(sc->stats_gcm_encrypt, 1); 2789 else 2790 counter_u64_add(sc->stats_gcm_decrypt, 1); 2791 } 2792 break; 2793 case CCM: 2794 if (crp->crp_cipher_key != NULL) { 2795 ccr_aes_setkey(s, crp->crp_cipher_key, 2796 csp->csp_cipher_klen); 2797 } 2798 error = ccr_ccm(sc, s, crp); 2799 if (error == EMSGSIZE || error == EFBIG) { 2800 counter_u64_add(sc->stats_sw_fallback, 1); 2801 mtx_unlock(&s->lock); 2802 ccr_ccm_soft(s, crp); 2803 return (0); 2804 } 2805 if (error == 0) { 2806 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2807 counter_u64_add(sc->stats_ccm_encrypt, 1); 2808 else 2809 counter_u64_add(sc->stats_ccm_decrypt, 1); 2810 } 2811 break; 2812 } 2813 2814 if (error == 0) { 2815 #ifdef INVARIANTS 2816 s->pending++; 2817 #endif 2818 counter_u64_add(sc->stats_inflight, 1); 2819 } else 2820 counter_u64_add(sc->stats_process_error, 1); 2821 2822 out: 2823 mtx_unlock(&s->lock); 2824 2825 if (error) { 2826 crp->crp_etype = error; 2827 crypto_done(crp); 2828 } 2829 2830 return (0); 2831 } 2832 2833 static int 2834 do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss, 2835 struct mbuf *m) 2836 { 2837 struct ccr_softc *sc = iq->adapter->ccr_softc; 2838 struct ccr_session *s; 2839 const struct cpl_fw6_pld *cpl; 2840 struct cryptop *crp; 2841 uint32_t status; 2842 int error; 2843 2844 if (m != NULL) 2845 cpl = mtod(m, const void *); 2846 else 2847 cpl = (const void *)(rss + 1); 2848 2849 crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]); 2850 s = crypto_get_driver_session(crp->crp_session); 2851 status = be64toh(cpl->data[0]); 2852 if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status)) 2853 error = EBADMSG; 2854 else 2855 error = 0; 2856 2857 #ifdef INVARIANTS 2858 mtx_lock(&s->lock); 2859 s->pending--; 2860 mtx_unlock(&s->lock); 2861 #endif 2862 counter_u64_add(sc->stats_inflight, -1); 2863 2864 switch (s->mode) { 2865 case HASH: 2866 case HMAC: 2867 error = ccr_hash_done(sc, s, crp, cpl, error); 2868 break; 2869 case BLKCIPHER: 2870 error = ccr_blkcipher_done(sc, s, crp, cpl, error); 2871 break; 2872 case ETA: 2873 error = ccr_eta_done(sc, s, crp, cpl, error); 2874 break; 2875 case GCM: 2876 error = ccr_gcm_done(sc, s, crp, cpl, error); 2877 break; 2878 case CCM: 2879 error = ccr_ccm_done(sc, s, crp, cpl, error); 2880 break; 2881 } 2882 2883 if (error == EBADMSG) { 2884 if (CHK_MAC_ERR_BIT(status)) 2885 counter_u64_add(sc->stats_mac_error, 1); 2886 if (CHK_PAD_ERR_BIT(status)) 2887 counter_u64_add(sc->stats_pad_error, 1); 2888 } 2889 crp->crp_etype = error; 2890 crypto_done(crp); 2891 m_freem(m); 2892 return (0); 2893 } 2894 2895 static int 2896 ccr_modevent(module_t mod, int cmd, void *arg) 2897 { 2898 2899 switch (cmd) { 2900 case MOD_LOAD: 2901 t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld); 2902 return (0); 2903 case MOD_UNLOAD: 2904 t4_register_cpl_handler(CPL_FW6_PLD, NULL); 2905 return (0); 2906 default: 2907 return (EOPNOTSUPP); 2908 } 2909 } 2910 2911 static device_method_t ccr_methods[] = { 2912 DEVMETHOD(device_identify, ccr_identify), 2913 DEVMETHOD(device_probe, ccr_probe), 2914 DEVMETHOD(device_attach, ccr_attach), 2915 DEVMETHOD(device_detach, ccr_detach), 2916 2917 DEVMETHOD(cryptodev_probesession, ccr_probesession), 2918 DEVMETHOD(cryptodev_newsession, ccr_newsession), 2919 DEVMETHOD(cryptodev_freesession, ccr_freesession), 2920 DEVMETHOD(cryptodev_process, ccr_process), 2921 2922 DEVMETHOD_END 2923 }; 2924 2925 static driver_t ccr_driver = { 2926 "ccr", 2927 ccr_methods, 2928 sizeof(struct ccr_softc) 2929 }; 2930 2931 static devclass_t ccr_devclass; 2932 2933 DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL); 2934 MODULE_VERSION(ccr, 1); 2935 MODULE_DEPEND(ccr, crypto, 1, 1, 1); 2936 MODULE_DEPEND(ccr, t6nex, 1, 1, 1); 2937