/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2021 The FreeBSD Foundation
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"

/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       |  ----- For requests with AES
 * +-------------------------------+
 * | Hash state                    |  ----- For hash-only requests
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+  +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        |  ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+  +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                |  ----- Only for HMAC/hash-only requests
 * +-------------------------------+
 * | IV                            |  ----- If immediate IV
 * +-------------------------------+
 * | Payload                       |  ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+  +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
 * For HMAC requests, the key consists of the partial hash of the IPAD
 * followed by the partial hash of the OPAD.
 *
 * Replies consist of:
 *
 * +-------------------------------+
 * | struct cpl_fw6_pld            |
 * +-------------------------------+
 * | hash digest                   |  ----- For HMAC request with
 * +-------------------------------+        'hash_size' set in work request
 *
 * A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
 * "MAC" error and bit 1 indicates a "PAD" error.
 *
 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
 * in the request is returned in data[1] of the CPL_FW6_PLD message.
 *
 * For block cipher replies, the updated IV is supplied in data[2] and
 * data[3] of the CPL_FW6_PLD message.
 *
 * For hash replies where the work request set 'hash_size' to request
 * a copy of the hash in the reply, the hash digest is supplied
 * immediately following the CPL_FW6_PLD message.
 */
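
/*
 * Illustrative sketch only (the real completion handler lives later
 * in this file): decoding the status word described above, with
 * 'cpl' pointing at the received message, might look like:
 *
 *	uint64_t status = be64toh(cpl->data[0]);
 *	if (status & 1)		// bit 0: "MAC" error
 *		error = EBADMSG;
 *	else if (status & 2)	// bit 1: "PAD" error
 *		error = EBADMSG;
 */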

/*
 * The crypto engine supports a maximum AAD size of 511 bytes.
 */
#define	MAX_AAD_LEN		511

/*
 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
 * entries.  While the CPL includes a 16-bit length field, the T6 can
 * sometimes hang if an error occurs while processing a request with a
 * single DSGL entry larger than 2k.
 */
#define	MAX_RX_PHYS_DSGL_SGE	32
#define	DSGL_SGE_MAXLEN		2048

/*
 * The adapter only supports requests with a total input or output
 * length of 64k-1 or smaller.  Longer requests either result in hung
 * requests or incorrect results.
 */
#define	MAX_REQUEST_SIZE	65535

static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");

struct ccr_session_hmac {
	const struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2];
};

struct ccr_session_gmac {
	int hash_len;
	char ghash_h[GMAC_BLOCK_LEN];
};

struct ccr_session_ccm_mac {
	int hash_len;
};

struct ccr_session_blkcipher {
	unsigned int cipher_mode;
	unsigned int key_len;
	unsigned int iv_len;
	__be32 key_ctx_hdr;
	char enckey[CHCR_AES_MAX_KEY_LEN];
	char deckey[CHCR_AES_MAX_KEY_LEN];
};

struct ccr_port {
	struct sge_wrq *txq;
	struct sge_rxq *rxq;
	int rx_channel_id;
	int tx_channel_id;
	u_int active_sessions;

	counter_u64_t stats_queued;
	counter_u64_t stats_completed;
};

struct ccr_session {
#ifdef INVARIANTS
	int pending;
#endif
	enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode;
	struct ccr_port *port;
	union {
		struct ccr_session_hmac hmac;
		struct ccr_session_gmac gmac;
		struct ccr_session_ccm_mac ccm_mac;
	};
	struct ccr_session_blkcipher blkcipher;
	struct mtx lock;

	/*
	 * Pre-allocate S/G lists used when preparing a work request.
	 * 'sg_input' contains an sglist describing the entire input
	 * buffer for a 'struct cryptop'.  'sg_output' contains an
	 * sglist describing the entire output buffer.  'sg_ulptx' is
	 * used to describe the data the engine should DMA as input
	 * via ULPTX_SGL.  'sg_dsgl' is used to describe the
	 * destination that cipher text and a tag should be written
	 * to.
	 */
	struct sglist *sg_input;
	struct sglist *sg_output;
	struct sglist *sg_ulptx;
	struct sglist *sg_dsgl;
};

struct ccr_softc {
	struct adapter *adapter;
	device_t dev;
	uint32_t cid;
	struct mtx lock;
	bool detaching;
	struct ccr_port ports[MAX_NPORTS];
	u_int port_mask;
	int first_rxq_id;

	/*
	 * Pre-allocate a dummy output buffer for the IV and AAD for
	 * AEAD requests.
	 */
	char *iv_aad_buf;
	struct sglist *sg_iv_aad;

	/* Statistics. */
	counter_u64_t stats_blkcipher_encrypt;
	counter_u64_t stats_blkcipher_decrypt;
	counter_u64_t stats_hash;
	counter_u64_t stats_hmac;
	counter_u64_t stats_eta_encrypt;
	counter_u64_t stats_eta_decrypt;
	counter_u64_t stats_gcm_encrypt;
	counter_u64_t stats_gcm_decrypt;
	counter_u64_t stats_ccm_encrypt;
	counter_u64_t stats_ccm_decrypt;
	counter_u64_t stats_wr_nomem;
	counter_u64_t stats_inflight;
	counter_u64_t stats_mac_error;
	counter_u64_t stats_pad_error;
	counter_u64_t stats_sglist_error;
	counter_u64_t stats_process_error;
	counter_u64_t stats_sw_fallback;
};

/*
 * Crypto requests involve two kinds of scatter/gather lists.
 *
 * Non-hash-only requests require a PHYS_DSGL that describes the
 * location to store the results of the encryption or decryption
 * operation.  This SGL uses a different format (PHYS_DSGL) and should
 * exclude the skip bytes at the start of the data as well as any AAD
 * or IV.  For authenticated encryption requests it should include the
 * destination of the hash or tag.
 *
 * The input payload may either be supplied inline as immediate data,
 * or via a standard ULP_TX SGL.  This SGL should include AAD,
 * ciphertext, and the hash or tag for authenticated decryption
 * requests.
 *
 * These scatter/gather lists can describe different subsets of the
 * buffers described by the crypto operation.  ccr_populate_sglist()
 * generates a scatter/gather list that covers an entire crypto
 * operation buffer that is then used to construct the other
 * scatter/gather lists.
 */
static int
ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb)
{
	int error;

	sglist_reset(sg);
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
		error = sglist_append_mbuf(sg, cb->cb_mbuf);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = sglist_append_single_mbuf(sg, cb->cb_mbuf);
		break;
	case CRYPTO_BUF_UIO:
		error = sglist_append_uio(sg, cb->cb_uio);
		break;
	case CRYPTO_BUF_CONTIG:
		error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
		break;
	case CRYPTO_BUF_VMPAGE:
		/* sglist_append_vmpages() takes the page offset before
		 * the length; the arguments were swapped here. */
		error = sglist_append_vmpages(sg, cb->cb_vm_page,
		    cb->cb_vm_page_offset, cb->cb_vm_page_len);
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

/*
 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
 * segments.
 */
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
	int i, nsegs;

	nsegs = 0;
	for (i = 0; i < sg->sg_nseg; i++)
		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
	return (nsegs);
}
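
/*
 * For example (illustrative only): a single 5000-byte contiguous
 * segment counted with maxsegsize = DSGL_SGE_MAXLEN (2048) yields
 * howmany(5000, 2048) = 3 segments, matching the three entries
 * ccr_write_phys_dsgl() emits when it splits that segment.
 */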

/* These functions deal with PHYS_DSGL for the reply buffer. */
static inline int
ccr_phys_dsgl_len(int nsegs)
{
	int len;

	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
	if ((nsegs % 8) != 0) {
		len += sizeof(uint16_t) * 8;
		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
	}
	return (len);
}
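
/*
 * Worked example (illustrative only): each full group of 8 entries
 * costs sizeof(struct phys_sge_pairs) (8 16-bit lengths plus 8
 * 64-bit addresses = 80 bytes).  A trailing partial group still
 * needs all 8 length slots but only an even number of address
 * slots, so nsegs = 9 costs 80 + 16 + 2 * 8 = 112 bytes.
 */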

static void
ccr_write_phys_dsgl(struct ccr_session *s, void *dst, int nsegs)
{
	struct sglist *sg;
	struct cpl_rx_phys_dsgl *cpl;
	struct phys_sge_pairs *sgl;
	vm_paddr_t paddr;
	size_t seglen;
	u_int i, j;

	sg = s->sg_dsgl;
	cpl = dst;
	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
	cpl->pcirlxorder_to_noofsgentr = htobe32(
	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id);
	cpl->rss_hdr_int.hash_val = 0;
	cpl->rss_hdr_int.channel = s->port->rx_channel_id;
	sgl = (struct phys_sge_pairs *)(cpl + 1);
	j = 0;
	for (i = 0; i < sg->sg_nseg; i++) {
		seglen = sg->sg_segs[i].ss_len;
		paddr = sg->sg_segs[i].ss_paddr;
		do {
			sgl->addr[j] = htobe64(paddr);
			if (seglen > DSGL_SGE_MAXLEN) {
				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
				paddr += DSGL_SGE_MAXLEN;
				seglen -= DSGL_SGE_MAXLEN;
			} else {
				sgl->len[j] = htobe16(seglen);
				seglen = 0;
			}
			j++;
			if (j == 8) {
				sgl++;
				j = 0;
			}
		} while (seglen != 0);
	}
	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}

/* These functions deal with the ULPTX_SGL for input payload. */
static inline int
ccr_ulptx_sgl_len(int nsegs)
{
	u_int n;

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (roundup2(n, 16));
}
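
/*
 * Worked example (illustrative only): the first segment is carried
 * in struct ulptx_sgl itself (16 bytes) and each later pair of
 * segments shares a 24-byte ulptx_sge_pair, so nsegs = 3 gives
 * 16 + 8 * 3 = 40 bytes, rounded up to 48.
 */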

static void
ccr_write_ulptx_sgl(struct ccr_session *s, void *dst, int nsegs)
{
	struct ulptx_sgl *usgl;
	struct sglist *sg;
	struct sglist_seg *ss;
	int i;

	sg = s->sg_ulptx;
	MPASS(nsegs == sg->sg_nseg);
	ss = &sg->sg_segs[0];
	usgl = dst;
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(ss->ss_len);
	usgl->addr0 = htobe64(ss->ss_paddr);
	ss++;
	for (i = 0; i < sg->sg_nseg - 1; i++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
		ss++;
	}
}

static bool
ccr_use_imm_data(u_int transhdr_len, u_int input_len)
{

	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
		return (false);
	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
	    SGE_MAX_WR_LEN)
		return (false);
	return (true);
}
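
/*
 * Note (illustrative): both limits above must hold.  Even a payload
 * under the immediate-data cap falls back to the SGL path when the
 * transport header (which grows with the key context and DSGL)
 * leaves too little room in a maximum-sized work request.
 */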

static void
ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s,
    struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len,
    u_int sgl_len, u_int hash_size, struct cryptop *crp)
{
	u_int cctx_size, idata_len;

	cctx_size = sizeof(struct _key_ctx) + kctx_len;
	crwr->wreq.op_to_cctx_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
	crwr->wreq.len16_pkd = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
	crwr->wreq.session_id = 0;
	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->rx_channel_id) |
	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |	/* unused in firmware */
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id));
	crwr->wreq.key_addr = 0;
	crwr->wreq.pld_size_hash_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
	crwr->wreq.cookie = htobe64((uintptr_t)crp);

	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
	    V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(sc->first_rxq_id) | V_ULP_TXPKT_RO(1));
	crwr->ulptx.len = htobe32(
	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));

	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0));
	idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len;
	if (imm_len % 16 != 0)
		idata_len -= 16 - imm_len % 16;
	crwr->sc_imm.len = htobe32(idata_len);
}

static int
ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	struct chcr_wr *crwr;
	struct wrqe *wr;
	const struct auth_hash *axf;
	char *dst;
	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
	u_int hmac_ctrl, imm_len, iopad_size;
	int error, sgl_nsegs, sgl_len, use_opad;

	/* Reject requests with too large of an input buffer. */
	if (crp->crp_payload_length > MAX_REQUEST_SIZE)
		return (EFBIG);

	axf = s->hmac.auth_hash;

	if (s->mode == HMAC) {
		use_opad = 1;
		hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
	} else {
		use_opad = 0;
		hmac_ctrl = SCMD_HMAC_CTRL_NOP;
	}

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the context includes the aligned IPAD and
	 * OPAD.
	 */
	kctx_len = iopad_size;
	if (use_opad)
		kctx_len += iopad_size;
	hash_size_in_response = axf->hashsize;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);

	if (crp->crp_payload_length == 0) {
		imm_len = axf->blocksize;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) {
		imm_len = crp->crp_payload_length;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(s->sg_ulptx);
		error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		sgl_nsegs = s->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		counter_u64_add(sc->stats_wr_nomem, 1);
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    hash_size_in_response, crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));

	crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ?
	    axf->blocksize : crp->crp_payload_length);

	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_LAST_FRAG(0) |
	    V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) |
	    V_SCMD_MAC_ONLY(1));

	memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len);

	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_OPAD_PRESENT(use_opad) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) |
	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));

	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
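	/*
	 * For an empty payload the engine is still fed one block of
	 * input; the bytes built below form standard Merkle-Damgard
	 * padding: a 0x80 byte and, for HMAC, the bit length of one
	 * block in the final 8 bytes (e.g. a big-endian 512 for
	 * SHA-256's 64-byte block).
	 */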
	if (crp->crp_payload_length == 0) {
		dst[0] = 0x80;
		if (s->mode == HMAC)
			*(uint64_t *)(dst + axf->blocksize -
			    sizeof(uint64_t)) = htobe64(axf->blocksize << 3);
	} else if (imm_len != 0)
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
	else
		ccr_write_ulptx_sgl(s, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    const struct cpl_fw6_pld *cpl, int error)
{
	uint8_t hash[HASH_MAX_LEN];

	if (error)
		return (error);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
		    hash);
		if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0)
			return (EBADMSG);
	} else
		crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
		    (cpl + 1));
	return (0);
}

static int
ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int imm_len, iv_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0)
		return (EINVAL);
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
	    (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* Reject requests with too large of an input buffer. */
	if (crp->crp_payload_length > MAX_REQUEST_SIZE)
		return (EFBIG);

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	sglist_reset(s->sg_dsgl);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* The 'key' must be 128-bit aligned. */
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/* For AES-XTS we send a 16-byte IV in the work request. */
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) {
		imm_len = crp->crp_payload_length;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(s->sg_ulptx);
		error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		sgl_nsegs = s->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		counter_u64_add(sc->stats_wr_nomem, 1);
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	crypto_read_iv(crp, iv);

	/* Zero the remainder of the IV for AES-XTS. */
	memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (s->blkcipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_CTR:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0)
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
	else
		ccr_write_ulptx_sgl(s, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

/*
 * 'hashsize' is the length of a full digest.  'authsize' is the
 * requested digest length for this operation which may be less
 * than 'hashsize'.
 */
static int
ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
{

	if (authsize == 10)
		return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
	if (authsize == 12)
		return (SCMD_HMAC_CTRL_IPSEC_96BIT);
	if (authsize == hashsize / 2)
		return (SCMD_HMAC_CTRL_DIV2);
	return (SCMD_HMAC_CTRL_NO_TRUNC);
}
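
/*
 * For example (illustrative only): SHA-256 HMAC truncated to a
 * 12-byte digest maps to SCMD_HMAC_CTRL_IPSEC_96BIT, while a
 * 16-byte digest (half of the 32-byte hash) maps to
 * SCMD_HMAC_CTRL_DIV2.
 */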

static int
ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	const struct auth_hash *axf;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len, iopad_size, iv_len;
	u_int aad_start, aad_stop;
	u_int auth_insert;
	u_int cipher_start, cipher_stop;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	/*
	 * If there is a need in the future, requests with an empty
	 * payload could be supported as HMAC-only requests.
	 */
	if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0)
		return (EINVAL);
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
	    (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* For AES-XTS we send a 16-byte IV in the work request. */
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	if (crp->crp_aad_length + iv_len > MAX_AAD_LEN)
		return (EINVAL);

	axf = s->hmac.auth_hash;
	hash_size_in_response = s->hmac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the hash when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(s->sg_dsgl);
	error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0,
	    iv_len + crp->crp_aad_length);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the IPAD and OPAD.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crp->crp_aad_length + crp->crp_payload_length;

	/*
	 * The firmware hangs if sent a request which is a
	 * bit smaller than MAX_REQUEST_SIZE.  In particular, the
	 * firmware appears to require 512 - 16 bytes of spare room
	 * along with the size of the hash even if the hash isn't
	 * included in the input buffer.
	 */
	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
	    MAX_REQUEST_SIZE)
		return (EFBIG);
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;

	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(s->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				error = sglist_append(s->sg_ulptx,
				    crp->crp_aad, crp->crp_aad_length);
			else
				error = sglist_append_sglist(s->sg_ulptx,
				    s->sg_input, crp->crp_aad_start,
				    crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = s->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	/* Any AAD comes after the IV. */
	if (crp->crp_aad_length != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crp->crp_aad_length - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crp->crp_aad_length + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;
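
	/*
	 * Offset check (illustrative): these offsets are 1-based from
	 * the start of the input buffer, so with a 16-byte IV and 13
	 * bytes of AAD, aad_start = 17, aad_stop = 29, and
	 * cipher_start = 30.
	 */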

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		counter_u64_add(sc->stats_wr_nomem, 1);
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	crypto_read_iv(crp, iv);

	/* Zero the remainder of the IV for AES-XTS. */
	memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (s->blkcipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_CTR:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->hmac.pads, iopad_size * 2);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				memcpy(dst, crp->crp_aad, crp->crp_aad_length);
			else
				crypto_copydata(crp, crp->crp_aad_start,
				    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(s, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

static int
ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle GCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crp->crp_payload_length == 0)
		return (EMSGSIZE);

	if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->gmac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	iv_len = AES_BLOCK_LEN;

	/*
	 * GCM requests should always provide an explicit IV.
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(s->sg_dsgl);
	error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    crp->crp_aad_length);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the Galois hash key.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crp->crp_aad_length + crp->crp_payload_length;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(s->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				error = sglist_append(s->sg_ulptx,
				    crp->crp_aad, crp->crp_aad_length);
			else
				error = sglist_append_sglist(s->sg_ulptx,
				    s->sg_input, crp->crp_aad_start,
				    crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = s->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	if (crp->crp_aad_length != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crp->crp_aad_length - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crp->crp_aad_length + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		counter_u64_add(sc->stats_wr_nomem, 1);
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);
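
	/*
	 * Build the 16-byte counter block from the nonce: per GCM,
	 * for a 12-byte IV the initial counter value in the last
	 * four bytes is 1.
	 */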
	crypto_read_iv(crp, iv);
	*(uint32_t *)&iv[12] = htobe32(1);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  On encrypt it
	 * should normally be set to 0 anyway.  However, for decrypt
	 * the cipher ends before the tag in the ETA case (and
	 * authstop is set to stop before the tag), but for GCM the
	 * cipher still runs to the end of the buffer.  Not sure if
	 * this is intentional or a firmware quirk, but it is required
	 * for working tag validation with GCM decryption.
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				memcpy(dst, crp->crp_aad, crp->crp_aad_length);
			else
				crypto_copydata(crp, crp->crp_aad_start,
				    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(s, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the GMAC hash.
	 */
	return (error);
}

/*
 * Handle a GCM request that is not supported by the crypto engine by
 * performing the operation in software.  Derived from swcr_authenc().
 */
static void
ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp)
{
	const struct auth_hash *axf;
	const struct enc_xform *exf;
	void *auth_ctx, *kschedule;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	int error, i, len;

	auth_ctx = NULL;
	kschedule = NULL;

	/* Initialize the MAC. */
	switch (s->blkcipher.key_len) {
	case 16:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 24:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 32:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		error = EINVAL;
		goto out;
	}
	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
	if (auth_ctx == NULL) {
		error = ENOMEM;
		goto out;
	}
	axf->Init(auth_ctx);
	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);

	/* Initialize the cipher. */
	exf = &enc_xform_aes_nist_gcm;
	kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT);
	if (kschedule == NULL) {
		error = ENOMEM;
		goto out;
	}
	error = exf->setkey(kschedule, s->blkcipher.enckey,
	    s->blkcipher.key_len);
	if (error)
		goto out;

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) {
		error = EINVAL;
		goto out;
	}

	axf->Reinit(auth_ctx, crp->crp_iv, AES_GCM_IV_LEN);

	/* MAC the AAD. */
	if (crp->crp_aad != NULL) {
		len = rounddown(crp->crp_aad_length, sizeof(block));
		if (len != 0)
			axf->Update(auth_ctx, crp->crp_aad, len);
		if (crp->crp_aad_length != len) {
			memset(block, 0, sizeof(block));
			memcpy(block, (char *)crp->crp_aad + len,
			    crp->crp_aad_length - len);
			axf->Update(auth_ctx, block, sizeof(block));
		}
	} else {
		for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) {
			len = imin(crp->crp_aad_length - i, sizeof(block));
			crypto_copydata(crp, crp->crp_aad_start + i, len,
			    block);
			bzero(block + len, sizeof(block) - len);
			axf->Update(auth_ctx, block, sizeof(block));
		}
	}

	exf->reinit(kschedule, crp->crp_iv, AES_GCM_IV_LEN);

	/* Do encryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) {
		len = imin(crp->crp_payload_length - i, sizeof(block));
		crypto_copydata(crp, crp->crp_payload_start + i, len, block);
		bzero(block + len, sizeof(block) - len);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(kschedule, block, block);
			axf->Update(auth_ctx, block, len);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    block);
		} else {
			axf->Update(auth_ctx, block, len);
		}
	}

	/* Length block. */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8);
	((uint32_t *)block)[3] = htobe32(crp->crp_payload_length * 8);
	axf->Update(auth_ctx, block, sizeof(block));

	/* Finalize MAC. */
	axf->Final(digest, auth_ctx);

	/* Inject or validate tag. */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copyback(crp, crp->crp_digest_start, sizeof(digest),
		    digest);
		error = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2),
		    digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
			error = 0;

			/* Tag matches, decrypt data. */
			for (i = 0; i < crp->crp_payload_length;
			    i += sizeof(block)) {
				len = imin(crp->crp_payload_length - i,
				    sizeof(block));
				crypto_copydata(crp,
				    crp->crp_payload_start + i, len, block);
				bzero(block + len, sizeof(block) - len);
				exf->decrypt(kschedule, block, block);
				crypto_copyback(crp,
				    crp->crp_payload_start + i, len, block);
			}
		} else
			error = EBADMSG;
		explicit_bzero(digest2, sizeof(digest2));
	}

out:
	zfree(kschedule, M_CCR);
	zfree(auth_ctx, M_CCR);
	explicit_bzero(block, sizeof(block));
	explicit_bzero(digest, sizeof(digest));
	crp->crp_etype = error;
	crypto_done(crp);
}

static int
ccr_ccm_hmac_ctrl(unsigned int authsize)
{
	switch (authsize) {
	case 4:
		return (SCMD_HMAC_CTRL_PL1);
	case 6:
		return (SCMD_HMAC_CTRL_PL2);
	case 8:
		return (SCMD_HMAC_CTRL_DIV2);
	case 10:
		return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
	case 12:
		return (SCMD_HMAC_CTRL_IPSEC_96BIT);
	case 14:
		return (SCMD_HMAC_CTRL_PL3);
	case 16:
		return (SCMD_HMAC_CTRL_NO_TRUNC);
	default:
		__assert_unreachable();
	}
}

static void
generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response,
    const char *iv, char *b0)
{
	u_int i, payload_len, L;

	/* NB: L is already set in the first byte of the IV. */
	memcpy(b0, iv, CCM_B0_SIZE);
	L = iv[0] + 1;

	/* Set length of hash in bits 3 - 5. */
	b0[0] |= (((hash_size_in_response - 2) / 2) << 3);

	/* Store the payload length as a big-endian value. */
	payload_len = crp->crp_payload_length;
	for (i = 0; i < L; i++) {
		b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
		payload_len >>= 8;
	}

	/*
	 * If there is AAD in the request, set bit 6 in the flags
	 * field and store the AAD length as a big-endian value at the
	 * start of block 1.  This only assumes a 16-bit AAD length
	 * since T6 doesn't support large AAD sizes.
	 */
	if (crp->crp_aad_length != 0) {
		b0[0] |= (1 << 6);
		*(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length);
	}
}
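
/*
 * Worked example (illustrative only): for a 12-byte nonce, an 8-byte
 * tag, AAD present, and a 100-byte payload, L = 3, so the flags byte
 * is 0x40 | (3 << 3) | 2 = 0x5a and B0 is 0x5a, the 12 nonce bytes,
 * and a payload length of 0x00 0x00 0x64.
 */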

static int
ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	const struct crypto_session_params *csp;
	struct ulptx_idata *idata;
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int aad_len, b0_len, hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	csp = crypto_get_params(crp->crp_session);

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle CCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crp->crp_payload_length == 0)
		return (EMSGSIZE);

	/* The length has to fit within the length field in block 0. */
	if (crp->crp_payload_length > ccm_max_payload_length(csp))
		return (EMSGSIZE);

	/*
	 * CCM always includes block 0 in the AAD before AAD from the
	 * request.
	 */
	b0_len = CCM_B0_SIZE;
	if (crp->crp_aad_length != 0)
		b0_len += CCM_AAD_FIELD_SIZE;
	aad_len = b0_len + crp->crp_aad_length;

	/*
	 * CCM requests should always provide an explicit IV (really
	 * the nonce).
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/*
	 * The IV in the work request is 16 bytes and not just the
	 * nonce.
	 */
	iv_len = AES_BLOCK_LEN;

	if (iv_len + aad_len > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->ccm_mac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + aad_len + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + aad_len + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(s->sg_dsgl);
	error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    aad_len);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of two copies of
	 * the AES key.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, AAD (including block
	 * 0), and then the cipher/plain text.  For decryption
	 * requests the hash is appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = aad_len + crp->crp_payload_length;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		/* Block 0 is passed as immediate data. */
		imm_len = b0_len;

		sglist_reset(s->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				error = sglist_append(s->sg_ulptx,
				    crp->crp_aad, crp->crp_aad_length);
			else
				error = sglist_append_sglist(s->sg_ulptx,
				    s->sg_input, crp->crp_aad_start,
				    crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = s->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	aad_start = iv_len + 1;
	aad_stop = aad_start + aad_len - 1;
	cipher_start = aad_stop + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		counter_u64_add(sc->stats_wr_nomem, 1);
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the nonce from the request.  Use the nonce to generate
	 * the full IV with the counter set to 0.
	 */
	memset(iv, 0, iv_len);
	iv[0] = (15 - csp->csp_ivlen) - 1;
	crypto_read_iv(crp, iv + 1);
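
	/*
	 * Illustrative check: for a 12-byte nonce, csp_ivlen = 12, so
	 * iv[0] = L' = 2 (L = 3), leaving three trailing counter
	 * bytes that start at zero.
	 */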
0 : 1) | 1815 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) | 1816 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) | 1817 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1818 V_SCMD_IV_SIZE(iv_len / 2) | 1819 V_SCMD_NUM_IVS(0)); 1820 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1821 V_SCMD_IV_GEN_CTRL(0) | 1822 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1823 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1824 1825 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1826 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); 1827 memcpy(crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16), 1828 s->blkcipher.enckey, s->blkcipher.key_len); 1829 1830 dst = (char *)(crwr + 1) + kctx_len; 1831 ccr_write_phys_dsgl(s, dst, dsgl_nsegs); 1832 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1833 memcpy(dst, iv, iv_len); 1834 dst += iv_len; 1835 generate_ccm_b0(crp, hash_size_in_response, iv, dst); 1836 if (sgl_nsegs == 0) { 1837 dst += b0_len; 1838 if (crp->crp_aad_length != 0) { 1839 if (crp->crp_aad != NULL) 1840 memcpy(dst, crp->crp_aad, crp->crp_aad_length); 1841 else 1842 crypto_copydata(crp, crp->crp_aad_start, 1843 crp->crp_aad_length, dst); 1844 dst += crp->crp_aad_length; 1845 } 1846 crypto_copydata(crp, crp->crp_payload_start, 1847 crp->crp_payload_length, dst); 1848 dst += crp->crp_payload_length; 1849 if (op_type == CHCR_DECRYPT_OP) 1850 crypto_copydata(crp, crp->crp_digest_start, 1851 hash_size_in_response, dst); 1852 } else { 1853 dst += CCM_B0_SIZE; 1854 if (b0_len > CCM_B0_SIZE) { 1855 /* 1856 * If there is AAD, insert padding including a 1857 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL 1858 * is 16-byte aligned. 1859 */ 1860 KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE, 1861 ("b0_len mismatch")); 1862 memset(dst + CCM_AAD_FIELD_SIZE, 0, 1863 8 - CCM_AAD_FIELD_SIZE); 1864 idata = (void *)(dst + 8); 1865 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 1866 idata->len = htobe32(0); 1867 dst = (void *)(idata + 1); 1868 } 1869 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 1870 } 1871 1872 /* XXX: TODO backpressure */ 1873 t4_wrq_tx(sc->adapter, wr); 1874 1875 explicit_bzero(iv, sizeof(iv)); 1876 return (0); 1877 } 1878 1879 static int 1880 ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s, 1881 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1882 { 1883 1884 /* 1885 * The updated IV to permit chained requests is at 1886 * cpl->data[2], but OCF doesn't permit chained requests. 1887 * 1888 * Note that the hardware should always verify the CBC MAC 1889 * hash. 1890 */ 1891 return (error); 1892 } 1893 1894 /* 1895 * Handle a CCM request that the crypto engine cannot process by doing 1896 * it in software. These helpers and ccr_ccm_soft() are derived from swcr_ccm(). 1897 */ 1898 static void 1899 build_ccm_b0(const char *nonce, u_int nonce_length, u_int aad_length, 1900 u_int data_length, u_int tag_length, uint8_t *b0) 1901 { 1902 uint8_t *bp; 1903 uint8_t flags, L; 1904 1905 KASSERT(nonce_length >= 7 && nonce_length <= 13, 1906 ("nonce_length must be between 7 and 13 bytes")); 1907 1908 /* 1909 * Need to determine the L field value. This is the number of 1910 * bytes needed to specify the length of the message; the length 1911 * is whatever is left in the 16 bytes after specifying flags and 1912 * the nonce. 1913 */ 1914 L = 15 - nonce_length; 1915 1916 flags = ((aad_length > 0) << 6) + 1917 (((tag_length - 2) / 2) << 3) + 1918 L - 1; 1919 1920 /* 1921 * Now we need to set up the first block, which has flags, nonce, 1922 * and the message length.
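 *
 * As a worked example (values chosen purely for illustration): a
 * 12-byte nonce gives L = 15 - 12 = 3, so with AAD present and a
 * 16-byte tag the flags byte computed above is
 * (1 << 6) | (((16 - 2) / 2) << 3) | (3 - 1) = 0x40 | 0x38 | 0x02 = 0x7a.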
1923 */ 1924 b0[0] = flags; 1925 memcpy(b0 + 1, nonce, nonce_length); 1926 bp = b0 + 1 + nonce_length; 1927 1928 /* Need to copy L (i.e. 15 - nonce_length) bytes of data_length. */ 1929 for (uint8_t *dst = b0 + CCM_CBC_BLOCK_LEN - 1; dst >= bp; dst--) { 1930 *dst = data_length; 1931 data_length >>= 8; 1932 } 1933 } 1934 1935 /* NB: OCF only supports AAD lengths < 2^32. */ 1936 static int 1937 build_ccm_aad_length(u_int aad_length, uint8_t *blk) 1938 { 1939 if (aad_length < ((1 << 16) - (1 << 8))) { 1940 be16enc(blk, aad_length); 1941 return (sizeof(uint16_t)); 1942 } else { 1943 blk[0] = 0xff; 1944 blk[1] = 0xfe; 1945 be32enc(blk + 2, aad_length); 1946 return (2 + sizeof(uint32_t)); 1947 } 1948 } 1949 1950 static void 1951 ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp) 1952 { 1953 const struct crypto_session_params *csp; 1954 const struct auth_hash *axf; 1955 const struct enc_xform *exf; 1956 union authctx *auth_ctx; 1957 void *kschedule; 1958 char block[CCM_CBC_BLOCK_LEN]; 1959 char tag[AES_CBC_MAC_HASH_LEN]; 1960 u_int taglen; 1961 int error, i, len; 1962 1963 auth_ctx = NULL; 1964 kschedule = NULL; 1965 taglen = s->ccm_mac.hash_len; 1966 1967 csp = crypto_get_params(crp->crp_session); 1968 if (crp->crp_payload_length > ccm_max_payload_length(csp)) { 1969 error = EMSGSIZE; 1970 goto out; 1971 } 1972 1973 /* Initialize the MAC. */ 1974 switch (s->blkcipher.key_len) { 1975 case 16: 1976 axf = &auth_hash_ccm_cbc_mac_128; 1977 break; 1978 case 24: 1979 axf = &auth_hash_ccm_cbc_mac_192; 1980 break; 1981 case 32: 1982 axf = &auth_hash_ccm_cbc_mac_256; 1983 break; 1984 default: 1985 error = EINVAL; 1986 goto out; 1987 } 1988 auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); 1989 if (auth_ctx == NULL) { 1990 error = ENOMEM; 1991 goto out; 1992 } 1993 axf->Init(auth_ctx); 1994 axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); 1995 1996 /* Initialize the cipher. */ 1997 exf = &enc_xform_ccm; 1998 kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT); 1999 if (kschedule == NULL) { 2000 error = ENOMEM; 2001 goto out; 2002 } 2003 error = exf->setkey(kschedule, s->blkcipher.enckey, 2004 s->blkcipher.key_len); 2005 if (error) 2006 goto out; 2007 2008 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { 2009 error = EINVAL; 2010 goto out; 2011 } 2012 2013 axf->Reinit(auth_ctx, crp->crp_iv, csp->csp_ivlen); 2014 2015 /* Supply MAC with b0. */ 2016 build_ccm_b0(crp->crp_iv, csp->csp_ivlen, crp->crp_aad_length, 2017 crp->crp_payload_length, taglen, block); 2018 axf->Update(auth_ctx, block, CCM_CBC_BLOCK_LEN); 2019 2020 /* MAC the AAD. */ 2021 if (crp->crp_aad_length != 0) { 2022 len = build_ccm_aad_length(crp->crp_aad_length, block); 2023 axf->Update(auth_ctx, block, len); 2024 if (crp->crp_aad != NULL) 2025 axf->Update(auth_ctx, crp->crp_aad, 2026 crp->crp_aad_length); 2027 else 2028 crypto_apply(crp, crp->crp_aad_start, 2029 crp->crp_aad_length, axf->Update, auth_ctx); 2030 2031 /* Pad the AAD (including length field) to a full block.
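 * For instance (hypothetical sizes): a 2-byte length field plus 10
 * bytes of AAD fills 12 bytes of the 16-byte CBC block, so 4 zero
 * bytes are MACed here before moving on to the payload.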
*/ 2032 len = (len + crp->crp_aad_length) % CCM_CBC_BLOCK_LEN; 2033 if (len != 0) { 2034 len = CCM_CBC_BLOCK_LEN - len; 2035 memset(block, 0, CCM_CBC_BLOCK_LEN); 2036 axf->Update(auth_ctx, block, len); 2037 } 2038 } 2039 2040 exf->reinit(kschedule, crp->crp_iv, csp->csp_ivlen); 2041 2042 /* Do encryption/decryption with MAC */ 2043 for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { 2044 len = imin(crp->crp_payload_length - i, sizeof(block)); 2045 crypto_copydata(crp, crp->crp_payload_start + i, len, block); 2046 bzero(block + len, sizeof(block) - len); 2047 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 2048 axf->Update(auth_ctx, block, len); 2049 exf->encrypt(kschedule, block, block); 2050 crypto_copyback(crp, crp->crp_payload_start + i, len, 2051 block); 2052 } else { 2053 exf->decrypt(kschedule, block, block); 2054 axf->Update(auth_ctx, block, len); 2055 } 2056 } 2057 2058 /* Finalize MAC. */ 2059 axf->Final(tag, auth_ctx); 2060 2061 /* Inject or validate tag. */ 2062 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 2063 crypto_copyback(crp, crp->crp_digest_start, taglen, tag); 2064 error = 0; 2065 } else { 2066 char tag2[AES_CBC_MAC_HASH_LEN]; 2067 2068 crypto_copydata(crp, crp->crp_digest_start, taglen, tag2); 2069 if (timingsafe_bcmp(tag, tag2, taglen) == 0) { 2070 error = 0; 2071 2072 /* Tag matches, decrypt data. */ 2073 exf->reinit(kschedule, crp->crp_iv, csp->csp_ivlen); 2074 for (i = 0; i < crp->crp_payload_length; 2075 i += sizeof(block)) { 2076 len = imin(crp->crp_payload_length - i, 2077 sizeof(block)); 2078 crypto_copydata(crp, crp->crp_payload_start + i, 2079 len, block); 2080 bzero(block + len, sizeof(block) - len); 2081 exf->decrypt(kschedule, block, block); 2082 crypto_copyback(crp, crp->crp_payload_start + i, 2083 len, block); 2084 } 2085 } else 2086 error = EBADMSG; 2087 explicit_bzero(tag2, sizeof(tag2)); 2088 } 2089 2090 out: 2091 zfree(kschedule, M_CCR); 2092 zfree(auth_ctx, M_CCR); 2093 explicit_bzero(block, sizeof(block)); 2094 explicit_bzero(tag, sizeof(tag)); 2095 crp->crp_etype = error; 2096 crypto_done(crp); 2097 } 2098 2099 static void 2100 ccr_identify(driver_t *driver, device_t parent) 2101 { 2102 struct adapter *sc; 2103 2104 sc = device_get_softc(parent); 2105 if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE && 2106 device_find_child(parent, "ccr", -1) == NULL) 2107 device_add_child(parent, "ccr", -1); 2108 } 2109 2110 static int 2111 ccr_probe(device_t dev) 2112 { 2113 2114 device_set_desc(dev, "Chelsio Crypto Accelerator"); 2115 return (BUS_PROBE_DEFAULT); 2116 } 2117 2118 static void 2119 ccr_sysctls(struct ccr_softc *sc) 2120 { 2121 struct sysctl_ctx_list *ctx; 2122 struct sysctl_oid *oid, *port_oid; 2123 struct sysctl_oid_list *children; 2124 char buf[16]; 2125 int i; 2126 2127 ctx = device_get_sysctl_ctx(sc->dev); 2128 2129 /* 2130 * dev.ccr.X. 2131 */ 2132 oid = device_get_sysctl_tree(sc->dev); 2133 children = SYSCTL_CHILDREN(oid); 2134 2135 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW, 2136 &sc->port_mask, 0, "Mask of enabled ports"); 2137 2138 /* 2139 * dev.ccr.X.stats. 
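 *
 * For example, on the first instance these counters appear as sysctl
 * nodes such as dev.ccr.0.stats.ccm_encrypt and dev.ccr.0.stats.wr_nomem.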
2140 */ 2141 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", 2142 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics"); 2143 children = SYSCTL_CHILDREN(oid); 2144 2145 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD, 2146 &sc->stats_hash, "Hash requests submitted"); 2147 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD, 2148 &sc->stats_hmac, "HMAC requests submitted"); 2149 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_encrypt", 2150 CTLFLAG_RD, &sc->stats_blkcipher_encrypt, 2151 "Cipher encryption requests submitted"); 2152 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_decrypt", 2153 CTLFLAG_RD, &sc->stats_blkcipher_decrypt, 2154 "Cipher decryption requests submitted"); 2155 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_encrypt", 2156 CTLFLAG_RD, &sc->stats_eta_encrypt, 2157 "Combined AES+HMAC encryption requests submitted"); 2158 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_decrypt", 2159 CTLFLAG_RD, &sc->stats_eta_decrypt, 2160 "Combined AES+HMAC decryption requests submitted"); 2161 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_encrypt", 2162 CTLFLAG_RD, &sc->stats_gcm_encrypt, 2163 "AES-GCM encryption requests submitted"); 2164 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_decrypt", 2165 CTLFLAG_RD, &sc->stats_gcm_decrypt, 2166 "AES-GCM decryption requests submitted"); 2167 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_encrypt", 2168 CTLFLAG_RD, &sc->stats_ccm_encrypt, 2169 "AES-CCM encryption requests submitted"); 2170 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_decrypt", 2171 CTLFLAG_RD, &sc->stats_ccm_decrypt, 2172 "AES-CCM decryption requests submitted"); 2173 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD, 2174 &sc->stats_wr_nomem, "Work request memory allocation failures"); 2175 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD, 2176 &sc->stats_inflight, "Requests currently pending"); 2177 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD, 2178 &sc->stats_mac_error, "MAC errors"); 2179 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD, 2180 &sc->stats_pad_error, "Padding errors"); 2181 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sglist_error", 2182 CTLFLAG_RD, &sc->stats_sglist_error, 2183 "Requests for which DMA mapping failed"); 2184 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error", 2185 CTLFLAG_RD, &sc->stats_process_error, 2186 "Requests failed during queueing"); 2187 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback", 2188 CTLFLAG_RD, &sc->stats_sw_fallback, 2189 "Requests processed by falling back to software"); 2190 2191 /* 2192 * dev.ccr.X.stats.port 2193 */ 2194 port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port", 2195 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics"); 2196 2197 for (i = 0; i < nitems(sc->ports); i++) { 2198 if (sc->ports[i].rxq == NULL) 2199 continue; 2200 2201 /* 2202 * dev.ccr.X.stats.port.Y 2203 */ 2204 snprintf(buf, sizeof(buf), "%d", i); 2205 oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO, 2206 buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf); 2207 children = SYSCTL_CHILDREN(oid); 2208 2209 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions", 2210 CTLFLAG_RD, &sc->ports[i].active_sessions, 0, 2211 "Count of active sessions"); 2212 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "queued", 2213 CTLFLAG_RD, &sc->ports[i].stats_queued, "Requests queued"); 2214 SYSCTL_ADD_COUNTER_U64(ctx, 
children, OID_AUTO, "completed", 2215 CTLFLAG_RD, &sc->ports[i].stats_completed, 2216 "Requests completed"); 2217 } 2218 } 2219 2220 static void 2221 ccr_init_port(struct ccr_softc *sc, int port) 2222 { 2223 struct port_info *pi; 2224 2225 pi = sc->adapter->port[port]; 2226 sc->ports[port].txq = &sc->adapter->sge.ctrlq[port]; 2227 sc->ports[port].rxq = &sc->adapter->sge.rxq[pi->vi->first_rxq]; 2228 sc->ports[port].rx_channel_id = pi->rx_c_chan; 2229 sc->ports[port].tx_channel_id = pi->tx_chan; 2230 sc->ports[port].stats_queued = counter_u64_alloc(M_WAITOK); 2231 sc->ports[port].stats_completed = counter_u64_alloc(M_WAITOK); 2232 _Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS, 2233 "Too many ports to fit in port_mask"); 2234 2235 /* 2236 * Completions for crypto requests on port 1 can sometimes 2237 * return a stale cookie value due to a firmware bug. Disable 2238 * requests on port 1 by default on affected firmware. 2239 */ 2240 if (sc->adapter->params.fw_vers >= FW_VERSION32(1, 25, 4, 0) || 2241 port == 0) 2242 sc->port_mask |= 1u << port; 2243 } 2244 2245 static int 2246 ccr_attach(device_t dev) 2247 { 2248 struct ccr_softc *sc; 2249 int32_t cid; 2250 int i; 2251 2252 sc = device_get_softc(dev); 2253 sc->dev = dev; 2254 sc->adapter = device_get_softc(device_get_parent(dev)); 2255 for_each_port(sc->adapter, i) { 2256 ccr_init_port(sc, i); 2257 } 2258 cid = crypto_get_driverid(dev, sizeof(struct ccr_session), 2259 CRYPTOCAP_F_HARDWARE); 2260 if (cid < 0) { 2261 device_printf(dev, "could not get crypto driver id\n"); 2262 return (ENXIO); 2263 } 2264 sc->cid = cid; 2265 sc->adapter->ccr_softc = sc; 2266 2267 /* 2268 * The FID must be the first RXQ for port 0 regardless of 2269 * which port is used to service the request. 2270 */ 2271 sc->first_rxq_id = sc->adapter->sge.rxq[0].iq.abs_id; 2272 2273 mtx_init(&sc->lock, "ccr", NULL, MTX_DEF); 2274 sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK); 2275 sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK); 2276 sc->stats_blkcipher_encrypt = counter_u64_alloc(M_WAITOK); 2277 sc->stats_blkcipher_decrypt = counter_u64_alloc(M_WAITOK); 2278 sc->stats_hash = counter_u64_alloc(M_WAITOK); 2279 sc->stats_hmac = counter_u64_alloc(M_WAITOK); 2280 sc->stats_eta_encrypt = counter_u64_alloc(M_WAITOK); 2281 sc->stats_eta_decrypt = counter_u64_alloc(M_WAITOK); 2282 sc->stats_gcm_encrypt = counter_u64_alloc(M_WAITOK); 2283 sc->stats_gcm_decrypt = counter_u64_alloc(M_WAITOK); 2284 sc->stats_ccm_encrypt = counter_u64_alloc(M_WAITOK); 2285 sc->stats_ccm_decrypt = counter_u64_alloc(M_WAITOK); 2286 sc->stats_wr_nomem = counter_u64_alloc(M_WAITOK); 2287 sc->stats_inflight = counter_u64_alloc(M_WAITOK); 2288 sc->stats_mac_error = counter_u64_alloc(M_WAITOK); 2289 sc->stats_pad_error = counter_u64_alloc(M_WAITOK); 2290 sc->stats_sglist_error = counter_u64_alloc(M_WAITOK); 2291 sc->stats_process_error = counter_u64_alloc(M_WAITOK); 2292 sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK); 2293 ccr_sysctls(sc); 2294 2295 return (0); 2296 } 2297 2298 static void 2299 ccr_free_port(struct ccr_softc *sc, int port) 2300 { 2301 2302 counter_u64_free(sc->ports[port].stats_queued); 2303 counter_u64_free(sc->ports[port].stats_completed); 2304 } 2305 2306 static int 2307 ccr_detach(device_t dev) 2308 { 2309 struct ccr_softc *sc; 2310 int i; 2311 2312 sc = device_get_softc(dev); 2313 2314 mtx_lock(&sc->lock); 2315 sc->detaching = true; 2316 mtx_unlock(&sc->lock); 2317 2318 crypto_unregister_all(sc->cid); 2319 2320 mtx_destroy(&sc->lock); 2321
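	/*
	 * crypto_unregister_all() above should not return until all
	 * sessions have been freed and no requests remain in flight,
	 * so the counters and per-port state can be released safely.
	 */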
counter_u64_free(sc->stats_blkcipher_encrypt); 2322 counter_u64_free(sc->stats_blkcipher_decrypt); 2323 counter_u64_free(sc->stats_hash); 2324 counter_u64_free(sc->stats_hmac); 2325 counter_u64_free(sc->stats_eta_encrypt); 2326 counter_u64_free(sc->stats_eta_decrypt); 2327 counter_u64_free(sc->stats_gcm_encrypt); 2328 counter_u64_free(sc->stats_gcm_decrypt); 2329 counter_u64_free(sc->stats_ccm_encrypt); 2330 counter_u64_free(sc->stats_ccm_decrypt); 2331 counter_u64_free(sc->stats_wr_nomem); 2332 counter_u64_free(sc->stats_inflight); 2333 counter_u64_free(sc->stats_mac_error); 2334 counter_u64_free(sc->stats_pad_error); 2335 counter_u64_free(sc->stats_sglist_error); 2336 counter_u64_free(sc->stats_process_error); 2337 counter_u64_free(sc->stats_sw_fallback); 2338 for_each_port(sc->adapter, i) { 2339 ccr_free_port(sc, i); 2340 } 2341 sglist_free(sc->sg_iv_aad); 2342 free(sc->iv_aad_buf, M_CCR); 2343 sc->adapter->ccr_softc = NULL; 2344 return (0); 2345 } 2346 2347 static void 2348 ccr_init_hash_digest(struct ccr_session *s) 2349 { 2350 union authctx auth_ctx; 2351 const struct auth_hash *axf; 2352 2353 axf = s->hmac.auth_hash; 2354 axf->Init(&auth_ctx); 2355 t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads); 2356 } 2357 2358 static bool 2359 ccr_aes_check_keylen(int alg, int klen) 2360 { 2361 2362 switch (klen * 8) { 2363 case 128: 2364 case 192: 2365 if (alg == CRYPTO_AES_XTS) 2366 return (false); 2367 break; 2368 case 256: 2369 break; 2370 case 512: 2371 if (alg != CRYPTO_AES_XTS) 2372 return (false); 2373 break; 2374 default: 2375 return (false); 2376 } 2377 return (true); 2378 } 2379 2380 static void 2381 ccr_aes_setkey(struct ccr_session *s, const void *key, int klen) 2382 { 2383 unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size; 2384 unsigned int opad_present; 2385 2386 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 2387 kbits = (klen / 2) * 8; 2388 else 2389 kbits = klen * 8; 2390 switch (kbits) { 2391 case 128: 2392 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 2393 break; 2394 case 192: 2395 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 2396 break; 2397 case 256: 2398 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 2399 break; 2400 default: 2401 panic("should not get here"); 2402 } 2403 2404 s->blkcipher.key_len = klen; 2405 memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); 2406 switch (s->blkcipher.cipher_mode) { 2407 case SCMD_CIPH_MODE_AES_CBC: 2408 case SCMD_CIPH_MODE_AES_XTS: 2409 t4_aes_getdeckey(s->blkcipher.deckey, key, kbits); 2410 break; 2411 } 2412 2413 kctx_len = roundup2(s->blkcipher.key_len, 16); 2414 switch (s->mode) { 2415 case ETA: 2416 mk_size = s->hmac.mk_size; 2417 opad_present = 1; 2418 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 2419 kctx_len += iopad_size * 2; 2420 break; 2421 case GCM: 2422 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 2423 opad_present = 0; 2424 kctx_len += GMAC_BLOCK_LEN; 2425 break; 2426 case CCM: 2427 switch (kbits) { 2428 case 128: 2429 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 2430 break; 2431 case 192: 2432 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; 2433 break; 2434 case 256: 2435 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2436 break; 2437 default: 2438 panic("should not get here"); 2439 } 2440 opad_present = 0; 2441 kctx_len *= 2; 2442 break; 2443 default: 2444 mk_size = CHCR_KEYCTX_NO_KEY; 2445 opad_present = 0; 2446 break; 2447 } 2448 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; 2449 s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | 2450 
V_KEY_CONTEXT_DUAL_CK(s->blkcipher.cipher_mode == 2451 SCMD_CIPH_MODE_AES_XTS) | 2452 V_KEY_CONTEXT_OPAD_PRESENT(opad_present) | 2453 V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) | 2454 V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1)); 2455 } 2456 2457 static bool 2458 ccr_auth_supported(const struct crypto_session_params *csp) 2459 { 2460 2461 switch (csp->csp_auth_alg) { 2462 case CRYPTO_SHA1: 2463 case CRYPTO_SHA2_224: 2464 case CRYPTO_SHA2_256: 2465 case CRYPTO_SHA2_384: 2466 case CRYPTO_SHA2_512: 2467 case CRYPTO_SHA1_HMAC: 2468 case CRYPTO_SHA2_224_HMAC: 2469 case CRYPTO_SHA2_256_HMAC: 2470 case CRYPTO_SHA2_384_HMAC: 2471 case CRYPTO_SHA2_512_HMAC: 2472 break; 2473 default: 2474 return (false); 2475 } 2476 return (true); 2477 } 2478 2479 static bool 2480 ccr_cipher_supported(const struct crypto_session_params *csp) 2481 { 2482 2483 switch (csp->csp_cipher_alg) { 2484 case CRYPTO_AES_CBC: 2485 if (csp->csp_ivlen != AES_BLOCK_LEN) 2486 return (false); 2487 break; 2488 case CRYPTO_AES_ICM: 2489 if (csp->csp_ivlen != AES_BLOCK_LEN) 2490 return (false); 2491 break; 2492 case CRYPTO_AES_XTS: 2493 if (csp->csp_ivlen != AES_XTS_IV_LEN) 2494 return (false); 2495 break; 2496 default: 2497 return (false); 2498 } 2499 return (ccr_aes_check_keylen(csp->csp_cipher_alg, 2500 csp->csp_cipher_klen)); 2501 } 2502 2503 static int 2504 ccr_cipher_mode(const struct crypto_session_params *csp) 2505 { 2506 2507 switch (csp->csp_cipher_alg) { 2508 case CRYPTO_AES_CBC: 2509 return (SCMD_CIPH_MODE_AES_CBC); 2510 case CRYPTO_AES_ICM: 2511 return (SCMD_CIPH_MODE_AES_CTR); 2512 case CRYPTO_AES_NIST_GCM_16: 2513 return (SCMD_CIPH_MODE_AES_GCM); 2514 case CRYPTO_AES_XTS: 2515 return (SCMD_CIPH_MODE_AES_XTS); 2516 case CRYPTO_AES_CCM_16: 2517 return (SCMD_CIPH_MODE_AES_CCM); 2518 default: 2519 return (SCMD_CIPH_MODE_NOP); 2520 } 2521 } 2522 2523 static int 2524 ccr_probesession(device_t dev, const struct crypto_session_params *csp) 2525 { 2526 unsigned int cipher_mode; 2527 2528 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 2529 0) 2530 return (EINVAL); 2531 switch (csp->csp_mode) { 2532 case CSP_MODE_DIGEST: 2533 if (!ccr_auth_supported(csp)) 2534 return (EINVAL); 2535 break; 2536 case CSP_MODE_CIPHER: 2537 if (!ccr_cipher_supported(csp)) 2538 return (EINVAL); 2539 break; 2540 case CSP_MODE_AEAD: 2541 switch (csp->csp_cipher_alg) { 2542 case CRYPTO_AES_NIST_GCM_16: 2543 if (csp->csp_ivlen != AES_GCM_IV_LEN) 2544 return (EINVAL); 2545 if (csp->csp_auth_mlen < 0 || 2546 csp->csp_auth_mlen > AES_GMAC_HASH_LEN) 2547 return (EINVAL); 2548 break; 2549 case CRYPTO_AES_CCM_16: 2550 break; 2551 default: 2552 return (EINVAL); 2553 } 2554 break; 2555 case CSP_MODE_ETA: 2556 if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp)) 2557 return (EINVAL); 2558 break; 2559 default: 2560 return (EINVAL); 2561 } 2562 2563 if (csp->csp_cipher_klen != 0) { 2564 cipher_mode = ccr_cipher_mode(csp); 2565 if (cipher_mode == SCMD_CIPH_MODE_NOP) 2566 return (EINVAL); 2567 } 2568 2569 return (CRYPTODEV_PROBE_HARDWARE); 2570 } 2571 2572 /* 2573 * Select an available port with the lowest number of active sessions. 2574 */ 2575 static struct ccr_port * 2576 ccr_choose_port(struct ccr_softc *sc) 2577 { 2578 struct ccr_port *best, *p; 2579 int i; 2580 2581 mtx_assert(&sc->lock, MA_OWNED); 2582 best = NULL; 2583 for (i = 0; i < nitems(sc->ports); i++) { 2584 p = &sc->ports[i]; 2585 2586 /* Ignore non-existent ports. 
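 * ccr_init_port() is only invoked for the adapter's real ports,
 * so unused slots in sc->ports[] keep a NULL rxq.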
*/ 2587 if (p->rxq == NULL) 2588 continue; 2589 2590 /* 2591 * XXX: Ignore ports whose queues aren't initialized. 2592 * This is racy as the rxq can be destroyed by the 2593 * associated VI detaching. Eventually ccr should use 2594 * dedicated queues. 2595 */ 2596 if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL) 2597 continue; 2598 2599 if ((sc->port_mask & (1u << i)) == 0) 2600 continue; 2601 2602 if (best == NULL || 2603 p->active_sessions < best->active_sessions) 2604 best = p; 2605 } 2606 return (best); 2607 } 2608 2609 static void 2610 ccr_delete_session(struct ccr_session *s) 2611 { 2612 sglist_free(s->sg_input); 2613 sglist_free(s->sg_output); 2614 sglist_free(s->sg_ulptx); 2615 sglist_free(s->sg_dsgl); 2616 mtx_destroy(&s->lock); 2617 } 2618 2619 static int 2620 ccr_newsession(device_t dev, crypto_session_t cses, 2621 const struct crypto_session_params *csp) 2622 { 2623 struct ccr_softc *sc; 2624 struct ccr_session *s; 2625 const struct auth_hash *auth_hash; 2626 unsigned int auth_mode, cipher_mode, mk_size; 2627 unsigned int partial_digest_len; 2628 2629 switch (csp->csp_auth_alg) { 2630 case CRYPTO_SHA1: 2631 case CRYPTO_SHA1_HMAC: 2632 auth_hash = &auth_hash_hmac_sha1; 2633 auth_mode = SCMD_AUTH_MODE_SHA1; 2634 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; 2635 partial_digest_len = SHA1_HASH_LEN; 2636 break; 2637 case CRYPTO_SHA2_224: 2638 case CRYPTO_SHA2_224_HMAC: 2639 auth_hash = &auth_hash_hmac_sha2_224; 2640 auth_mode = SCMD_AUTH_MODE_SHA224; 2641 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2642 partial_digest_len = SHA2_256_HASH_LEN; 2643 break; 2644 case CRYPTO_SHA2_256: 2645 case CRYPTO_SHA2_256_HMAC: 2646 auth_hash = &auth_hash_hmac_sha2_256; 2647 auth_mode = SCMD_AUTH_MODE_SHA256; 2648 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2649 partial_digest_len = SHA2_256_HASH_LEN; 2650 break; 2651 case CRYPTO_SHA2_384: 2652 case CRYPTO_SHA2_384_HMAC: 2653 auth_hash = &auth_hash_hmac_sha2_384; 2654 auth_mode = SCMD_AUTH_MODE_SHA512_384; 2655 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 2656 partial_digest_len = SHA2_512_HASH_LEN; 2657 break; 2658 case CRYPTO_SHA2_512: 2659 case CRYPTO_SHA2_512_HMAC: 2660 auth_hash = &auth_hash_hmac_sha2_512; 2661 auth_mode = SCMD_AUTH_MODE_SHA512_512; 2662 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 2663 partial_digest_len = SHA2_512_HASH_LEN; 2664 break; 2665 default: 2666 auth_hash = NULL; 2667 auth_mode = SCMD_AUTH_MODE_NOP; 2668 mk_size = 0; 2669 partial_digest_len = 0; 2670 break; 2671 } 2672 2673 cipher_mode = ccr_cipher_mode(csp); 2674 2675 #ifdef INVARIANTS 2676 switch (csp->csp_mode) { 2677 case CSP_MODE_CIPHER: 2678 if (cipher_mode == SCMD_CIPH_MODE_NOP || 2679 cipher_mode == SCMD_CIPH_MODE_AES_GCM || 2680 cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2681 panic("invalid cipher algo"); 2682 break; 2683 case CSP_MODE_DIGEST: 2684 if (auth_mode == SCMD_AUTH_MODE_NOP) 2685 panic("invalid auth algo"); 2686 break; 2687 case CSP_MODE_AEAD: 2688 if (cipher_mode != SCMD_CIPH_MODE_AES_GCM && 2689 cipher_mode != SCMD_CIPH_MODE_AES_CCM) 2690 panic("invalid aead cipher algo"); 2691 if (auth_mode != SCMD_AUTH_MODE_NOP) 2692 panic("invalid aead auth algo"); 2693 break; 2694 case CSP_MODE_ETA: 2695 if (cipher_mode == SCMD_CIPH_MODE_NOP || 2696 cipher_mode == SCMD_CIPH_MODE_AES_GCM || 2697 cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2698 panic("invalid cipher algo"); 2699 if (auth_mode == SCMD_AUTH_MODE_NOP) 2700 panic("invalid auth algo"); 2701 break; 2702 default: 2703 panic("invalid csp mode"); 2704 } 2705 #endif 2706 2707 s = crypto_get_driver_session(cses); 2708
mtx_init(&s->lock, "ccr session", NULL, MTX_DEF); 2709 s->sg_input = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); 2710 s->sg_output = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); 2711 s->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); 2712 s->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_NOWAIT); 2713 if (s->sg_input == NULL || s->sg_output == NULL || 2714 s->sg_ulptx == NULL || s->sg_dsgl == NULL) { 2715 ccr_delete_session(s); 2716 return (ENOMEM); 2717 } 2718 2719 sc = device_get_softc(dev); 2720 2721 mtx_lock(&sc->lock); 2722 if (sc->detaching) { 2723 mtx_unlock(&sc->lock); 2724 ccr_delete_session(s); 2725 return (ENXIO); 2726 } 2727 2728 s->port = ccr_choose_port(sc); 2729 if (s->port == NULL) { 2730 mtx_unlock(&sc->lock); 2731 ccr_delete_session(s); 2732 return (ENXIO); 2733 } 2734 2735 switch (csp->csp_mode) { 2736 case CSP_MODE_AEAD: 2737 if (cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2738 s->mode = CCM; 2739 else 2740 s->mode = GCM; 2741 break; 2742 case CSP_MODE_ETA: 2743 s->mode = ETA; 2744 break; 2745 case CSP_MODE_DIGEST: 2746 if (csp->csp_auth_klen != 0) 2747 s->mode = HMAC; 2748 else 2749 s->mode = HASH; 2750 break; 2751 case CSP_MODE_CIPHER: 2752 s->mode = BLKCIPHER; 2753 break; 2754 } 2755 2756 if (s->mode == GCM) { 2757 if (csp->csp_auth_mlen == 0) 2758 s->gmac.hash_len = AES_GMAC_HASH_LEN; 2759 else 2760 s->gmac.hash_len = csp->csp_auth_mlen; 2761 t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen, 2762 s->gmac.ghash_h); 2763 } else if (s->mode == CCM) { 2764 if (csp->csp_auth_mlen == 0) 2765 s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN; 2766 else 2767 s->ccm_mac.hash_len = csp->csp_auth_mlen; 2768 } else if (auth_mode != SCMD_AUTH_MODE_NOP) { 2769 s->hmac.auth_hash = auth_hash; 2770 s->hmac.auth_mode = auth_mode; 2771 s->hmac.mk_size = mk_size; 2772 s->hmac.partial_digest_len = partial_digest_len; 2773 if (csp->csp_auth_mlen == 0) 2774 s->hmac.hash_len = auth_hash->hashsize; 2775 else 2776 s->hmac.hash_len = csp->csp_auth_mlen; 2777 if (csp->csp_auth_key != NULL) 2778 t4_init_hmac_digest(auth_hash, partial_digest_len, 2779 csp->csp_auth_key, csp->csp_auth_klen, 2780 s->hmac.pads); 2781 else 2782 ccr_init_hash_digest(s); 2783 } 2784 if (cipher_mode != SCMD_CIPH_MODE_NOP) { 2785 s->blkcipher.cipher_mode = cipher_mode; 2786 s->blkcipher.iv_len = csp->csp_ivlen; 2787 if (csp->csp_cipher_key != NULL) 2788 ccr_aes_setkey(s, csp->csp_cipher_key, 2789 csp->csp_cipher_klen); 2790 } 2791 2792 s->port->active_sessions++; 2793 mtx_unlock(&sc->lock); 2794 return (0); 2795 } 2796 2797 static void 2798 ccr_freesession(device_t dev, crypto_session_t cses) 2799 { 2800 struct ccr_softc *sc; 2801 struct ccr_session *s; 2802 2803 sc = device_get_softc(dev); 2804 s = crypto_get_driver_session(cses); 2805 #ifdef INVARIANTS 2806 if (s->pending != 0) 2807 device_printf(dev, 2808 "session %p freed with %d pending requests\n", s, 2809 s->pending); 2810 #endif 2811 mtx_lock(&sc->lock); 2812 s->port->active_sessions--; 2813 mtx_unlock(&sc->lock); 2814 ccr_delete_session(s); 2815 } 2816 2817 static int 2818 ccr_process(device_t dev, struct cryptop *crp, int hint) 2819 { 2820 const struct crypto_session_params *csp; 2821 struct ccr_softc *sc; 2822 struct ccr_session *s; 2823 int error; 2824 2825 csp = crypto_get_params(crp->crp_session); 2826 s = crypto_get_driver_session(crp->crp_session); 2827 sc = device_get_softc(dev); 2828 2829 mtx_lock(&s->lock); 2830 error = ccr_populate_sglist(s->sg_input, &crp->crp_buf); 2831 if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) 2832 error = ccr_populate_sglist(s->sg_output, 
&crp->crp_obuf); 2833 if (error) { 2834 counter_u64_add(sc->stats_sglist_error, 1); 2835 goto out; 2836 } 2837 2838 switch (s->mode) { 2839 case HASH: 2840 error = ccr_hash(sc, s, crp); 2841 if (error == 0) 2842 counter_u64_add(sc->stats_hash, 1); 2843 break; 2844 case HMAC: 2845 if (crp->crp_auth_key != NULL) 2846 t4_init_hmac_digest(s->hmac.auth_hash, 2847 s->hmac.partial_digest_len, crp->crp_auth_key, 2848 csp->csp_auth_klen, s->hmac.pads); 2849 error = ccr_hash(sc, s, crp); 2850 if (error == 0) 2851 counter_u64_add(sc->stats_hmac, 1); 2852 break; 2853 case BLKCIPHER: 2854 if (crp->crp_cipher_key != NULL) 2855 ccr_aes_setkey(s, crp->crp_cipher_key, 2856 csp->csp_cipher_klen); 2857 error = ccr_blkcipher(sc, s, crp); 2858 if (error == 0) { 2859 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2860 counter_u64_add(sc->stats_blkcipher_encrypt, 1); 2861 else 2862 counter_u64_add(sc->stats_blkcipher_decrypt, 1); 2863 } 2864 break; 2865 case ETA: 2866 if (crp->crp_auth_key != NULL) 2867 t4_init_hmac_digest(s->hmac.auth_hash, 2868 s->hmac.partial_digest_len, crp->crp_auth_key, 2869 csp->csp_auth_klen, s->hmac.pads); 2870 if (crp->crp_cipher_key != NULL) 2871 ccr_aes_setkey(s, crp->crp_cipher_key, 2872 csp->csp_cipher_klen); 2873 error = ccr_eta(sc, s, crp); 2874 if (error == 0) { 2875 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2876 counter_u64_add(sc->stats_eta_encrypt, 1); 2877 else 2878 counter_u64_add(sc->stats_eta_decrypt, 1); 2879 } 2880 break; 2881 case GCM: 2882 if (crp->crp_cipher_key != NULL) { 2883 t4_init_gmac_hash(crp->crp_cipher_key, 2884 csp->csp_cipher_klen, s->gmac.ghash_h); 2885 ccr_aes_setkey(s, crp->crp_cipher_key, 2886 csp->csp_cipher_klen); 2887 } 2888 if (crp->crp_payload_length == 0) { 2889 mtx_unlock(&s->lock); 2890 ccr_gcm_soft(s, crp); 2891 return (0); 2892 } 2893 error = ccr_gcm(sc, s, crp); 2894 if (error == EMSGSIZE || error == EFBIG) { 2895 counter_u64_add(sc->stats_sw_fallback, 1); 2896 mtx_unlock(&s->lock); 2897 ccr_gcm_soft(s, crp); 2898 return (0); 2899 } 2900 if (error == 0) { 2901 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2902 counter_u64_add(sc->stats_gcm_encrypt, 1); 2903 else 2904 counter_u64_add(sc->stats_gcm_decrypt, 1); 2905 } 2906 break; 2907 case CCM: 2908 if (crp->crp_cipher_key != NULL) { 2909 ccr_aes_setkey(s, crp->crp_cipher_key, 2910 csp->csp_cipher_klen); 2911 } 2912 error = ccr_ccm(sc, s, crp); 2913 if (error == EMSGSIZE || error == EFBIG) { 2914 counter_u64_add(sc->stats_sw_fallback, 1); 2915 mtx_unlock(&s->lock); 2916 ccr_ccm_soft(s, crp); 2917 return (0); 2918 } 2919 if (error == 0) { 2920 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2921 counter_u64_add(sc->stats_ccm_encrypt, 1); 2922 else 2923 counter_u64_add(sc->stats_ccm_decrypt, 1); 2924 } 2925 break; 2926 } 2927 2928 if (error == 0) { 2929 #ifdef INVARIANTS 2930 s->pending++; 2931 #endif 2932 counter_u64_add(sc->stats_inflight, 1); 2933 counter_u64_add(s->port->stats_queued, 1); 2934 } else 2935 counter_u64_add(sc->stats_process_error, 1); 2936 2937 out: 2938 mtx_unlock(&s->lock); 2939 2940 if (error) { 2941 crp->crp_etype = error; 2942 crypto_done(crp); 2943 } 2944 2945 return (0); 2946 } 2947 2948 static int 2949 do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss, 2950 struct mbuf *m) 2951 { 2952 struct ccr_softc *sc = iq->adapter->ccr_softc; 2953 struct ccr_session *s; 2954 const struct cpl_fw6_pld *cpl; 2955 struct cryptop *crp; 2956 uint32_t status; 2957 int error; 2958 2959 if (m != NULL) 2960 cpl = mtod(m, const void *); 2961 else 2962 cpl = (const void *)(rss + 1); 2963 2964 crp = (struct 
cryptop *)(uintptr_t)be64toh(cpl->data[1]); 2965 s = crypto_get_driver_session(crp->crp_session); 2966 status = be64toh(cpl->data[0]); 2967 if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status)) 2968 error = EBADMSG; 2969 else 2970 error = 0; 2971 2972 #ifdef INVARIANTS 2973 mtx_lock(&s->lock); 2974 s->pending--; 2975 mtx_unlock(&s->lock); 2976 #endif 2977 counter_u64_add(sc->stats_inflight, -1); 2978 counter_u64_add(s->port->stats_completed, 1); 2979 2980 switch (s->mode) { 2981 case HASH: 2982 case HMAC: 2983 error = ccr_hash_done(sc, s, crp, cpl, error); 2984 break; 2985 case BLKCIPHER: 2986 error = ccr_blkcipher_done(sc, s, crp, cpl, error); 2987 break; 2988 case ETA: 2989 error = ccr_eta_done(sc, s, crp, cpl, error); 2990 break; 2991 case GCM: 2992 error = ccr_gcm_done(sc, s, crp, cpl, error); 2993 break; 2994 case CCM: 2995 error = ccr_ccm_done(sc, s, crp, cpl, error); 2996 break; 2997 } 2998 2999 if (error == EBADMSG) { 3000 if (CHK_MAC_ERR_BIT(status)) 3001 counter_u64_add(sc->stats_mac_error, 1); 3002 if (CHK_PAD_ERR_BIT(status)) 3003 counter_u64_add(sc->stats_pad_error, 1); 3004 } 3005 crp->crp_etype = error; 3006 crypto_done(crp); 3007 m_freem(m); 3008 return (0); 3009 } 3010 3011 static int 3012 ccr_modevent(module_t mod, int cmd, void *arg) 3013 { 3014 3015 switch (cmd) { 3016 case MOD_LOAD: 3017 t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld); 3018 return (0); 3019 case MOD_UNLOAD: 3020 t4_register_cpl_handler(CPL_FW6_PLD, NULL); 3021 return (0); 3022 default: 3023 return (EOPNOTSUPP); 3024 } 3025 } 3026 3027 static device_method_t ccr_methods[] = { 3028 DEVMETHOD(device_identify, ccr_identify), 3029 DEVMETHOD(device_probe, ccr_probe), 3030 DEVMETHOD(device_attach, ccr_attach), 3031 DEVMETHOD(device_detach, ccr_detach), 3032 3033 DEVMETHOD(cryptodev_probesession, ccr_probesession), 3034 DEVMETHOD(cryptodev_newsession, ccr_newsession), 3035 DEVMETHOD(cryptodev_freesession, ccr_freesession), 3036 DEVMETHOD(cryptodev_process, ccr_process), 3037 3038 DEVMETHOD_END 3039 }; 3040 3041 static driver_t ccr_driver = { 3042 "ccr", 3043 ccr_methods, 3044 sizeof(struct ccr_softc) 3045 }; 3046 3047 static devclass_t ccr_devclass; 3048 3049 DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL); 3050 MODULE_VERSION(ccr, 1); 3051 MODULE_DEPEND(ccr, crypto, 1, 1, 1); 3052 MODULE_DEPEND(ccr, t6nex, 1, 1, 1); 3053
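/*
 * A minimal standalone sketch (compiled out; not part of the driver)
 * of how ccr_ccm() derives the 16-byte counter-mode IV from a CCM
 * nonce: the flags byte holds L' = L - 1 with L = 15 - nonce_len,
 * the nonce follows, and the trailing counter octets start at zero.
 * The function name and parameters are illustrative assumptions.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void
example_ccm_counter_iv(uint8_t iv[16], const uint8_t *nonce,
    size_t nonce_len)
{
	/* CCM nonces are 7 to 13 bytes long. */
	memset(iv, 0, 16);		/* counter portion starts at 0 */
	iv[0] = (15 - nonce_len) - 1;	/* flags byte: L' = L - 1 */
	memcpy(iv + 1, nonce, nonce_len);
}
#endif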