/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"

/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       |  ----- For requests with AES
 * +-------------------------------+
 * | Hash state                    |  ----- For hash-only requests
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+   +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        |  ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+   +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                |  ----- Only for HMAC/hash-only requests
 * +-------------------------------+
 * | IV                            |  ----- If immediate IV
 * +-------------------------------+
 * | Payload                       |  ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+   +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
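 * For example, a 20-byte SHA-1 IPAD/OPAD partial hash is padded out to
 * 32 bytes, and a 24-byte AES-192 key occupies 32 bytes of key context.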
89 * For HMAC requests, the key consists of the partial hash of the IPAD 90 * followed by the partial hash of the OPAD. 91 * 92 * Replies consist of: 93 * 94 * +-------------------------------+ 95 * | struct cpl_fw6_pld | 96 * +-------------------------------+ 97 * | hash digest | ----- For HMAC request with 98 * +-------------------------------+ 'hash_size' set in work request 99 * 100 * A 32-bit big-endian error status word is supplied in the last 4 101 * bytes of data[0] in the CPL_FW6_PLD message. bit 0 indicates a 102 * "MAC" error and bit 1 indicates a "PAD" error. 103 * 104 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message 105 * in the request is returned in data[1] of the CPL_FW6_PLD message. 106 * 107 * For block cipher replies, the updated IV is supplied in data[2] and 108 * data[3] of the CPL_FW6_PLD message. 109 * 110 * For hash replies where the work request set 'hash_size' to request 111 * a copy of the hash in the reply, the hash digest is supplied 112 * immediately following the CPL_FW6_PLD message. 113 */ 114 115 /* 116 * The crypto engine supports a maximum AAD size of 511 bytes. 117 */ 118 #define MAX_AAD_LEN 511 119 120 /* 121 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG 122 * entries. While the CPL includes a 16-bit length field, the T6 can 123 * sometimes hang if an error occurs while processing a request with a 124 * single DSGL entry larger than 2k. 125 */ 126 #define MAX_RX_PHYS_DSGL_SGE 32 127 #define DSGL_SGE_MAXLEN 2048 128 129 /* 130 * The adapter only supports requests with a total input or output 131 * length of 64k-1 or smaller. Longer requests either result in hung 132 * requests or incorrect results. 133 */ 134 #define MAX_REQUEST_SIZE 65535 135 136 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto"); 137 138 struct ccr_session_hmac { 139 struct auth_hash *auth_hash; 140 int hash_len; 141 unsigned int partial_digest_len; 142 unsigned int auth_mode; 143 unsigned int mk_size; 144 char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2]; 145 }; 146 147 struct ccr_session_gmac { 148 int hash_len; 149 char ghash_h[GMAC_BLOCK_LEN]; 150 }; 151 152 struct ccr_session_ccm_mac { 153 int hash_len; 154 }; 155 156 struct ccr_session_blkcipher { 157 unsigned int cipher_mode; 158 unsigned int key_len; 159 unsigned int iv_len; 160 __be32 key_ctx_hdr; 161 char enckey[CHCR_AES_MAX_KEY_LEN]; 162 char deckey[CHCR_AES_MAX_KEY_LEN]; 163 }; 164 165 struct ccr_port { 166 struct sge_wrq *txq; 167 struct sge_rxq *rxq; 168 int rx_channel_id; 169 int tx_channel_id; 170 u_int active_sessions; 171 172 counter_u64_t stats_queued; 173 counter_u64_t stats_completed; 174 }; 175 176 struct ccr_session { 177 #ifdef INVARIANTS 178 int pending; 179 #endif 180 enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode; 181 struct ccr_port *port; 182 union { 183 struct ccr_session_hmac hmac; 184 struct ccr_session_gmac gmac; 185 struct ccr_session_ccm_mac ccm_mac; 186 }; 187 struct ccr_session_blkcipher blkcipher; 188 struct mtx lock; 189 190 /* 191 * Pre-allocate S/G lists used when preparing a work request. 192 * 'sg_input' contains an sglist describing the entire input 193 * buffer for a 'struct cryptop'. 'sg_output' contains an 194 * sglist describing the entire output buffer. 'sg_ulptx' is 195 * used to describe the data the engine should DMA as input 196 * via ULPTX_SGL. 'sg_dsgl' is used to describe the 197 * destination that cipher text and a tag should be written 198 * to. 
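	 * All four lists are pre-allocated when the session is created so
	 * that no sglist allocations are needed in the request path; the
	 * per-request helpers carve the regions they need out of
	 * 'sg_input'/'sg_output' with sglist_append_sglist().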
199 */ 200 struct sglist *sg_input; 201 struct sglist *sg_output; 202 struct sglist *sg_ulptx; 203 struct sglist *sg_dsgl; 204 }; 205 206 struct ccr_softc { 207 struct adapter *adapter; 208 device_t dev; 209 uint32_t cid; 210 struct mtx lock; 211 bool detaching; 212 struct ccr_port ports[MAX_NPORTS]; 213 u_int port_mask; 214 int first_rxq_id; 215 216 /* 217 * Pre-allocate a dummy output buffer for the IV and AAD for 218 * AEAD requests. 219 */ 220 char *iv_aad_buf; 221 struct sglist *sg_iv_aad; 222 223 /* Statistics. */ 224 counter_u64_t stats_blkcipher_encrypt; 225 counter_u64_t stats_blkcipher_decrypt; 226 counter_u64_t stats_hash; 227 counter_u64_t stats_hmac; 228 counter_u64_t stats_eta_encrypt; 229 counter_u64_t stats_eta_decrypt; 230 counter_u64_t stats_gcm_encrypt; 231 counter_u64_t stats_gcm_decrypt; 232 counter_u64_t stats_ccm_encrypt; 233 counter_u64_t stats_ccm_decrypt; 234 counter_u64_t stats_wr_nomem; 235 counter_u64_t stats_inflight; 236 counter_u64_t stats_mac_error; 237 counter_u64_t stats_pad_error; 238 counter_u64_t stats_sglist_error; 239 counter_u64_t stats_process_error; 240 counter_u64_t stats_sw_fallback; 241 }; 242 243 /* 244 * Crypto requests involve two kind of scatter/gather lists. 245 * 246 * Non-hash-only requests require a PHYS_DSGL that describes the 247 * location to store the results of the encryption or decryption 248 * operation. This SGL uses a different format (PHYS_DSGL) and should 249 * exclude the skip bytes at the start of the data as well as any AAD 250 * or IV. For authenticated encryption requests it should include the 251 * destination of the hash or tag. 252 * 253 * The input payload may either be supplied inline as immediate data, 254 * or via a standard ULP_TX SGL. This SGL should include AAD, 255 * ciphertext, and the hash or tag for authenticated decryption 256 * requests. 257 * 258 * These scatter/gather lists can describe different subsets of the 259 * buffers described by the crypto operation. ccr_populate_sglist() 260 * generates a scatter/gather list that covers an entire crypto 261 * operation buffer that is then used to construct the other 262 * scatter/gather lists. 263 */ 264 static int 265 ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb) 266 { 267 int error; 268 269 sglist_reset(sg); 270 switch (cb->cb_type) { 271 case CRYPTO_BUF_MBUF: 272 error = sglist_append_mbuf(sg, cb->cb_mbuf); 273 break; 274 case CRYPTO_BUF_SINGLE_MBUF: 275 error = sglist_append_single_mbuf(sg, cb->cb_mbuf); 276 break; 277 case CRYPTO_BUF_UIO: 278 error = sglist_append_uio(sg, cb->cb_uio); 279 break; 280 case CRYPTO_BUF_CONTIG: 281 error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len); 282 break; 283 case CRYPTO_BUF_VMPAGE: 284 error = sglist_append_vmpages(sg, cb->cb_vm_page, 285 cb->cb_vm_page_len, cb->cb_vm_page_offset); 286 break; 287 default: 288 error = EINVAL; 289 } 290 return (error); 291 } 292 293 /* 294 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple 295 * segments. 296 */ 297 static int 298 ccr_count_sgl(struct sglist *sg, int maxsegsize) 299 { 300 int i, nsegs; 301 302 nsegs = 0; 303 for (i = 0; i < sg->sg_nseg; i++) 304 nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize); 305 return (nsegs); 306 } 307 308 /* These functions deal with PHYS_DSGL for the reply buffer. 
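 * Entries are packed eight to a 'struct phys_sge_pairs' (eight 16-bit
 * lengths plus eight 64-bit addresses), so ccr_phys_dsgl_len() below
 * charges a partial final group for all eight length slots and rounds
 * the address slots up to an even pair; a 3-entry list, for example,
 * costs 8 * 2 + 4 * 8 = 48 bytes.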
*/ 309 static inline int 310 ccr_phys_dsgl_len(int nsegs) 311 { 312 int len; 313 314 len = (nsegs / 8) * sizeof(struct phys_sge_pairs); 315 if ((nsegs % 8) != 0) { 316 len += sizeof(uint16_t) * 8; 317 len += roundup2(nsegs % 8, 2) * sizeof(uint64_t); 318 } 319 return (len); 320 } 321 322 static void 323 ccr_write_phys_dsgl(struct ccr_session *s, void *dst, int nsegs) 324 { 325 struct sglist *sg; 326 struct cpl_rx_phys_dsgl *cpl; 327 struct phys_sge_pairs *sgl; 328 vm_paddr_t paddr; 329 size_t seglen; 330 u_int i, j; 331 332 sg = s->sg_dsgl; 333 cpl = dst; 334 cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) | 335 V_CPL_RX_PHYS_DSGL_ISRDMA(0)); 336 cpl->pcirlxorder_to_noofsgentr = htobe32( 337 V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) | 338 V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) | 339 V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) | 340 V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs)); 341 cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; 342 cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id); 343 cpl->rss_hdr_int.hash_val = 0; 344 cpl->rss_hdr_int.channel = s->port->rx_channel_id; 345 sgl = (struct phys_sge_pairs *)(cpl + 1); 346 j = 0; 347 for (i = 0; i < sg->sg_nseg; i++) { 348 seglen = sg->sg_segs[i].ss_len; 349 paddr = sg->sg_segs[i].ss_paddr; 350 do { 351 sgl->addr[j] = htobe64(paddr); 352 if (seglen > DSGL_SGE_MAXLEN) { 353 sgl->len[j] = htobe16(DSGL_SGE_MAXLEN); 354 paddr += DSGL_SGE_MAXLEN; 355 seglen -= DSGL_SGE_MAXLEN; 356 } else { 357 sgl->len[j] = htobe16(seglen); 358 seglen = 0; 359 } 360 j++; 361 if (j == 8) { 362 sgl++; 363 j = 0; 364 } 365 } while (seglen != 0); 366 } 367 MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs); 368 } 369 370 /* These functions deal with the ULPTX_SGL for input payload. */ 371 static inline int 372 ccr_ulptx_sgl_len(int nsegs) 373 { 374 u_int n; 375 376 nsegs--; /* first segment is part of ulptx_sgl */ 377 n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 378 return (roundup2(n, 16)); 379 } 380 381 static void 382 ccr_write_ulptx_sgl(struct ccr_session *s, void *dst, int nsegs) 383 { 384 struct ulptx_sgl *usgl; 385 struct sglist *sg; 386 struct sglist_seg *ss; 387 int i; 388 389 sg = s->sg_ulptx; 390 MPASS(nsegs == sg->sg_nseg); 391 ss = &sg->sg_segs[0]; 392 usgl = dst; 393 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 394 V_ULPTX_NSGE(nsegs)); 395 usgl->len0 = htobe32(ss->ss_len); 396 usgl->addr0 = htobe64(ss->ss_paddr); 397 ss++; 398 for (i = 0; i < sg->sg_nseg - 1; i++) { 399 usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len); 400 usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr); 401 ss++; 402 } 403 } 404 405 static bool 406 ccr_use_imm_data(u_int transhdr_len, u_int input_len) 407 { 408 409 if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN) 410 return (false); 411 if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) > 412 SGE_MAX_WR_LEN) 413 return (false); 414 return (true); 415 } 416 417 static void 418 ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s, 419 struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len, 420 u_int sgl_len, u_int hash_size, struct cryptop *crp) 421 { 422 u_int cctx_size, idata_len; 423 424 cctx_size = sizeof(struct _key_ctx) + kctx_len; 425 crwr->wreq.op_to_cctx_size = htobe32( 426 V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) | 427 V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) | 428 V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) | 429 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) | 430 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4)); 431 
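	/*
	 * The remaining header fields carry the total work request length
	 * in 16-byte units, the rx queue to which the CPL_FW6_PLD reply
	 * should be steered, and a 'cookie' holding the cryptop pointer
	 * that is echoed back in data[1] of the reply (see the reply
	 * layout described at the top of this file).
	 */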
crwr->wreq.len16_pkd = htobe32( 432 V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16)); 433 crwr->wreq.session_id = 0; 434 crwr->wreq.rx_chid_to_rx_q_id = htobe32( 435 V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->rx_channel_id) | 436 V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) | 437 V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) | 438 V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) | 439 V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) | 440 V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) | /* unused in firmware */ 441 V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id)); 442 crwr->wreq.key_addr = 0; 443 crwr->wreq.pld_size_hash_size = htobe32( 444 V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) | 445 V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size)); 446 crwr->wreq.cookie = htobe64((uintptr_t)crp); 447 448 crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 449 V_ULP_TXPKT_DATAMODIFY(0) | 450 V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) | 451 V_ULP_TXPKT_DEST(0) | 452 V_ULP_TXPKT_FID(sc->first_rxq_id) | V_ULP_TXPKT_RO(1)); 453 crwr->ulptx.len = htobe32( 454 ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16)); 455 456 crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 457 V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0)); 458 idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len; 459 if (imm_len % 16 != 0) 460 idata_len -= 16 - imm_len % 16; 461 crwr->sc_imm.len = htobe32(idata_len); 462 } 463 464 static int 465 ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 466 { 467 struct chcr_wr *crwr; 468 struct wrqe *wr; 469 struct auth_hash *axf; 470 char *dst; 471 u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len; 472 u_int hmac_ctrl, imm_len, iopad_size; 473 int error, sgl_nsegs, sgl_len, use_opad; 474 475 /* Reject requests with too large of an input buffer. */ 476 if (crp->crp_payload_length > MAX_REQUEST_SIZE) 477 return (EFBIG); 478 479 axf = s->hmac.auth_hash; 480 481 if (s->mode == HMAC) { 482 use_opad = 1; 483 hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC; 484 } else { 485 use_opad = 0; 486 hmac_ctrl = SCMD_HMAC_CTRL_NOP; 487 } 488 489 /* PADs must be 128-bit aligned. */ 490 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 491 492 /* 493 * The 'key' part of the context includes the aligned IPAD and 494 * OPAD. 
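	 * The precomputed partial hashes live in s->hmac.pads and are
	 * copied into the key context below; for plain hash sessions only
	 * the first iopad_size bytes (the hash state) are used.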
495 */ 496 kctx_len = iopad_size; 497 if (use_opad) 498 kctx_len += iopad_size; 499 hash_size_in_response = axf->hashsize; 500 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); 501 502 if (crp->crp_payload_length == 0) { 503 imm_len = axf->blocksize; 504 sgl_nsegs = 0; 505 sgl_len = 0; 506 } else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) { 507 imm_len = crp->crp_payload_length; 508 sgl_nsegs = 0; 509 sgl_len = 0; 510 } else { 511 imm_len = 0; 512 sglist_reset(s->sg_ulptx); 513 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 514 crp->crp_payload_start, crp->crp_payload_length); 515 if (error) 516 return (error); 517 sgl_nsegs = s->sg_ulptx->sg_nseg; 518 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 519 } 520 521 wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; 522 if (wr_len > SGE_MAX_WR_LEN) 523 return (EFBIG); 524 wr = alloc_wrqe(wr_len, s->port->txq); 525 if (wr == NULL) { 526 counter_u64_add(sc->stats_wr_nomem, 1); 527 return (ENOMEM); 528 } 529 crwr = wrtod(wr); 530 memset(crwr, 0, wr_len); 531 532 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 533 hash_size_in_response, crp); 534 535 crwr->sec_cpl.op_ivinsrtofst = htobe32( 536 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 537 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) | 538 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 539 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 540 V_CPL_TX_SEC_PDU_IVINSRTOFST(0)); 541 542 crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ? 543 axf->blocksize : crp->crp_payload_length); 544 545 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 546 V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0)); 547 548 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 549 crwr->sec_cpl.seqno_numivs = htobe32( 550 V_SCMD_SEQ_NO_CTRL(0) | 551 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 552 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) | 553 V_SCMD_AUTH_MODE(s->hmac.auth_mode) | 554 V_SCMD_HMAC_CTRL(hmac_ctrl)); 555 crwr->sec_cpl.ivgen_hdrlen = htobe32( 556 V_SCMD_LAST_FRAG(0) | 557 V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) | 558 V_SCMD_MAC_ONLY(1)); 559 560 memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len); 561 562 /* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. 
*/ 563 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; 564 crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | 565 V_KEY_CONTEXT_OPAD_PRESENT(use_opad) | 566 V_KEY_CONTEXT_SALT_PRESENT(1) | 567 V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) | 568 V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1)); 569 570 dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES; 571 if (crp->crp_payload_length == 0) { 572 dst[0] = 0x80; 573 if (s->mode == HMAC) 574 *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) = 575 htobe64(axf->blocksize << 3); 576 } else if (imm_len != 0) 577 crypto_copydata(crp, crp->crp_payload_start, 578 crp->crp_payload_length, dst); 579 else 580 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 581 582 /* XXX: TODO backpressure */ 583 t4_wrq_tx(sc->adapter, wr); 584 585 return (0); 586 } 587 588 static int 589 ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, 590 const struct cpl_fw6_pld *cpl, int error) 591 { 592 uint8_t hash[HASH_MAX_LEN]; 593 594 if (error) 595 return (error); 596 597 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { 598 crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len, 599 hash); 600 if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0) 601 return (EBADMSG); 602 } else 603 crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len, 604 (cpl + 1)); 605 return (0); 606 } 607 608 static int 609 ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 610 { 611 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 612 struct chcr_wr *crwr; 613 struct wrqe *wr; 614 char *dst; 615 u_int kctx_len, key_half, op_type, transhdr_len, wr_len; 616 u_int imm_len, iv_len; 617 int dsgl_nsegs, dsgl_len; 618 int sgl_nsegs, sgl_len; 619 int error; 620 621 if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) 622 return (EINVAL); 623 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC && 624 (crp->crp_payload_length % AES_BLOCK_LEN) != 0) 625 return (EINVAL); 626 627 /* Reject requests with too large of an input buffer. */ 628 if (crp->crp_payload_length > MAX_REQUEST_SIZE) 629 return (EFBIG); 630 631 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 632 op_type = CHCR_ENCRYPT_OP; 633 else 634 op_type = CHCR_DECRYPT_OP; 635 636 sglist_reset(s->sg_dsgl); 637 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 638 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 639 crp->crp_payload_output_start, crp->crp_payload_length); 640 else 641 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 642 crp->crp_payload_start, crp->crp_payload_length); 643 if (error) 644 return (error); 645 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); 646 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 647 return (EFBIG); 648 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 649 650 /* The 'key' must be 128-bit aligned. */ 651 kctx_len = roundup2(s->blkcipher.key_len, 16); 652 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 653 654 /* For AES-XTS we send a 16-byte IV in the work request. 
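	 * OCF supplies a shorter IV (the block number) for XTS, so the
	 * tweak is zero-extended to a full AES block below before it is
	 * copied into the request.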
*/ 655 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 656 iv_len = AES_BLOCK_LEN; 657 else 658 iv_len = s->blkcipher.iv_len; 659 660 if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) { 661 imm_len = crp->crp_payload_length; 662 sgl_nsegs = 0; 663 sgl_len = 0; 664 } else { 665 imm_len = 0; 666 sglist_reset(s->sg_ulptx); 667 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 668 crp->crp_payload_start, crp->crp_payload_length); 669 if (error) 670 return (error); 671 sgl_nsegs = s->sg_ulptx->sg_nseg; 672 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 673 } 674 675 wr_len = roundup2(transhdr_len, 16) + iv_len + 676 roundup2(imm_len, 16) + sgl_len; 677 if (wr_len > SGE_MAX_WR_LEN) 678 return (EFBIG); 679 wr = alloc_wrqe(wr_len, s->port->txq); 680 if (wr == NULL) { 681 counter_u64_add(sc->stats_wr_nomem, 1); 682 return (ENOMEM); 683 } 684 crwr = wrtod(wr); 685 memset(crwr, 0, wr_len); 686 687 crypto_read_iv(crp, iv); 688 689 /* Zero the remainder of the IV for AES-XTS. */ 690 memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len); 691 692 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, 693 crp); 694 695 crwr->sec_cpl.op_ivinsrtofst = htobe32( 696 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 697 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) | 698 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 699 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 700 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 701 702 crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length); 703 704 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 705 V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) | 706 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 707 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 708 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0)); 709 710 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. 
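	 * For a cipher-only request the SCMD leaves authentication
	 * disabled (AUTH_MODE_NOP, HMAC_CTRL_NOP).  AADIVDROP is set here,
	 * presumably so the IV is not written back to the output buffer,
	 * which would explain why this path's destination DSGL needs no
	 * dummy IV region unlike the AEAD paths below.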
*/ 711 crwr->sec_cpl.seqno_numivs = htobe32( 712 V_SCMD_SEQ_NO_CTRL(0) | 713 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 714 V_SCMD_ENC_DEC_CTRL(op_type) | 715 V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | 716 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) | 717 V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) | 718 V_SCMD_IV_SIZE(iv_len / 2) | 719 V_SCMD_NUM_IVS(0)); 720 crwr->sec_cpl.ivgen_hdrlen = htobe32( 721 V_SCMD_IV_GEN_CTRL(0) | 722 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 723 V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); 724 725 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 726 switch (s->blkcipher.cipher_mode) { 727 case SCMD_CIPH_MODE_AES_CBC: 728 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 729 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 730 s->blkcipher.key_len); 731 else 732 memcpy(crwr->key_ctx.key, s->blkcipher.deckey, 733 s->blkcipher.key_len); 734 break; 735 case SCMD_CIPH_MODE_AES_CTR: 736 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 737 s->blkcipher.key_len); 738 break; 739 case SCMD_CIPH_MODE_AES_XTS: 740 key_half = s->blkcipher.key_len / 2; 741 memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, 742 key_half); 743 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 744 memcpy(crwr->key_ctx.key + key_half, 745 s->blkcipher.enckey, key_half); 746 else 747 memcpy(crwr->key_ctx.key + key_half, 748 s->blkcipher.deckey, key_half); 749 break; 750 } 751 752 dst = (char *)(crwr + 1) + kctx_len; 753 ccr_write_phys_dsgl(s, dst, dsgl_nsegs); 754 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 755 memcpy(dst, iv, iv_len); 756 dst += iv_len; 757 if (imm_len != 0) 758 crypto_copydata(crp, crp->crp_payload_start, 759 crp->crp_payload_length, dst); 760 else 761 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 762 763 /* XXX: TODO backpressure */ 764 t4_wrq_tx(sc->adapter, wr); 765 766 explicit_bzero(iv, sizeof(iv)); 767 return (0); 768 } 769 770 static int 771 ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s, 772 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 773 { 774 775 /* 776 * The updated IV to permit chained requests is at 777 * cpl->data[2], but OCF doesn't permit chained requests. 778 */ 779 return (error); 780 } 781 782 /* 783 * 'hashsize' is the length of a full digest. 'authsize' is the 784 * requested digest length for this operation which may be less 785 * than 'hashsize'. 786 */ 787 static int 788 ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize) 789 { 790 791 if (authsize == 10) 792 return (SCMD_HMAC_CTRL_TRUNC_RFC4366); 793 if (authsize == 12) 794 return (SCMD_HMAC_CTRL_IPSEC_96BIT); 795 if (authsize == hashsize / 2) 796 return (SCMD_HMAC_CTRL_DIV2); 797 return (SCMD_HMAC_CTRL_NO_TRUNC); 798 } 799 800 static int 801 ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 802 { 803 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 804 struct chcr_wr *crwr; 805 struct wrqe *wr; 806 struct auth_hash *axf; 807 char *dst; 808 u_int kctx_len, key_half, op_type, transhdr_len, wr_len; 809 u_int hash_size_in_response, imm_len, iopad_size, iv_len; 810 u_int aad_start, aad_stop; 811 u_int auth_insert; 812 u_int cipher_start, cipher_stop; 813 u_int hmac_ctrl, input_len; 814 int dsgl_nsegs, dsgl_len; 815 int sgl_nsegs, sgl_len; 816 int error; 817 818 /* 819 * If there is a need in the future, requests with an empty 820 * payload could be supported as HMAC-only requests. 
821 */ 822 if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) 823 return (EINVAL); 824 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC && 825 (crp->crp_payload_length % AES_BLOCK_LEN) != 0) 826 return (EINVAL); 827 828 /* For AES-XTS we send a 16-byte IV in the work request. */ 829 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 830 iv_len = AES_BLOCK_LEN; 831 else 832 iv_len = s->blkcipher.iv_len; 833 834 if (crp->crp_aad_length + iv_len > MAX_AAD_LEN) 835 return (EINVAL); 836 837 axf = s->hmac.auth_hash; 838 hash_size_in_response = s->hmac.hash_len; 839 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 840 op_type = CHCR_ENCRYPT_OP; 841 else 842 op_type = CHCR_DECRYPT_OP; 843 844 /* 845 * The output buffer consists of the cipher text followed by 846 * the hash when encrypting. For decryption it only contains 847 * the plain text. 848 * 849 * Due to a firmware bug, the output buffer must include a 850 * dummy output buffer for the IV and AAD prior to the real 851 * output buffer. 852 */ 853 if (op_type == CHCR_ENCRYPT_OP) { 854 if (iv_len + crp->crp_aad_length + crp->crp_payload_length + 855 hash_size_in_response > MAX_REQUEST_SIZE) 856 return (EFBIG); 857 } else { 858 if (iv_len + crp->crp_aad_length + crp->crp_payload_length > 859 MAX_REQUEST_SIZE) 860 return (EFBIG); 861 } 862 sglist_reset(s->sg_dsgl); 863 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, 864 iv_len + crp->crp_aad_length); 865 if (error) 866 return (error); 867 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 868 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 869 crp->crp_payload_output_start, crp->crp_payload_length); 870 else 871 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 872 crp->crp_payload_start, crp->crp_payload_length); 873 if (error) 874 return (error); 875 if (op_type == CHCR_ENCRYPT_OP) { 876 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 877 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 878 crp->crp_digest_start, hash_size_in_response); 879 else 880 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 881 crp->crp_digest_start, hash_size_in_response); 882 if (error) 883 return (error); 884 } 885 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); 886 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 887 return (EFBIG); 888 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 889 890 /* PADs must be 128-bit aligned. */ 891 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 892 893 /* 894 * The 'key' part of the key context consists of the key followed 895 * by the IPAD and OPAD. 896 */ 897 kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2; 898 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 899 900 /* 901 * The input buffer consists of the IV, any AAD, and then the 902 * cipher/plain text. For decryption requests the hash is 903 * appended after the cipher text. 904 * 905 * The IV is always stored at the start of the input buffer 906 * even though it may be duplicated in the payload. The 907 * crypto engine doesn't work properly if the IV offset points 908 * inside of the AAD region, so a second copy is always 909 * required. 910 */ 911 input_len = crp->crp_aad_length + crp->crp_payload_length; 912 913 /* 914 * The firmware hangs if sent a request which is a 915 * bit smaller than MAX_REQUEST_SIZE. In particular, the 916 * firmware appears to require 512 - 16 bytes of spare room 917 * along with the size of the hash even if the hash isn't 918 * included in the input buffer. 
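	 * For example, with SHA-256 (32-byte digest) this limits
	 * crp_aad_length + crp_payload_length to 65535 - 32 - 496 = 65007
	 * bytes.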
919 */ 920 if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) > 921 MAX_REQUEST_SIZE) 922 return (EFBIG); 923 if (op_type == CHCR_DECRYPT_OP) 924 input_len += hash_size_in_response; 925 926 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { 927 imm_len = input_len; 928 sgl_nsegs = 0; 929 sgl_len = 0; 930 } else { 931 imm_len = 0; 932 sglist_reset(s->sg_ulptx); 933 if (crp->crp_aad_length != 0) { 934 if (crp->crp_aad != NULL) 935 error = sglist_append(s->sg_ulptx, 936 crp->crp_aad, crp->crp_aad_length); 937 else 938 error = sglist_append_sglist(s->sg_ulptx, 939 s->sg_input, crp->crp_aad_start, 940 crp->crp_aad_length); 941 if (error) 942 return (error); 943 } 944 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 945 crp->crp_payload_start, crp->crp_payload_length); 946 if (error) 947 return (error); 948 if (op_type == CHCR_DECRYPT_OP) { 949 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 950 crp->crp_digest_start, hash_size_in_response); 951 if (error) 952 return (error); 953 } 954 sgl_nsegs = s->sg_ulptx->sg_nseg; 955 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 956 } 957 958 /* Any AAD comes after the IV. */ 959 if (crp->crp_aad_length != 0) { 960 aad_start = iv_len + 1; 961 aad_stop = aad_start + crp->crp_aad_length - 1; 962 } else { 963 aad_start = 0; 964 aad_stop = 0; 965 } 966 cipher_start = iv_len + crp->crp_aad_length + 1; 967 if (op_type == CHCR_DECRYPT_OP) 968 cipher_stop = hash_size_in_response; 969 else 970 cipher_stop = 0; 971 if (op_type == CHCR_DECRYPT_OP) 972 auth_insert = hash_size_in_response; 973 else 974 auth_insert = 0; 975 976 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + 977 sgl_len; 978 if (wr_len > SGE_MAX_WR_LEN) 979 return (EFBIG); 980 wr = alloc_wrqe(wr_len, s->port->txq); 981 if (wr == NULL) { 982 counter_u64_add(sc->stats_wr_nomem, 1); 983 return (ENOMEM); 984 } 985 crwr = wrtod(wr); 986 memset(crwr, 0, wr_len); 987 988 crypto_read_iv(crp, iv); 989 990 /* Zero the remainder of the IV for AES-XTS. */ 991 memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len); 992 993 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 994 op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp); 995 996 crwr->sec_cpl.op_ivinsrtofst = htobe32( 997 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 998 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) | 999 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 1000 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 1001 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 1002 1003 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); 1004 1005 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 1006 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 1007 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 1008 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1009 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4)); 1010 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 1011 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) | 1012 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 1013 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1014 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1015 1016 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1017 hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response); 1018 crwr->sec_cpl.seqno_numivs = htobe32( 1019 V_SCMD_SEQ_NO_CTRL(0) | 1020 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 1021 V_SCMD_ENC_DEC_CTRL(op_type) | 1022 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 
1 : 0) | 1023 V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | 1024 V_SCMD_AUTH_MODE(s->hmac.auth_mode) | 1025 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1026 V_SCMD_IV_SIZE(iv_len / 2) | 1027 V_SCMD_NUM_IVS(0)); 1028 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1029 V_SCMD_IV_GEN_CTRL(0) | 1030 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1031 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1032 1033 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1034 switch (s->blkcipher.cipher_mode) { 1035 case SCMD_CIPH_MODE_AES_CBC: 1036 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1037 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 1038 s->blkcipher.key_len); 1039 else 1040 memcpy(crwr->key_ctx.key, s->blkcipher.deckey, 1041 s->blkcipher.key_len); 1042 break; 1043 case SCMD_CIPH_MODE_AES_CTR: 1044 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, 1045 s->blkcipher.key_len); 1046 break; 1047 case SCMD_CIPH_MODE_AES_XTS: 1048 key_half = s->blkcipher.key_len / 2; 1049 memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, 1050 key_half); 1051 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1052 memcpy(crwr->key_ctx.key + key_half, 1053 s->blkcipher.enckey, key_half); 1054 else 1055 memcpy(crwr->key_ctx.key + key_half, 1056 s->blkcipher.deckey, key_half); 1057 break; 1058 } 1059 1060 dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); 1061 memcpy(dst, s->hmac.pads, iopad_size * 2); 1062 1063 dst = (char *)(crwr + 1) + kctx_len; 1064 ccr_write_phys_dsgl(s, dst, dsgl_nsegs); 1065 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1066 memcpy(dst, iv, iv_len); 1067 dst += iv_len; 1068 if (imm_len != 0) { 1069 if (crp->crp_aad_length != 0) { 1070 if (crp->crp_aad != NULL) 1071 memcpy(dst, crp->crp_aad, crp->crp_aad_length); 1072 else 1073 crypto_copydata(crp, crp->crp_aad_start, 1074 crp->crp_aad_length, dst); 1075 dst += crp->crp_aad_length; 1076 } 1077 crypto_copydata(crp, crp->crp_payload_start, 1078 crp->crp_payload_length, dst); 1079 dst += crp->crp_payload_length; 1080 if (op_type == CHCR_DECRYPT_OP) 1081 crypto_copydata(crp, crp->crp_digest_start, 1082 hash_size_in_response, dst); 1083 } else 1084 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 1085 1086 /* XXX: TODO backpressure */ 1087 t4_wrq_tx(sc->adapter, wr); 1088 1089 explicit_bzero(iv, sizeof(iv)); 1090 return (0); 1091 } 1092 1093 static int 1094 ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s, 1095 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1096 { 1097 1098 /* 1099 * The updated IV to permit chained requests is at 1100 * cpl->data[2], but OCF doesn't permit chained requests. 1101 */ 1102 return (error); 1103 } 1104 1105 static int 1106 ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 1107 { 1108 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 1109 struct chcr_wr *crwr; 1110 struct wrqe *wr; 1111 char *dst; 1112 u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; 1113 u_int hash_size_in_response, imm_len; 1114 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; 1115 u_int hmac_ctrl, input_len; 1116 int dsgl_nsegs, dsgl_len; 1117 int sgl_nsegs, sgl_len; 1118 int error; 1119 1120 if (s->blkcipher.key_len == 0) 1121 return (EINVAL); 1122 1123 /* 1124 * The crypto engine doesn't handle GCM requests with an empty 1125 * payload, so handle those in software instead. 
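	 * (The EMSGSIZE return below is the caller's cue to fall back to
	 * ccr_gcm_soft() rather than to fail the request.)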
1126 */ 1127 if (crp->crp_payload_length == 0) 1128 return (EMSGSIZE); 1129 1130 if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN) 1131 return (EMSGSIZE); 1132 1133 hash_size_in_response = s->gmac.hash_len; 1134 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1135 op_type = CHCR_ENCRYPT_OP; 1136 else 1137 op_type = CHCR_DECRYPT_OP; 1138 1139 /* 1140 * The IV handling for GCM in OCF is a bit more complicated in 1141 * that IPSec provides a full 16-byte IV (including the 1142 * counter), whereas the /dev/crypto interface sometimes 1143 * provides a full 16-byte IV (if no IV is provided in the 1144 * ioctl) and sometimes a 12-byte IV (if the IV was explicit). 1145 * 1146 * When provided a 12-byte IV, assume the IV is really 16 bytes 1147 * with a counter in the last 4 bytes initialized to 1. 1148 * 1149 * While iv_len is checked below, the value is currently 1150 * always set to 12 when creating a GCM session in this driver 1151 * due to limitations in OCF (there is no way to know what the 1152 * IV length of a given request will be). This means that the 1153 * driver always assumes as 12-byte IV for now. 1154 */ 1155 if (s->blkcipher.iv_len == 12) 1156 iv_len = AES_BLOCK_LEN; 1157 else 1158 iv_len = s->blkcipher.iv_len; 1159 1160 /* 1161 * GCM requests should always provide an explicit IV. 1162 */ 1163 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) 1164 return (EINVAL); 1165 1166 /* 1167 * The output buffer consists of the cipher text followed by 1168 * the tag when encrypting. For decryption it only contains 1169 * the plain text. 1170 * 1171 * Due to a firmware bug, the output buffer must include a 1172 * dummy output buffer for the IV and AAD prior to the real 1173 * output buffer. 1174 */ 1175 if (op_type == CHCR_ENCRYPT_OP) { 1176 if (iv_len + crp->crp_aad_length + crp->crp_payload_length + 1177 hash_size_in_response > MAX_REQUEST_SIZE) 1178 return (EFBIG); 1179 } else { 1180 if (iv_len + crp->crp_aad_length + crp->crp_payload_length > 1181 MAX_REQUEST_SIZE) 1182 return (EFBIG); 1183 } 1184 sglist_reset(s->sg_dsgl); 1185 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len + 1186 crp->crp_aad_length); 1187 if (error) 1188 return (error); 1189 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1190 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 1191 crp->crp_payload_output_start, crp->crp_payload_length); 1192 else 1193 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 1194 crp->crp_payload_start, crp->crp_payload_length); 1195 if (error) 1196 return (error); 1197 if (op_type == CHCR_ENCRYPT_OP) { 1198 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1199 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 1200 crp->crp_digest_start, hash_size_in_response); 1201 else 1202 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 1203 crp->crp_digest_start, hash_size_in_response); 1204 if (error) 1205 return (error); 1206 } 1207 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); 1208 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 1209 return (EFBIG); 1210 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 1211 1212 /* 1213 * The 'key' part of the key context consists of the key followed 1214 * by the Galois hash key. 1215 */ 1216 kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN; 1217 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 1218 1219 /* 1220 * The input buffer consists of the IV, any AAD, and then the 1221 * cipher/plain text. For decryption requests the hash is 1222 * appended after the cipher text. 
1223 * 1224 * The IV is always stored at the start of the input buffer 1225 * even though it may be duplicated in the payload. The 1226 * crypto engine doesn't work properly if the IV offset points 1227 * inside of the AAD region, so a second copy is always 1228 * required. 1229 */ 1230 input_len = crp->crp_aad_length + crp->crp_payload_length; 1231 if (op_type == CHCR_DECRYPT_OP) 1232 input_len += hash_size_in_response; 1233 if (input_len > MAX_REQUEST_SIZE) 1234 return (EFBIG); 1235 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { 1236 imm_len = input_len; 1237 sgl_nsegs = 0; 1238 sgl_len = 0; 1239 } else { 1240 imm_len = 0; 1241 sglist_reset(s->sg_ulptx); 1242 if (crp->crp_aad_length != 0) { 1243 if (crp->crp_aad != NULL) 1244 error = sglist_append(s->sg_ulptx, 1245 crp->crp_aad, crp->crp_aad_length); 1246 else 1247 error = sglist_append_sglist(s->sg_ulptx, 1248 s->sg_input, crp->crp_aad_start, 1249 crp->crp_aad_length); 1250 if (error) 1251 return (error); 1252 } 1253 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 1254 crp->crp_payload_start, crp->crp_payload_length); 1255 if (error) 1256 return (error); 1257 if (op_type == CHCR_DECRYPT_OP) { 1258 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 1259 crp->crp_digest_start, hash_size_in_response); 1260 if (error) 1261 return (error); 1262 } 1263 sgl_nsegs = s->sg_ulptx->sg_nseg; 1264 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 1265 } 1266 1267 if (crp->crp_aad_length != 0) { 1268 aad_start = iv_len + 1; 1269 aad_stop = aad_start + crp->crp_aad_length - 1; 1270 } else { 1271 aad_start = 0; 1272 aad_stop = 0; 1273 } 1274 cipher_start = iv_len + crp->crp_aad_length + 1; 1275 if (op_type == CHCR_DECRYPT_OP) 1276 cipher_stop = hash_size_in_response; 1277 else 1278 cipher_stop = 0; 1279 if (op_type == CHCR_DECRYPT_OP) 1280 auth_insert = hash_size_in_response; 1281 else 1282 auth_insert = 0; 1283 1284 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + 1285 sgl_len; 1286 if (wr_len > SGE_MAX_WR_LEN) 1287 return (EFBIG); 1288 wr = alloc_wrqe(wr_len, s->port->txq); 1289 if (wr == NULL) { 1290 counter_u64_add(sc->stats_wr_nomem, 1); 1291 return (ENOMEM); 1292 } 1293 crwr = wrtod(wr); 1294 memset(crwr, 0, wr_len); 1295 1296 memcpy(iv, crp->crp_iv, s->blkcipher.iv_len); 1297 if (s->blkcipher.iv_len == 12) 1298 *(uint32_t *)&iv[12] = htobe32(1); 1299 1300 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, 1301 crp); 1302 1303 crwr->sec_cpl.op_ivinsrtofst = htobe32( 1304 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 1305 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) | 1306 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 1307 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 1308 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 1309 1310 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); 1311 1312 /* 1313 * NB: cipherstop is explicitly set to 0. On encrypt it 1314 * should normally be set to 0 anyway. However, for decrypt 1315 * the cipher ends before the tag in the ETA case (and 1316 * authstop is set to stop before the tag), but for GCM the 1317 * cipher still runs to the end of the buffer. Not sure if 1318 * this is intentional or a firmware quirk, but it is required 1319 * for working tag validation with GCM decryption. 
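	 * As a concrete example of the 1-based offsets used below: with a
	 * 12-byte nonce (iv_len of 16) and 16 bytes of AAD, aad_start is
	 * 17, aad_stop is 32, and cipher_start is 33, with the payload
	 * processed through the end of the buffer.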
1320 */ 1321 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 1322 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 1323 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 1324 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1325 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 1326 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 1327 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | 1328 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 1329 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1330 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1331 1332 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1333 hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response); 1334 crwr->sec_cpl.seqno_numivs = htobe32( 1335 V_SCMD_SEQ_NO_CTRL(0) | 1336 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 1337 V_SCMD_ENC_DEC_CTRL(op_type) | 1338 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) | 1339 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) | 1340 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) | 1341 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1342 V_SCMD_IV_SIZE(iv_len / 2) | 1343 V_SCMD_NUM_IVS(0)); 1344 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1345 V_SCMD_IV_GEN_CTRL(0) | 1346 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1347 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1348 1349 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1350 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); 1351 dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); 1352 memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN); 1353 1354 dst = (char *)(crwr + 1) + kctx_len; 1355 ccr_write_phys_dsgl(s, dst, dsgl_nsegs); 1356 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1357 memcpy(dst, iv, iv_len); 1358 dst += iv_len; 1359 if (imm_len != 0) { 1360 if (crp->crp_aad_length != 0) { 1361 if (crp->crp_aad != NULL) 1362 memcpy(dst, crp->crp_aad, crp->crp_aad_length); 1363 else 1364 crypto_copydata(crp, crp->crp_aad_start, 1365 crp->crp_aad_length, dst); 1366 dst += crp->crp_aad_length; 1367 } 1368 crypto_copydata(crp, crp->crp_payload_start, 1369 crp->crp_payload_length, dst); 1370 dst += crp->crp_payload_length; 1371 if (op_type == CHCR_DECRYPT_OP) 1372 crypto_copydata(crp, crp->crp_digest_start, 1373 hash_size_in_response, dst); 1374 } else 1375 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 1376 1377 /* XXX: TODO backpressure */ 1378 t4_wrq_tx(sc->adapter, wr); 1379 1380 explicit_bzero(iv, sizeof(iv)); 1381 return (0); 1382 } 1383 1384 static int 1385 ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s, 1386 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1387 { 1388 1389 /* 1390 * The updated IV to permit chained requests is at 1391 * cpl->data[2], but OCF doesn't permit chained requests. 1392 * 1393 * Note that the hardware should always verify the GMAC hash. 1394 */ 1395 return (error); 1396 } 1397 1398 /* 1399 * Handle a GCM request that is not supported by the crypto engine by 1400 * performing the operation in software. Derived from swcr_authenc(). 1401 */ 1402 static void 1403 ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp) 1404 { 1405 struct auth_hash *axf; 1406 struct enc_xform *exf; 1407 void *auth_ctx, *kschedule; 1408 char block[GMAC_BLOCK_LEN]; 1409 char digest[GMAC_DIGEST_LEN]; 1410 char iv[AES_BLOCK_LEN]; 1411 int error, i, len; 1412 1413 auth_ctx = NULL; 1414 kschedule = NULL; 1415 1416 /* Initialize the MAC. 
*/ 1417 switch (s->blkcipher.key_len) { 1418 case 16: 1419 axf = &auth_hash_nist_gmac_aes_128; 1420 break; 1421 case 24: 1422 axf = &auth_hash_nist_gmac_aes_192; 1423 break; 1424 case 32: 1425 axf = &auth_hash_nist_gmac_aes_256; 1426 break; 1427 default: 1428 error = EINVAL; 1429 goto out; 1430 } 1431 auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); 1432 if (auth_ctx == NULL) { 1433 error = ENOMEM; 1434 goto out; 1435 } 1436 axf->Init(auth_ctx); 1437 axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); 1438 1439 /* Initialize the cipher. */ 1440 exf = &enc_xform_aes_nist_gcm; 1441 kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT); 1442 if (kschedule == NULL) { 1443 error = ENOMEM; 1444 goto out; 1445 } 1446 error = exf->setkey(kschedule, s->blkcipher.enckey, 1447 s->blkcipher.key_len); 1448 if (error) 1449 goto out; 1450 1451 /* 1452 * This assumes a 12-byte IV from the crp. See longer comment 1453 * above in ccr_gcm() for more details. 1454 */ 1455 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { 1456 error = EINVAL; 1457 goto out; 1458 } 1459 memcpy(iv, crp->crp_iv, 12); 1460 *(uint32_t *)&iv[12] = htobe32(1); 1461 1462 axf->Reinit(auth_ctx, iv, sizeof(iv)); 1463 1464 /* MAC the AAD. */ 1465 if (crp->crp_aad != NULL) { 1466 len = rounddown(crp->crp_aad_length, sizeof(block)); 1467 if (len != 0) 1468 axf->Update(auth_ctx, crp->crp_aad, len); 1469 if (crp->crp_aad_length != len) { 1470 memset(block, 0, sizeof(block)); 1471 memcpy(block, (char *)crp->crp_aad + len, 1472 crp->crp_aad_length - len); 1473 axf->Update(auth_ctx, block, sizeof(block)); 1474 } 1475 } else { 1476 for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) { 1477 len = imin(crp->crp_aad_length - i, sizeof(block)); 1478 crypto_copydata(crp, crp->crp_aad_start + i, len, 1479 block); 1480 bzero(block + len, sizeof(block) - len); 1481 axf->Update(auth_ctx, block, sizeof(block)); 1482 } 1483 } 1484 1485 exf->reinit(kschedule, iv); 1486 1487 /* Do encryption with MAC */ 1488 for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { 1489 len = imin(crp->crp_payload_length - i, sizeof(block)); 1490 crypto_copydata(crp, crp->crp_payload_start + i, len, block); 1491 bzero(block + len, sizeof(block) - len); 1492 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1493 exf->encrypt(kschedule, block, block); 1494 axf->Update(auth_ctx, block, len); 1495 crypto_copyback(crp, crp->crp_payload_start + i, len, 1496 block); 1497 } else { 1498 axf->Update(auth_ctx, block, len); 1499 } 1500 } 1501 1502 /* Length block. */ 1503 bzero(block, sizeof(block)); 1504 ((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8); 1505 ((uint32_t *)block)[3] = htobe32(crp->crp_payload_length * 8); 1506 axf->Update(auth_ctx, block, sizeof(block)); 1507 1508 /* Finalize MAC. */ 1509 axf->Final(digest, auth_ctx); 1510 1511 /* Inject or validate tag. */ 1512 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1513 crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), 1514 digest); 1515 error = 0; 1516 } else { 1517 char digest2[GMAC_DIGEST_LEN]; 1518 1519 crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), 1520 digest2); 1521 if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { 1522 error = 0; 1523 1524 /* Tag matches, decrypt data. 
*/ 1525 for (i = 0; i < crp->crp_payload_length; 1526 i += sizeof(block)) { 1527 len = imin(crp->crp_payload_length - i, 1528 sizeof(block)); 1529 crypto_copydata(crp, crp->crp_payload_start + i, 1530 len, block); 1531 bzero(block + len, sizeof(block) - len); 1532 exf->decrypt(kschedule, block, block); 1533 crypto_copyback(crp, crp->crp_payload_start + i, 1534 len, block); 1535 } 1536 } else 1537 error = EBADMSG; 1538 explicit_bzero(digest2, sizeof(digest2)); 1539 } 1540 1541 out: 1542 zfree(kschedule, M_CCR); 1543 zfree(auth_ctx, M_CCR); 1544 explicit_bzero(block, sizeof(block)); 1545 explicit_bzero(iv, sizeof(iv)); 1546 explicit_bzero(digest, sizeof(digest)); 1547 crp->crp_etype = error; 1548 crypto_done(crp); 1549 } 1550 1551 static void 1552 generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response, 1553 const char *iv, char *b0) 1554 { 1555 u_int i, payload_len; 1556 1557 /* NB: L is already set in the first byte of the IV. */ 1558 memcpy(b0, iv, CCM_B0_SIZE); 1559 1560 /* Set length of hash in bits 3 - 5. */ 1561 b0[0] |= (((hash_size_in_response - 2) / 2) << 3); 1562 1563 /* Store the payload length as a big-endian value. */ 1564 payload_len = crp->crp_payload_length; 1565 for (i = 0; i < iv[0]; i++) { 1566 b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len; 1567 payload_len >>= 8; 1568 } 1569 1570 /* 1571 * If there is AAD in the request, set bit 6 in the flags 1572 * field and store the AAD length as a big-endian value at the 1573 * start of block 1. This only assumes a 16-bit AAD length 1574 * since T6 doesn't support large AAD sizes. 1575 */ 1576 if (crp->crp_aad_length != 0) { 1577 b0[0] |= (1 << 6); 1578 *(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length); 1579 } 1580 } 1581 1582 static int 1583 ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) 1584 { 1585 char iv[CHCR_MAX_CRYPTO_IV_LEN]; 1586 struct ulptx_idata *idata; 1587 struct chcr_wr *crwr; 1588 struct wrqe *wr; 1589 char *dst; 1590 u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; 1591 u_int aad_len, b0_len, hash_size_in_response, imm_len; 1592 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; 1593 u_int hmac_ctrl, input_len; 1594 int dsgl_nsegs, dsgl_len; 1595 int sgl_nsegs, sgl_len; 1596 int error; 1597 1598 if (s->blkcipher.key_len == 0) 1599 return (EINVAL); 1600 1601 /* 1602 * The crypto engine doesn't handle CCM requests with an empty 1603 * payload, so handle those in software instead. 1604 */ 1605 if (crp->crp_payload_length == 0) 1606 return (EMSGSIZE); 1607 1608 /* 1609 * CCM always includes block 0 in the AAD before AAD from the 1610 * request. 1611 */ 1612 b0_len = CCM_B0_SIZE; 1613 if (crp->crp_aad_length != 0) 1614 b0_len += CCM_AAD_FIELD_SIZE; 1615 aad_len = b0_len + crp->crp_aad_length; 1616 1617 /* 1618 * CCM requests should always provide an explicit IV (really 1619 * the nonce). 1620 */ 1621 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) 1622 return (EINVAL); 1623 1624 /* 1625 * Always assume a 12 byte input nonce for now since that is 1626 * what OCF always generates. The full IV in the work request 1627 * is 16 bytes. 1628 */ 1629 iv_len = AES_BLOCK_LEN; 1630 1631 if (iv_len + aad_len > MAX_AAD_LEN) 1632 return (EMSGSIZE); 1633 1634 hash_size_in_response = s->ccm_mac.hash_len; 1635 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1636 op_type = CHCR_ENCRYPT_OP; 1637 else 1638 op_type = CHCR_DECRYPT_OP; 1639 1640 /* 1641 * The output buffer consists of the cipher text followed by 1642 * the tag when encrypting. 
For decryption it only contains 1643 * the plain text. 1644 * 1645 * Due to a firmware bug, the output buffer must include a 1646 * dummy output buffer for the IV and AAD prior to the real 1647 * output buffer. 1648 */ 1649 if (op_type == CHCR_ENCRYPT_OP) { 1650 if (iv_len + aad_len + crp->crp_payload_length + 1651 hash_size_in_response > MAX_REQUEST_SIZE) 1652 return (EFBIG); 1653 } else { 1654 if (iv_len + aad_len + crp->crp_payload_length > 1655 MAX_REQUEST_SIZE) 1656 return (EFBIG); 1657 } 1658 sglist_reset(s->sg_dsgl); 1659 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len + 1660 aad_len); 1661 if (error) 1662 return (error); 1663 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1664 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 1665 crp->crp_payload_output_start, crp->crp_payload_length); 1666 else 1667 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 1668 crp->crp_payload_start, crp->crp_payload_length); 1669 if (error) 1670 return (error); 1671 if (op_type == CHCR_ENCRYPT_OP) { 1672 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1673 error = sglist_append_sglist(s->sg_dsgl, s->sg_output, 1674 crp->crp_digest_start, hash_size_in_response); 1675 else 1676 error = sglist_append_sglist(s->sg_dsgl, s->sg_input, 1677 crp->crp_digest_start, hash_size_in_response); 1678 if (error) 1679 return (error); 1680 } 1681 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); 1682 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) 1683 return (EFBIG); 1684 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); 1685 1686 /* 1687 * The 'key' part of the key context consists of two copies of 1688 * the AES key. 1689 */ 1690 kctx_len = roundup2(s->blkcipher.key_len, 16) * 2; 1691 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); 1692 1693 /* 1694 * The input buffer consists of the IV, AAD (including block 1695 * 0), and then the cipher/plain text. For decryption 1696 * requests the hash is appended after the cipher text. 1697 * 1698 * The IV is always stored at the start of the input buffer 1699 * even though it may be duplicated in the payload. The 1700 * crypto engine doesn't work properly if the IV offset points 1701 * inside of the AAD region, so a second copy is always 1702 * required. 1703 */ 1704 input_len = aad_len + crp->crp_payload_length; 1705 if (op_type == CHCR_DECRYPT_OP) 1706 input_len += hash_size_in_response; 1707 if (input_len > MAX_REQUEST_SIZE) 1708 return (EFBIG); 1709 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { 1710 imm_len = input_len; 1711 sgl_nsegs = 0; 1712 sgl_len = 0; 1713 } else { 1714 /* Block 0 is passed as immediate data. 
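		 * Block 0 (and the 2-byte AAD length field) is generated by
		 * generate_ccm_b0() rather than coming from the crypto buffer,
		 * so it cannot be described by the ULPTX SGL and must always be
		 * sent as immediate data; the AAD and payload then follow via
		 * the SGL.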
*/ 1715 imm_len = b0_len; 1716 1717 sglist_reset(s->sg_ulptx); 1718 if (crp->crp_aad_length != 0) { 1719 if (crp->crp_aad != NULL) 1720 error = sglist_append(s->sg_ulptx, 1721 crp->crp_aad, crp->crp_aad_length); 1722 else 1723 error = sglist_append_sglist(s->sg_ulptx, 1724 s->sg_input, crp->crp_aad_start, 1725 crp->crp_aad_length); 1726 if (error) 1727 return (error); 1728 } 1729 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 1730 crp->crp_payload_start, crp->crp_payload_length); 1731 if (error) 1732 return (error); 1733 if (op_type == CHCR_DECRYPT_OP) { 1734 error = sglist_append_sglist(s->sg_ulptx, s->sg_input, 1735 crp->crp_digest_start, hash_size_in_response); 1736 if (error) 1737 return (error); 1738 } 1739 sgl_nsegs = s->sg_ulptx->sg_nseg; 1740 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); 1741 } 1742 1743 aad_start = iv_len + 1; 1744 aad_stop = aad_start + aad_len - 1; 1745 cipher_start = aad_stop + 1; 1746 if (op_type == CHCR_DECRYPT_OP) 1747 cipher_stop = hash_size_in_response; 1748 else 1749 cipher_stop = 0; 1750 if (op_type == CHCR_DECRYPT_OP) 1751 auth_insert = hash_size_in_response; 1752 else 1753 auth_insert = 0; 1754 1755 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + 1756 sgl_len; 1757 if (wr_len > SGE_MAX_WR_LEN) 1758 return (EFBIG); 1759 wr = alloc_wrqe(wr_len, s->port->txq); 1760 if (wr == NULL) { 1761 counter_u64_add(sc->stats_wr_nomem, 1); 1762 return (ENOMEM); 1763 } 1764 crwr = wrtod(wr); 1765 memset(crwr, 0, wr_len); 1766 1767 /* 1768 * Read the nonce from the request. Use the nonce to generate 1769 * the full IV with the counter set to 0. 1770 */ 1771 memset(iv, 0, iv_len); 1772 iv[0] = (15 - AES_CCM_IV_LEN) - 1; 1773 memcpy(iv + 1, crp->crp_iv, AES_CCM_IV_LEN); 1774 1775 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, 1776 crp); 1777 1778 crwr->sec_cpl.op_ivinsrtofst = htobe32( 1779 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | 1780 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) | 1781 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | 1782 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | 1783 V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); 1784 1785 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); 1786 1787 /* 1788 * NB: cipherstop is explicitly set to 0. See comments above 1789 * in ccr_gcm(). 1790 */ 1791 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 1792 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 1793 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 1794 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1795 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 1796 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 1797 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | 1798 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 1799 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1800 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1801 1802 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1803 hmac_ctrl = ccr_hmac_ctrl(AES_CBC_MAC_HASH_LEN, hash_size_in_response); 1804 crwr->sec_cpl.seqno_numivs = htobe32( 1805 V_SCMD_SEQ_NO_CTRL(0) | 1806 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 1807 V_SCMD_ENC_DEC_CTRL(op_type) | 1808 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 
0 : 1) | 1809 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) | 1810 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) | 1811 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1812 V_SCMD_IV_SIZE(iv_len / 2) | 1813 V_SCMD_NUM_IVS(0)); 1814 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1815 V_SCMD_IV_GEN_CTRL(0) | 1816 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1817 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1818 1819 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1820 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); 1821 memcpy(crwr->key_ctx.key + roundup(s->blkcipher.key_len, 16), 1822 s->blkcipher.enckey, s->blkcipher.key_len); 1823 1824 dst = (char *)(crwr + 1) + kctx_len; 1825 ccr_write_phys_dsgl(s, dst, dsgl_nsegs); 1826 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1827 memcpy(dst, iv, iv_len); 1828 dst += iv_len; 1829 generate_ccm_b0(crp, hash_size_in_response, iv, dst); 1830 if (sgl_nsegs == 0) { 1831 dst += b0_len; 1832 if (crp->crp_aad_length != 0) { 1833 if (crp->crp_aad != NULL) 1834 memcpy(dst, crp->crp_aad, crp->crp_aad_length); 1835 else 1836 crypto_copydata(crp, crp->crp_aad_start, 1837 crp->crp_aad_length, dst); 1838 dst += crp->crp_aad_length; 1839 } 1840 crypto_copydata(crp, crp->crp_payload_start, 1841 crp->crp_payload_length, dst); 1842 dst += crp->crp_payload_length; 1843 if (op_type == CHCR_DECRYPT_OP) 1844 crypto_copydata(crp, crp->crp_digest_start, 1845 hash_size_in_response, dst); 1846 } else { 1847 dst += CCM_B0_SIZE; 1848 if (b0_len > CCM_B0_SIZE) { 1849 /* 1850 * If there is AAD, insert padding including a 1851 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL 1852 * is 16-byte aligned. 1853 */ 1854 KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE, 1855 ("b0_len mismatch")); 1856 memset(dst + CCM_AAD_FIELD_SIZE, 0, 1857 8 - CCM_AAD_FIELD_SIZE); 1858 idata = (void *)(dst + 8); 1859 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 1860 idata->len = htobe32(0); 1861 dst = (void *)(idata + 1); 1862 } 1863 ccr_write_ulptx_sgl(s, dst, sgl_nsegs); 1864 } 1865 1866 /* XXX: TODO backpressure */ 1867 t4_wrq_tx(sc->adapter, wr); 1868 1869 explicit_bzero(iv, sizeof(iv)); 1870 return (0); 1871 } 1872 1873 static int 1874 ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s, 1875 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1876 { 1877 1878 /* 1879 * The updated IV to permit chained requests is at 1880 * cpl->data[2], but OCF doesn't permit chained requests. 1881 * 1882 * Note that the hardware should always verify the CBC MAC 1883 * hash. 1884 */ 1885 return (error); 1886 } 1887 1888 /* 1889 * Handle a CCM request that is not supported by the crypto engine by 1890 * performing the operation in software. Derived from swcr_authenc(). 1891 */ 1892 static void 1893 ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp) 1894 { 1895 struct auth_hash *axf; 1896 struct enc_xform *exf; 1897 union authctx *auth_ctx; 1898 void *kschedule; 1899 char block[CCM_CBC_BLOCK_LEN]; 1900 char digest[AES_CBC_MAC_HASH_LEN]; 1901 char iv[AES_CCM_IV_LEN]; 1902 int error, i, len; 1903 1904 auth_ctx = NULL; 1905 kschedule = NULL; 1906 1907 /* Initialize the MAC. 
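 * CCM uses a single AES key for both the CTR cipher and the CBC-MAC, so
 * the fallback selects the CBC-MAC transform by key length and reuses
 * the session's cipher key below.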
*/ 1908 switch (s->blkcipher.key_len) { 1909 case 16: 1910 axf = &auth_hash_ccm_cbc_mac_128; 1911 break; 1912 case 24: 1913 axf = &auth_hash_ccm_cbc_mac_192; 1914 break; 1915 case 32: 1916 axf = &auth_hash_ccm_cbc_mac_256; 1917 break; 1918 default: 1919 error = EINVAL; 1920 goto out; 1921 } 1922 auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); 1923 if (auth_ctx == NULL) { 1924 error = ENOMEM; 1925 goto out; 1926 } 1927 axf->Init(auth_ctx); 1928 axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); 1929 1930 /* Initialize the cipher. */ 1931 exf = &enc_xform_ccm; 1932 kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT); 1933 if (kschedule == NULL) { 1934 error = ENOMEM; 1935 goto out; 1936 } 1937 error = exf->setkey(kschedule, s->blkcipher.enckey, 1938 s->blkcipher.key_len); 1939 if (error) 1940 goto out; 1941 1942 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { 1943 error = EINVAL; 1944 goto out; 1945 } 1946 memcpy(iv, crp->crp_iv, AES_CCM_IV_LEN); 1947 1948 auth_ctx->aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length; 1949 auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length; 1950 axf->Reinit(auth_ctx, iv, sizeof(iv)); 1951 1952 /* MAC the AAD. */ 1953 if (crp->crp_aad != NULL) 1954 error = axf->Update(auth_ctx, crp->crp_aad, 1955 crp->crp_aad_length); 1956 else 1957 error = crypto_apply(crp, crp->crp_aad_start, 1958 crp->crp_aad_length, axf->Update, auth_ctx); 1959 if (error) 1960 goto out; 1961 1962 exf->reinit(kschedule, iv); 1963 1964 /* Do encryption/decryption with MAC */ 1965 for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { 1966 len = imin(crp->crp_payload_length - i, sizeof(block)); 1967 crypto_copydata(crp, crp->crp_payload_start + i, len, block); 1968 bzero(block + len, sizeof(block) - len); 1969 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1970 axf->Update(auth_ctx, block, len); 1971 exf->encrypt(kschedule, block, block); 1972 crypto_copyback(crp, crp->crp_payload_start + i, len, 1973 block); 1974 } else { 1975 exf->decrypt(kschedule, block, block); 1976 axf->Update(auth_ctx, block, len); 1977 } 1978 } 1979 1980 /* Finalize MAC. */ 1981 axf->Final(digest, auth_ctx); 1982 1983 /* Inject or validate tag. */ 1984 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1985 crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), 1986 digest); 1987 error = 0; 1988 } else { 1989 char digest2[AES_CBC_MAC_HASH_LEN]; 1990 1991 crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), 1992 digest2); 1993 if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { 1994 error = 0; 1995 1996 /* Tag matches, decrypt data. 
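 * The pass above decrypted only into the local block buffer to feed
 * the MAC; nothing was copied back to the request.  Now that the tag
 * has been verified, decrypt again and store the plaintext.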
*/ 1997 exf->reinit(kschedule, iv); 1998 for (i = 0; i < crp->crp_payload_length; 1999 i += sizeof(block)) { 2000 len = imin(crp->crp_payload_length - i, 2001 sizeof(block)); 2002 crypto_copydata(crp, crp->crp_payload_start + i, 2003 len, block); 2004 bzero(block + len, sizeof(block) - len); 2005 exf->decrypt(kschedule, block, block); 2006 crypto_copyback(crp, crp->crp_payload_start + i, 2007 len, block); 2008 } 2009 } else 2010 error = EBADMSG; 2011 explicit_bzero(digest2, sizeof(digest2)); 2012 } 2013 2014 out: 2015 zfree(kschedule, M_CCR); 2016 zfree(auth_ctx, M_CCR); 2017 explicit_bzero(block, sizeof(block)); 2018 explicit_bzero(iv, sizeof(iv)); 2019 explicit_bzero(digest, sizeof(digest)); 2020 crp->crp_etype = error; 2021 crypto_done(crp); 2022 } 2023 2024 static void 2025 ccr_identify(driver_t *driver, device_t parent) 2026 { 2027 struct adapter *sc; 2028 2029 sc = device_get_softc(parent); 2030 if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE && 2031 device_find_child(parent, "ccr", -1) == NULL) 2032 device_add_child(parent, "ccr", -1); 2033 } 2034 2035 static int 2036 ccr_probe(device_t dev) 2037 { 2038 2039 device_set_desc(dev, "Chelsio Crypto Accelerator"); 2040 return (BUS_PROBE_DEFAULT); 2041 } 2042 2043 static void 2044 ccr_sysctls(struct ccr_softc *sc) 2045 { 2046 struct sysctl_ctx_list *ctx; 2047 struct sysctl_oid *oid, *port_oid; 2048 struct sysctl_oid_list *children; 2049 char buf[16]; 2050 int i; 2051 2052 ctx = device_get_sysctl_ctx(sc->dev); 2053 2054 /* 2055 * dev.ccr.X. 2056 */ 2057 oid = device_get_sysctl_tree(sc->dev); 2058 children = SYSCTL_CHILDREN(oid); 2059 2060 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW, 2061 &sc->port_mask, 0, "Mask of enabled ports"); 2062 2063 /* 2064 * dev.ccr.X.stats. 2065 */ 2066 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", 2067 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics"); 2068 children = SYSCTL_CHILDREN(oid); 2069 2070 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD, 2071 &sc->stats_hash, "Hash requests submitted"); 2072 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD, 2073 &sc->stats_hmac, "HMAC requests submitted"); 2074 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_encrypt", 2075 CTLFLAG_RD, &sc->stats_blkcipher_encrypt, 2076 "Cipher encryption requests submitted"); 2077 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_decrypt", 2078 CTLFLAG_RD, &sc->stats_blkcipher_decrypt, 2079 "Cipher decryption requests submitted"); 2080 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_encrypt", 2081 CTLFLAG_RD, &sc->stats_eta_encrypt, 2082 "Combined AES+HMAC encryption requests submitted"); 2083 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_decrypt", 2084 CTLFLAG_RD, &sc->stats_eta_decrypt, 2085 "Combined AES+HMAC decryption requests submitted"); 2086 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_encrypt", 2087 CTLFLAG_RD, &sc->stats_gcm_encrypt, 2088 "AES-GCM encryption requests submitted"); 2089 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_decrypt", 2090 CTLFLAG_RD, &sc->stats_gcm_decrypt, 2091 "AES-GCM decryption requests submitted"); 2092 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_encrypt", 2093 CTLFLAG_RD, &sc->stats_ccm_encrypt, 2094 "AES-CCM encryption requests submitted"); 2095 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_decrypt", 2096 CTLFLAG_RD, &sc->stats_ccm_decrypt, 2097 "AES-CCM decryption requests submitted"); 2098 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD, 2099 
&sc->stats_wr_nomem, "Work request memory allocation failures"); 2100 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD, 2101 &sc->stats_inflight, "Requests currently pending"); 2102 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD, 2103 &sc->stats_mac_error, "MAC errors"); 2104 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD, 2105 &sc->stats_pad_error, "Padding errors"); 2106 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sglist_error", 2107 CTLFLAG_RD, &sc->stats_sglist_error, 2108 "Requests for which DMA mapping failed"); 2109 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error", 2110 CTLFLAG_RD, &sc->stats_process_error, 2111 "Requests failed during queueing"); 2112 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback", 2113 CTLFLAG_RD, &sc->stats_sw_fallback, 2114 "Requests processed by falling back to software"); 2115 2116 /* 2117 * dev.ccr.X.stats.port 2118 */ 2119 port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port", 2120 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics"); 2121 2122 for (i = 0; i < nitems(sc->ports); i++) { 2123 if (sc->ports[i].rxq == NULL) 2124 continue; 2125 2126 /* 2127 * dev.ccr.X.stats.port.Y 2128 */ 2129 snprintf(buf, sizeof(buf), "%d", i); 2130 oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO, 2131 buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf); 2132 children = SYSCTL_CHILDREN(oid); 2133 2134 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions", 2135 CTLFLAG_RD, &sc->ports[i].active_sessions, 0, 2136 "Count of active sessions"); 2137 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "queued", 2138 CTLFLAG_RD, &sc->ports[i].stats_queued, "Requests queued"); 2139 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "completed", 2140 CTLFLAG_RD, &sc->ports[i].stats_completed, 2141 "Requests completed"); 2142 } 2143 } 2144 2145 static void 2146 ccr_init_port(struct ccr_softc *sc, int port) 2147 { 2148 struct port_info *pi; 2149 2150 pi = sc->adapter->port[port]; 2151 sc->ports[port].txq = &sc->adapter->sge.ctrlq[port]; 2152 sc->ports[port].rxq = &sc->adapter->sge.rxq[pi->vi->first_rxq]; 2153 sc->ports[port].rx_channel_id = pi->rx_c_chan; 2154 sc->ports[port].tx_channel_id = pi->tx_chan; 2155 sc->ports[port].stats_queued = counter_u64_alloc(M_WAITOK); 2156 sc->ports[port].stats_completed = counter_u64_alloc(M_WAITOK); 2157 _Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS - 1, 2158 "Too many ports to fit in port_mask"); 2159 2160 /* 2161 * Completions for crypto requests on port 1 can sometimes 2162 * return a stale cookie value due to a firmware bug. Disable 2163 * requests on port 1 by default on affected firmware. 2164 */ 2165 if (sc->adapter->params.fw_vers >= FW_VERSION32(1, 25, 4, 0) || 2166 port == 0) 2167 sc->port_mask |= 1u << port; 2168 } 2169 2170 static int 2171 ccr_attach(device_t dev) 2172 { 2173 struct ccr_softc *sc; 2174 int32_t cid; 2175 int i; 2176 2177 sc = device_get_softc(dev); 2178 sc->dev = dev; 2179 sc->adapter = device_get_softc(device_get_parent(dev)); 2180 for_each_port(sc->adapter, i) { 2181 ccr_init_port(sc, i); 2182 } 2183 cid = crypto_get_driverid(dev, sizeof(struct ccr_session), 2184 CRYPTOCAP_F_HARDWARE); 2185 if (cid < 0) { 2186 device_printf(dev, "could not get crypto driver id\n"); 2187 return (ENXIO); 2188 } 2189 sc->cid = cid; 2190 sc->adapter->ccr_softc = sc; 2191 2192 /* 2193 * The FID must be the first RXQ for port 0 regardless of 2194 * which port is used to service the request. 
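 * The value stored here is that queue's absolute id; ccr_populate_wreq()
 * carries it in each work request header so the firmware knows where to
 * deliver completions.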
2195 */ 2196 sc->first_rxq_id = sc->adapter->sge.rxq[0].iq.abs_id; 2197 2198 mtx_init(&sc->lock, "ccr", NULL, MTX_DEF); 2199 sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK); 2200 sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK); 2201 sc->stats_blkcipher_encrypt = counter_u64_alloc(M_WAITOK); 2202 sc->stats_blkcipher_decrypt = counter_u64_alloc(M_WAITOK); 2203 sc->stats_hash = counter_u64_alloc(M_WAITOK); 2204 sc->stats_hmac = counter_u64_alloc(M_WAITOK); 2205 sc->stats_eta_encrypt = counter_u64_alloc(M_WAITOK); 2206 sc->stats_eta_decrypt = counter_u64_alloc(M_WAITOK); 2207 sc->stats_gcm_encrypt = counter_u64_alloc(M_WAITOK); 2208 sc->stats_gcm_decrypt = counter_u64_alloc(M_WAITOK); 2209 sc->stats_ccm_encrypt = counter_u64_alloc(M_WAITOK); 2210 sc->stats_ccm_decrypt = counter_u64_alloc(M_WAITOK); 2211 sc->stats_wr_nomem = counter_u64_alloc(M_WAITOK); 2212 sc->stats_inflight = counter_u64_alloc(M_WAITOK); 2213 sc->stats_mac_error = counter_u64_alloc(M_WAITOK); 2214 sc->stats_pad_error = counter_u64_alloc(M_WAITOK); 2215 sc->stats_sglist_error = counter_u64_alloc(M_WAITOK); 2216 sc->stats_process_error = counter_u64_alloc(M_WAITOK); 2217 sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK); 2218 ccr_sysctls(sc); 2219 2220 return (0); 2221 } 2222 2223 static void 2224 ccr_free_port(struct ccr_softc *sc, int port) 2225 { 2226 2227 counter_u64_free(sc->ports[port].stats_queued); 2228 counter_u64_free(sc->ports[port].stats_completed); 2229 } 2230 2231 static int 2232 ccr_detach(device_t dev) 2233 { 2234 struct ccr_softc *sc; 2235 int i; 2236 2237 sc = device_get_softc(dev); 2238 2239 mtx_lock(&sc->lock); 2240 sc->detaching = true; 2241 mtx_unlock(&sc->lock); 2242 2243 crypto_unregister_all(sc->cid); 2244 2245 mtx_destroy(&sc->lock); 2246 counter_u64_free(sc->stats_blkcipher_encrypt); 2247 counter_u64_free(sc->stats_blkcipher_decrypt); 2248 counter_u64_free(sc->stats_hash); 2249 counter_u64_free(sc->stats_hmac); 2250 counter_u64_free(sc->stats_eta_encrypt); 2251 counter_u64_free(sc->stats_eta_decrypt); 2252 counter_u64_free(sc->stats_gcm_encrypt); 2253 counter_u64_free(sc->stats_gcm_decrypt); 2254 counter_u64_free(sc->stats_ccm_encrypt); 2255 counter_u64_free(sc->stats_ccm_decrypt); 2256 counter_u64_free(sc->stats_wr_nomem); 2257 counter_u64_free(sc->stats_inflight); 2258 counter_u64_free(sc->stats_mac_error); 2259 counter_u64_free(sc->stats_pad_error); 2260 counter_u64_free(sc->stats_sglist_error); 2261 counter_u64_free(sc->stats_process_error); 2262 counter_u64_free(sc->stats_sw_fallback); 2263 for_each_port(sc->adapter, i) { 2264 ccr_free_port(sc, i); 2265 } 2266 sglist_free(sc->sg_iv_aad); 2267 free(sc->iv_aad_buf, M_CCR); 2268 sc->adapter->ccr_softc = NULL; 2269 return (0); 2270 } 2271 2272 static void 2273 ccr_init_hash_digest(struct ccr_session *s) 2274 { 2275 union authctx auth_ctx; 2276 struct auth_hash *axf; 2277 2278 axf = s->hmac.auth_hash; 2279 axf->Init(&auth_ctx); 2280 t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads); 2281 } 2282 2283 static bool 2284 ccr_aes_check_keylen(int alg, int klen) 2285 { 2286 2287 switch (klen * 8) { 2288 case 128: 2289 case 192: 2290 if (alg == CRYPTO_AES_XTS) 2291 return (false); 2292 break; 2293 case 256: 2294 break; 2295 case 512: 2296 if (alg != CRYPTO_AES_XTS) 2297 return (false); 2298 break; 2299 default: 2300 return (false); 2301 } 2302 return (true); 2303 } 2304 2305 static void 2306 ccr_aes_setkey(struct ccr_session *s, const void *key, int klen) 2307 { 2308 unsigned int ck_size, iopad_size, kctx_flits, kctx_len, 
kbits, mk_size; 2309 unsigned int opad_present; 2310 2311 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 2312 kbits = (klen / 2) * 8; 2313 else 2314 kbits = klen * 8; 2315 switch (kbits) { 2316 case 128: 2317 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 2318 break; 2319 case 192: 2320 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 2321 break; 2322 case 256: 2323 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 2324 break; 2325 default: 2326 panic("should not get here"); 2327 } 2328 2329 s->blkcipher.key_len = klen; 2330 memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); 2331 switch (s->blkcipher.cipher_mode) { 2332 case SCMD_CIPH_MODE_AES_CBC: 2333 case SCMD_CIPH_MODE_AES_XTS: 2334 t4_aes_getdeckey(s->blkcipher.deckey, key, kbits); 2335 break; 2336 } 2337 2338 kctx_len = roundup2(s->blkcipher.key_len, 16); 2339 switch (s->mode) { 2340 case ETA: 2341 mk_size = s->hmac.mk_size; 2342 opad_present = 1; 2343 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 2344 kctx_len += iopad_size * 2; 2345 break; 2346 case GCM: 2347 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 2348 opad_present = 0; 2349 kctx_len += GMAC_BLOCK_LEN; 2350 break; 2351 case CCM: 2352 switch (kbits) { 2353 case 128: 2354 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 2355 break; 2356 case 192: 2357 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; 2358 break; 2359 case 256: 2360 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2361 break; 2362 default: 2363 panic("should not get here"); 2364 } 2365 opad_present = 0; 2366 kctx_len *= 2; 2367 break; 2368 default: 2369 mk_size = CHCR_KEYCTX_NO_KEY; 2370 opad_present = 0; 2371 break; 2372 } 2373 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; 2374 s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | 2375 V_KEY_CONTEXT_DUAL_CK(s->blkcipher.cipher_mode == 2376 SCMD_CIPH_MODE_AES_XTS) | 2377 V_KEY_CONTEXT_OPAD_PRESENT(opad_present) | 2378 V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) | 2379 V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1)); 2380 } 2381 2382 static bool 2383 ccr_auth_supported(const struct crypto_session_params *csp) 2384 { 2385 2386 switch (csp->csp_auth_alg) { 2387 case CRYPTO_SHA1: 2388 case CRYPTO_SHA2_224: 2389 case CRYPTO_SHA2_256: 2390 case CRYPTO_SHA2_384: 2391 case CRYPTO_SHA2_512: 2392 case CRYPTO_SHA1_HMAC: 2393 case CRYPTO_SHA2_224_HMAC: 2394 case CRYPTO_SHA2_256_HMAC: 2395 case CRYPTO_SHA2_384_HMAC: 2396 case CRYPTO_SHA2_512_HMAC: 2397 break; 2398 default: 2399 return (false); 2400 } 2401 return (true); 2402 } 2403 2404 static bool 2405 ccr_cipher_supported(const struct crypto_session_params *csp) 2406 { 2407 2408 switch (csp->csp_cipher_alg) { 2409 case CRYPTO_AES_CBC: 2410 if (csp->csp_ivlen != AES_BLOCK_LEN) 2411 return (false); 2412 break; 2413 case CRYPTO_AES_ICM: 2414 if (csp->csp_ivlen != AES_BLOCK_LEN) 2415 return (false); 2416 break; 2417 case CRYPTO_AES_XTS: 2418 if (csp->csp_ivlen != AES_XTS_IV_LEN) 2419 return (false); 2420 break; 2421 default: 2422 return (false); 2423 } 2424 return (ccr_aes_check_keylen(csp->csp_cipher_alg, 2425 csp->csp_cipher_klen)); 2426 } 2427 2428 static int 2429 ccr_cipher_mode(const struct crypto_session_params *csp) 2430 { 2431 2432 switch (csp->csp_cipher_alg) { 2433 case CRYPTO_AES_CBC: 2434 return (SCMD_CIPH_MODE_AES_CBC); 2435 case CRYPTO_AES_ICM: 2436 return (SCMD_CIPH_MODE_AES_CTR); 2437 case CRYPTO_AES_NIST_GCM_16: 2438 return (SCMD_CIPH_MODE_AES_GCM); 2439 case CRYPTO_AES_XTS: 2440 return (SCMD_CIPH_MODE_AES_XTS); 2441 case CRYPTO_AES_CCM_16: 2442 return (SCMD_CIPH_MODE_AES_CCM); 2443 
default: 2444 return (SCMD_CIPH_MODE_NOP); 2445 } 2446 } 2447 2448 static int 2449 ccr_probesession(device_t dev, const struct crypto_session_params *csp) 2450 { 2451 unsigned int cipher_mode; 2452 2453 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 2454 0) 2455 return (EINVAL); 2456 switch (csp->csp_mode) { 2457 case CSP_MODE_DIGEST: 2458 if (!ccr_auth_supported(csp)) 2459 return (EINVAL); 2460 break; 2461 case CSP_MODE_CIPHER: 2462 if (!ccr_cipher_supported(csp)) 2463 return (EINVAL); 2464 break; 2465 case CSP_MODE_AEAD: 2466 switch (csp->csp_cipher_alg) { 2467 case CRYPTO_AES_NIST_GCM_16: 2468 if (csp->csp_ivlen != AES_GCM_IV_LEN) 2469 return (EINVAL); 2470 if (csp->csp_auth_mlen < 0 || 2471 csp->csp_auth_mlen > AES_GMAC_HASH_LEN) 2472 return (EINVAL); 2473 break; 2474 case CRYPTO_AES_CCM_16: 2475 if (csp->csp_ivlen != AES_CCM_IV_LEN) 2476 return (EINVAL); 2477 if (csp->csp_auth_mlen < 0 || 2478 csp->csp_auth_mlen > AES_CBC_MAC_HASH_LEN) 2479 return (EINVAL); 2480 break; 2481 default: 2482 return (EINVAL); 2483 } 2484 break; 2485 case CSP_MODE_ETA: 2486 if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp)) 2487 return (EINVAL); 2488 break; 2489 default: 2490 return (EINVAL); 2491 } 2492 2493 if (csp->csp_cipher_klen != 0) { 2494 cipher_mode = ccr_cipher_mode(csp); 2495 if (cipher_mode == SCMD_CIPH_MODE_NOP) 2496 return (EINVAL); 2497 } 2498 2499 return (CRYPTODEV_PROBE_HARDWARE); 2500 } 2501 2502 /* 2503 * Select an available port with the lowest number of active sessions. 2504 */ 2505 static struct ccr_port * 2506 ccr_choose_port(struct ccr_softc *sc) 2507 { 2508 struct ccr_port *best, *p; 2509 int i; 2510 2511 mtx_assert(&sc->lock, MA_OWNED); 2512 best = NULL; 2513 for (i = 0; i < nitems(sc->ports); i++) { 2514 p = &sc->ports[i]; 2515 2516 /* Ignore non-existent ports. */ 2517 if (p->rxq == NULL) 2518 continue; 2519 2520 /* 2521 * XXX: Ignore ports whose queues aren't initialized. 2522 * This is racy as the rxq can be destroyed by the 2523 * associated VI detaching. Eventually ccr should use 2524 * dedicated queues. 
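 * Ports disabled via the dev.ccr.X.port_mask sysctl are likewise
 * skipped just below.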
2525 */ 2526 if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL) 2527 continue; 2528 2529 if ((sc->port_mask & (1u << i)) == 0) 2530 continue; 2531 2532 if (best == NULL || 2533 p->active_sessions < best->active_sessions) 2534 best = p; 2535 } 2536 return (best); 2537 } 2538 2539 static void 2540 ccr_delete_session(struct ccr_session *s) 2541 { 2542 sglist_free(s->sg_input); 2543 sglist_free(s->sg_output); 2544 sglist_free(s->sg_ulptx); 2545 sglist_free(s->sg_dsgl); 2546 mtx_destroy(&s->lock); 2547 } 2548 2549 static int 2550 ccr_newsession(device_t dev, crypto_session_t cses, 2551 const struct crypto_session_params *csp) 2552 { 2553 struct ccr_softc *sc; 2554 struct ccr_session *s; 2555 struct auth_hash *auth_hash; 2556 unsigned int auth_mode, cipher_mode, mk_size; 2557 unsigned int partial_digest_len; 2558 2559 switch (csp->csp_auth_alg) { 2560 case CRYPTO_SHA1: 2561 case CRYPTO_SHA1_HMAC: 2562 auth_hash = &auth_hash_hmac_sha1; 2563 auth_mode = SCMD_AUTH_MODE_SHA1; 2564 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; 2565 partial_digest_len = SHA1_HASH_LEN; 2566 break; 2567 case CRYPTO_SHA2_224: 2568 case CRYPTO_SHA2_224_HMAC: 2569 auth_hash = &auth_hash_hmac_sha2_224; 2570 auth_mode = SCMD_AUTH_MODE_SHA224; 2571 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2572 partial_digest_len = SHA2_256_HASH_LEN; 2573 break; 2574 case CRYPTO_SHA2_256: 2575 case CRYPTO_SHA2_256_HMAC: 2576 auth_hash = &auth_hash_hmac_sha2_256; 2577 auth_mode = SCMD_AUTH_MODE_SHA256; 2578 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2579 partial_digest_len = SHA2_256_HASH_LEN; 2580 break; 2581 case CRYPTO_SHA2_384: 2582 case CRYPTO_SHA2_384_HMAC: 2583 auth_hash = &auth_hash_hmac_sha2_384; 2584 auth_mode = SCMD_AUTH_MODE_SHA512_384; 2585 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 2586 partial_digest_len = SHA2_512_HASH_LEN; 2587 break; 2588 case CRYPTO_SHA2_512: 2589 case CRYPTO_SHA2_512_HMAC: 2590 auth_hash = &auth_hash_hmac_sha2_512; 2591 auth_mode = SCMD_AUTH_MODE_SHA512_512; 2592 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 2593 partial_digest_len = SHA2_512_HASH_LEN; 2594 break; 2595 default: 2596 auth_hash = NULL; 2597 auth_mode = SCMD_AUTH_MODE_NOP; 2598 mk_size = 0; 2599 partial_digest_len = 0; 2600 break; 2601 } 2602 2603 cipher_mode = ccr_cipher_mode(csp); 2604 2605 #ifdef INVARIANTS 2606 switch (csp->csp_mode) { 2607 case CSP_MODE_CIPHER: 2608 if (cipher_mode == SCMD_CIPH_MODE_NOP || 2609 cipher_mode == SCMD_CIPH_MODE_AES_GCM || 2610 cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2611 panic("invalid cipher algo"); 2612 break; 2613 case CSP_MODE_DIGEST: 2614 if (auth_mode == SCMD_AUTH_MODE_NOP) 2615 panic("invalid auth algo"); 2616 break; 2617 case CSP_MODE_AEAD: 2618 if (cipher_mode != SCMD_CIPH_MODE_AES_GCM && 2619 cipher_mode != SCMD_CIPH_MODE_AES_CCM) 2620 panic("invalid aead cipher algo"); 2621 if (auth_mode != SCMD_AUTH_MODE_NOP) 2622 panic("invalid aead auth algo"); 2623 break; 2624 case CSP_MODE_ETA: 2625 if (cipher_mode == SCMD_CIPH_MODE_NOP || 2626 cipher_mode == SCMD_CIPH_MODE_AES_GCM || 2627 cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2628 panic("invalid cipher algo"); 2629 if (auth_mode == SCMD_AUTH_MODE_NOP) 2630 panic("invalid auth algo"); 2631 break; 2632 default: 2633 panic("invalid csp mode"); 2634 } 2635 #endif 2636 2637 s = crypto_get_driver_session(cses); 2638 mtx_init(&s->lock, "ccr session", NULL, MTX_DEF); 2639 s->sg_input = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); 2640 s->sg_output = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); 2641 s->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); 2642 s->sg_dsgl =
sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_NOWAIT); 2643 if (s->sg_input == NULL || s->sg_output == NULL || 2644 s->sg_ulptx == NULL || s->sg_dsgl == NULL) { 2645 ccr_delete_session(s); 2646 return (ENOMEM); 2647 } 2648 2649 sc = device_get_softc(dev); 2650 2651 mtx_lock(&sc->lock); 2652 if (sc->detaching) { 2653 mtx_unlock(&sc->lock); 2654 ccr_delete_session(s); 2655 return (ENXIO); 2656 } 2657 2658 s->port = ccr_choose_port(sc); 2659 if (s->port == NULL) { 2660 mtx_unlock(&sc->lock); 2661 ccr_delete_session(s); 2662 return (ENXIO); 2663 } 2664 2665 switch (csp->csp_mode) { 2666 case CSP_MODE_AEAD: 2667 if (cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2668 s->mode = CCM; 2669 else 2670 s->mode = GCM; 2671 break; 2672 case CSP_MODE_ETA: 2673 s->mode = ETA; 2674 break; 2675 case CSP_MODE_DIGEST: 2676 if (csp->csp_auth_klen != 0) 2677 s->mode = HMAC; 2678 else 2679 s->mode = HASH; 2680 break; 2681 case CSP_MODE_CIPHER: 2682 s->mode = BLKCIPHER; 2683 break; 2684 } 2685 2686 if (s->mode == GCM) { 2687 if (csp->csp_auth_mlen == 0) 2688 s->gmac.hash_len = AES_GMAC_HASH_LEN; 2689 else 2690 s->gmac.hash_len = csp->csp_auth_mlen; 2691 t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen, 2692 s->gmac.ghash_h); 2693 } else if (s->mode == CCM) { 2694 if (csp->csp_auth_mlen == 0) 2695 s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN; 2696 else 2697 s->ccm_mac.hash_len = csp->csp_auth_mlen; 2698 } else if (auth_mode != SCMD_AUTH_MODE_NOP) { 2699 s->hmac.auth_hash = auth_hash; 2700 s->hmac.auth_mode = auth_mode; 2701 s->hmac.mk_size = mk_size; 2702 s->hmac.partial_digest_len = partial_digest_len; 2703 if (csp->csp_auth_mlen == 0) 2704 s->hmac.hash_len = auth_hash->hashsize; 2705 else 2706 s->hmac.hash_len = csp->csp_auth_mlen; 2707 if (csp->csp_auth_key != NULL) 2708 t4_init_hmac_digest(auth_hash, partial_digest_len, 2709 csp->csp_auth_key, csp->csp_auth_klen, 2710 s->hmac.pads); 2711 else 2712 ccr_init_hash_digest(s); 2713 } 2714 if (cipher_mode != SCMD_CIPH_MODE_NOP) { 2715 s->blkcipher.cipher_mode = cipher_mode; 2716 s->blkcipher.iv_len = csp->csp_ivlen; 2717 if (csp->csp_cipher_key != NULL) 2718 ccr_aes_setkey(s, csp->csp_cipher_key, 2719 csp->csp_cipher_klen); 2720 } 2721 2722 s->port->active_sessions++; 2723 mtx_unlock(&sc->lock); 2724 return (0); 2725 } 2726 2727 static void 2728 ccr_freesession(device_t dev, crypto_session_t cses) 2729 { 2730 struct ccr_softc *sc; 2731 struct ccr_session *s; 2732 2733 sc = device_get_softc(dev); 2734 s = crypto_get_driver_session(cses); 2735 #ifdef INVARIANTS 2736 if (s->pending != 0) 2737 device_printf(dev, 2738 "session %p freed with %d pending requests\n", s, 2739 s->pending); 2740 #endif 2741 mtx_lock(&sc->lock); 2742 s->port->active_sessions--; 2743 mtx_unlock(&sc->lock); 2744 ccr_delete_session(s); 2745 } 2746 2747 static int 2748 ccr_process(device_t dev, struct cryptop *crp, int hint) 2749 { 2750 const struct crypto_session_params *csp; 2751 struct ccr_softc *sc; 2752 struct ccr_session *s; 2753 int error; 2754 2755 csp = crypto_get_params(crp->crp_session); 2756 s = crypto_get_driver_session(crp->crp_session); 2757 sc = device_get_softc(dev); 2758 2759 mtx_lock(&s->lock); 2760 error = ccr_populate_sglist(s->sg_input, &crp->crp_buf); 2761 if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) 2762 error = ccr_populate_sglist(s->sg_output, &crp->crp_obuf); 2763 if (error) { 2764 counter_u64_add(sc->stats_sglist_error, 1); 2765 goto out; 2766 } 2767 2768 switch (s->mode) { 2769 case HASH: 2770 error = ccr_hash(sc, s, crp); 2771 if (error == 0) 2772 
counter_u64_add(sc->stats_hash, 1); 2773 break; 2774 case HMAC: 2775 if (crp->crp_auth_key != NULL) 2776 t4_init_hmac_digest(s->hmac.auth_hash, 2777 s->hmac.partial_digest_len, crp->crp_auth_key, 2778 csp->csp_auth_klen, s->hmac.pads); 2779 error = ccr_hash(sc, s, crp); 2780 if (error == 0) 2781 counter_u64_add(sc->stats_hmac, 1); 2782 break; 2783 case BLKCIPHER: 2784 if (crp->crp_cipher_key != NULL) 2785 ccr_aes_setkey(s, crp->crp_cipher_key, 2786 csp->csp_cipher_klen); 2787 error = ccr_blkcipher(sc, s, crp); 2788 if (error == 0) { 2789 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2790 counter_u64_add(sc->stats_blkcipher_encrypt, 1); 2791 else 2792 counter_u64_add(sc->stats_blkcipher_decrypt, 1); 2793 } 2794 break; 2795 case ETA: 2796 if (crp->crp_auth_key != NULL) 2797 t4_init_hmac_digest(s->hmac.auth_hash, 2798 s->hmac.partial_digest_len, crp->crp_auth_key, 2799 csp->csp_auth_klen, s->hmac.pads); 2800 if (crp->crp_cipher_key != NULL) 2801 ccr_aes_setkey(s, crp->crp_cipher_key, 2802 csp->csp_cipher_klen); 2803 error = ccr_eta(sc, s, crp); 2804 if (error == 0) { 2805 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2806 counter_u64_add(sc->stats_eta_encrypt, 1); 2807 else 2808 counter_u64_add(sc->stats_eta_decrypt, 1); 2809 } 2810 break; 2811 case GCM: 2812 if (crp->crp_cipher_key != NULL) { 2813 t4_init_gmac_hash(crp->crp_cipher_key, 2814 csp->csp_cipher_klen, s->gmac.ghash_h); 2815 ccr_aes_setkey(s, crp->crp_cipher_key, 2816 csp->csp_cipher_klen); 2817 } 2818 if (crp->crp_payload_length == 0) { 2819 mtx_unlock(&s->lock); 2820 ccr_gcm_soft(s, crp); 2821 return (0); 2822 } 2823 error = ccr_gcm(sc, s, crp); 2824 if (error == EMSGSIZE || error == EFBIG) { 2825 counter_u64_add(sc->stats_sw_fallback, 1); 2826 mtx_unlock(&s->lock); 2827 ccr_gcm_soft(s, crp); 2828 return (0); 2829 } 2830 if (error == 0) { 2831 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2832 counter_u64_add(sc->stats_gcm_encrypt, 1); 2833 else 2834 counter_u64_add(sc->stats_gcm_decrypt, 1); 2835 } 2836 break; 2837 case CCM: 2838 if (crp->crp_cipher_key != NULL) { 2839 ccr_aes_setkey(s, crp->crp_cipher_key, 2840 csp->csp_cipher_klen); 2841 } 2842 error = ccr_ccm(sc, s, crp); 2843 if (error == EMSGSIZE || error == EFBIG) { 2844 counter_u64_add(sc->stats_sw_fallback, 1); 2845 mtx_unlock(&s->lock); 2846 ccr_ccm_soft(s, crp); 2847 return (0); 2848 } 2849 if (error == 0) { 2850 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2851 counter_u64_add(sc->stats_ccm_encrypt, 1); 2852 else 2853 counter_u64_add(sc->stats_ccm_decrypt, 1); 2854 } 2855 break; 2856 } 2857 2858 if (error == 0) { 2859 #ifdef INVARIANTS 2860 s->pending++; 2861 #endif 2862 counter_u64_add(sc->stats_inflight, 1); 2863 counter_u64_add(s->port->stats_queued, 1); 2864 } else 2865 counter_u64_add(sc->stats_process_error, 1); 2866 2867 out: 2868 mtx_unlock(&s->lock); 2869 2870 if (error) { 2871 crp->crp_etype = error; 2872 crypto_done(crp); 2873 } 2874 2875 return (0); 2876 } 2877 2878 static int 2879 do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss, 2880 struct mbuf *m) 2881 { 2882 struct ccr_softc *sc = iq->adapter->ccr_softc; 2883 struct ccr_session *s; 2884 const struct cpl_fw6_pld *cpl; 2885 struct cryptop *crp; 2886 uint32_t status; 2887 int error; 2888 2889 if (m != NULL) 2890 cpl = mtod(m, const void *); 2891 else 2892 cpl = (const void *)(rss + 1); 2893 2894 crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]); 2895 s = crypto_get_driver_session(crp->crp_session); 2896 status = be64toh(cpl->data[0]); 2897 if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status)) 2898 error = 
EBADMSG; 2899 else 2900 error = 0; 2901 2902 #ifdef INVARIANTS 2903 mtx_lock(&s->lock); 2904 s->pending--; 2905 mtx_unlock(&s->lock); 2906 #endif 2907 counter_u64_add(sc->stats_inflight, -1); 2908 counter_u64_add(s->port->stats_completed, 1); 2909 2910 switch (s->mode) { 2911 case HASH: 2912 case HMAC: 2913 error = ccr_hash_done(sc, s, crp, cpl, error); 2914 break; 2915 case BLKCIPHER: 2916 error = ccr_blkcipher_done(sc, s, crp, cpl, error); 2917 break; 2918 case ETA: 2919 error = ccr_eta_done(sc, s, crp, cpl, error); 2920 break; 2921 case GCM: 2922 error = ccr_gcm_done(sc, s, crp, cpl, error); 2923 break; 2924 case CCM: 2925 error = ccr_ccm_done(sc, s, crp, cpl, error); 2926 break; 2927 } 2928 2929 if (error == EBADMSG) { 2930 if (CHK_MAC_ERR_BIT(status)) 2931 counter_u64_add(sc->stats_mac_error, 1); 2932 if (CHK_PAD_ERR_BIT(status)) 2933 counter_u64_add(sc->stats_pad_error, 1); 2934 } 2935 crp->crp_etype = error; 2936 crypto_done(crp); 2937 m_freem(m); 2938 return (0); 2939 } 2940 2941 static int 2942 ccr_modevent(module_t mod, int cmd, void *arg) 2943 { 2944 2945 switch (cmd) { 2946 case MOD_LOAD: 2947 t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld); 2948 return (0); 2949 case MOD_UNLOAD: 2950 t4_register_cpl_handler(CPL_FW6_PLD, NULL); 2951 return (0); 2952 default: 2953 return (EOPNOTSUPP); 2954 } 2955 } 2956 2957 static device_method_t ccr_methods[] = { 2958 DEVMETHOD(device_identify, ccr_identify), 2959 DEVMETHOD(device_probe, ccr_probe), 2960 DEVMETHOD(device_attach, ccr_attach), 2961 DEVMETHOD(device_detach, ccr_detach), 2962 2963 DEVMETHOD(cryptodev_probesession, ccr_probesession), 2964 DEVMETHOD(cryptodev_newsession, ccr_newsession), 2965 DEVMETHOD(cryptodev_freesession, ccr_freesession), 2966 DEVMETHOD(cryptodev_process, ccr_process), 2967 2968 DEVMETHOD_END 2969 }; 2970 2971 static driver_t ccr_driver = { 2972 "ccr", 2973 ccr_methods, 2974 sizeof(struct ccr_softc) 2975 }; 2976 2977 static devclass_t ccr_devclass; 2978 2979 DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL); 2980 MODULE_VERSION(ccr, 1); 2981 MODULE_DEPEND(ccr, crypto, 1, 1, 1); 2982 MODULE_DEPEND(ccr, t6nex, 1, 1, 1); 2983
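/*
 * Illustrative sketch (not part of the driver): roughly how an in-kernel
 * consumer might exercise this engine through the opencrypto framework,
 * which is what ultimately invokes ccr_probesession/ccr_newsession/
 * ccr_process above.  The ccr_example_* names are invented for this
 * sketch, it assumes the same in-kernel crypto API already used by this
 * file (crypto_session_params, crypto_use_buf, CRYPTO_F_IV_SEPARATE,
 * etc.), the callback synchronization is deliberately simplistic, and
 * the block is kept under #if 0 so it is never compiled.
 */
#if 0
static int
ccr_example_cb(struct cryptop *crp)
{

	/* The request is complete; crp->crp_etype holds any error. */
	wakeup(crp);
	return (0);
}

static int
ccr_example_encrypt(void *buf, int len, const void *key, int klen,
    const char iv[AES_BLOCK_LEN])
{
	struct crypto_session_params csp;
	crypto_session_t cses;
	struct cryptop *crp;
	int error;

	/* AES-CBC session; ccr_probesession() accepts this combination. */
	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_CIPHER;
	csp.csp_cipher_alg = CRYPTO_AES_CBC;
	csp.csp_cipher_key = key;
	csp.csp_cipher_klen = klen;
	csp.csp_ivlen = AES_BLOCK_LEN;
	error = crypto_newsession(&cses, &csp, CRYPTOCAP_F_HARDWARE);
	if (error != 0)
		return (error);

	/* Encrypt 'buf' in place with a caller-supplied IV. */
	crp = crypto_getreq(cses, M_WAITOK);
	crypto_use_buf(crp, buf, len);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_flags = CRYPTO_F_IV_SEPARATE;
	memcpy(crp->crp_iv, iv, AES_BLOCK_LEN);
	crp->crp_payload_start = 0;
	crp->crp_payload_length = len;
	crp->crp_callback = ccr_example_cb;
	error = crypto_dispatch(crp);
	if (error == 0) {
		/* Wait for ccr_example_cb() to run (simplistic). */
		tsleep(crp, 0, "ccrex", 0);
		error = crp->crp_etype;
	}
	crypto_freereq(crp);
	crypto_freesession(cses);
	return (error);
}
#endif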