1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (C) 2009-2011 Semihalf. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 /* 30 * CESA SRAM Memory Map: 31 * 32 * +------------------------+ <= sc->sc_sram_base_va + CESA_SRAM_SIZE 33 * | | 34 * | DATA | 35 * | | 36 * +------------------------+ <= sc->sc_sram_base_va + CESA_DATA(0) 37 * | struct cesa_sa_data | 38 * +------------------------+ 39 * | struct cesa_sa_hdesc | 40 * +------------------------+ <= sc->sc_sram_base_va 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/bus.h> 49 #include <sys/endian.h> 50 #include <sys/kernel.h> 51 #include <sys/lock.h> 52 #include <sys/mbuf.h> 53 #include <sys/module.h> 54 #include <sys/mutex.h> 55 #include <sys/rman.h> 56 57 #include <machine/bus.h> 58 #include <machine/intr.h> 59 #include <machine/resource.h> 60 #include <machine/fdt.h> 61 62 #include <dev/fdt/simplebus.h> 63 #include <dev/fdt/fdt_common.h> 64 #include <dev/ofw/ofw_bus.h> 65 #include <dev/ofw/ofw_bus_subr.h> 66 67 #include <sys/md5.h> 68 #include <crypto/sha1.h> 69 #include <crypto/sha2/sha256.h> 70 #include <crypto/rijndael/rijndael.h> 71 #include <opencrypto/cryptodev.h> 72 #include <opencrypto/xform.h> 73 #include "cryptodev_if.h" 74 75 #include <arm/mv/mvreg.h> 76 #include <arm/mv/mvvar.h> 77 #include "cesa.h" 78 79 static int cesa_probe(device_t); 80 static int cesa_attach(device_t); 81 static int cesa_attach_late(device_t); 82 static int cesa_detach(device_t); 83 static void cesa_intr(void *); 84 static int cesa_probesession(device_t, 85 const struct crypto_session_params *); 86 static int cesa_newsession(device_t, crypto_session_t, 87 const struct crypto_session_params *); 88 static int cesa_process(device_t, struct cryptop *, int); 89 90 static struct resource_spec cesa_res_spec[] = { 91 { SYS_RES_MEMORY, 0, RF_ACTIVE }, 92 { SYS_RES_MEMORY, 1, RF_ACTIVE }, 93 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 94 { -1, 0 } 95 }; 96 97 static device_method_t cesa_methods[] = { 98 /* Device interface */ 99 DEVMETHOD(device_probe, cesa_probe), 100 DEVMETHOD(device_attach, cesa_attach), 101 DEVMETHOD(device_detach, cesa_detach), 102 103 /* Crypto device methods */ 104 DEVMETHOD(cryptodev_probesession, cesa_probesession), 105 DEVMETHOD(cryptodev_newsession, cesa_newsession), 106 DEVMETHOD(cryptodev_process, cesa_process), 107 108 DEVMETHOD_END 109 }; 110 111 static driver_t cesa_driver = { 112 "cesa", 113 cesa_methods, 114 sizeof (struct cesa_softc) 115 }; 116 static devclass_t cesa_devclass; 117 118 DRIVER_MODULE(cesa, simplebus, cesa_driver, cesa_devclass, 0, 0); 119 MODULE_DEPEND(cesa, crypto, 1, 1, 1); 120 121 static void 122 cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd) 123 { 124 #ifdef DEBUG 125 device_t dev; 126 127 dev = sc->sc_dev; 128 device_printf(dev, "CESA SA Hardware Descriptor:\n"); 129 device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config); 130 device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src); 131 device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst); 132 device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen); 133 device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key); 134 device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv); 135 device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf); 136 device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src); 137 device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst); 138 device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen); 139 device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen); 140 device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in); 141 device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out); 142 #endif 143 } 144 145 static void 146 cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 147 { 148 struct cesa_dma_mem *cdm; 149 150 if (error) 151 return; 152 153 KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1.")); 154 cdm = arg; 155 cdm->cdm_paddr = segs->ds_addr; 156 } 157 158 static int 159 cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm, 160 bus_size_t size) 161 { 162 int error; 163 164 KASSERT(cdm->cdm_vaddr == NULL, 165 ("%s(): DMA memory descriptor in use.", __func__)); 166 167 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 168 PAGE_SIZE, 0, /* alignment, boundary */ 169 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 170 BUS_SPACE_MAXADDR, /* highaddr */ 171 NULL, NULL, /* filtfunc, filtfuncarg */ 172 size, 1, /* maxsize, nsegments */ 173 size, 0, /* maxsegsz, flags */ 174 NULL, NULL, /* lockfunc, lockfuncarg */ 175 &cdm->cdm_tag); /* dmat */ 176 if (error) { 177 device_printf(sc->sc_dev, "failed to allocate busdma tag, error" 178 " %i!\n", error); 179 180 goto err1; 181 } 182 183 error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr, 184 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map); 185 if (error) { 186 device_printf(sc->sc_dev, "failed to allocate DMA safe" 187 " memory, error %i!\n", error); 188 189 goto err2; 190 } 191 192 error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr, 193 size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT); 194 if (error) { 195 device_printf(sc->sc_dev, "cannot get address of the DMA" 196 " memory, error %i\n", error); 197 198 goto err3; 199 } 200 201 return (0); 202 err3: 203 bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); 204 err2: 205 bus_dma_tag_destroy(cdm->cdm_tag); 206 err1: 207 cdm->cdm_vaddr = NULL; 208 return (error); 209 } 210 211 static void 212 cesa_free_dma_mem(struct cesa_dma_mem *cdm) 213 { 214 215 bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map); 216 bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); 217 bus_dma_tag_destroy(cdm->cdm_tag); 218 cdm->cdm_vaddr = NULL; 219 } 220 221 static void 222 cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op) 223 { 224 225 /* Sync only if dma memory is valid */ 226 if (cdm->cdm_vaddr != NULL) 227 bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op); 228 } 229 230 static void 231 cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op) 232 { 233 234 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op); 235 cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op); 236 cesa_sync_dma_mem(&sc->sc_requests_cdm, op); 237 } 238 239 static struct cesa_request * 240 cesa_alloc_request(struct cesa_softc *sc) 241 { 242 struct cesa_request *cr; 243 244 CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests); 245 if (!cr) 246 return (NULL); 247 248 STAILQ_INIT(&cr->cr_tdesc); 249 STAILQ_INIT(&cr->cr_sdesc); 250 251 return (cr); 252 } 253 254 static void 255 cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr) 256 { 257 258 /* Free TDMA descriptors assigned to this request */ 259 CESA_LOCK(sc, tdesc); 260 STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc); 261 CESA_UNLOCK(sc, tdesc); 262 263 /* Free SA descriptors assigned to this request */ 264 CESA_LOCK(sc, sdesc); 265 STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc); 266 CESA_UNLOCK(sc, sdesc); 267 268 /* Unload DMA memory associated with request */ 269 if (cr->cr_dmap_loaded) { 270 bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap); 271 cr->cr_dmap_loaded = 0; 272 } 273 274 CESA_GENERIC_FREE_LOCKED(sc, cr, requests); 275 } 276 277 static void 278 cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr) 279 { 280 281 CESA_LOCK(sc, requests); 282 STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq); 283 CESA_UNLOCK(sc, requests); 284 } 285 286 static struct cesa_tdma_desc * 287 cesa_alloc_tdesc(struct cesa_softc *sc) 288 { 289 struct cesa_tdma_desc *ctd; 290 291 CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc); 292 293 if (!ctd) 294 device_printf(sc->sc_dev, "TDMA descriptors pool exhaused. " 295 "Consider increasing CESA_TDMA_DESCRIPTORS.\n"); 296 297 return (ctd); 298 } 299 300 static struct cesa_sa_desc * 301 cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr) 302 { 303 struct cesa_sa_desc *csd; 304 305 CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc); 306 if (!csd) { 307 device_printf(sc->sc_dev, "SA descriptors pool exhaused. " 308 "Consider increasing CESA_SA_DESCRIPTORS.\n"); 309 return (NULL); 310 } 311 312 STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq); 313 314 /* Fill-in SA descriptor with default values */ 315 csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key); 316 csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv); 317 csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv); 318 csd->csd_cshd->cshd_enc_src = 0; 319 csd->csd_cshd->cshd_enc_dst = 0; 320 csd->csd_cshd->cshd_enc_dlen = 0; 321 csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash); 322 csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in); 323 csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out); 324 csd->csd_cshd->cshd_mac_src = 0; 325 csd->csd_cshd->cshd_mac_dlen = 0; 326 327 return (csd); 328 } 329 330 static struct cesa_tdma_desc * 331 cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src, 332 bus_size_t size) 333 { 334 struct cesa_tdma_desc *ctd; 335 336 ctd = cesa_alloc_tdesc(sc); 337 if (!ctd) 338 return (NULL); 339 340 ctd->ctd_cthd->cthd_dst = dst; 341 ctd->ctd_cthd->cthd_src = src; 342 ctd->ctd_cthd->cthd_byte_count = size; 343 344 /* Handle special control packet */ 345 if (size != 0) 346 ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED; 347 else 348 ctd->ctd_cthd->cthd_flags = 0; 349 350 return (ctd); 351 } 352 353 static struct cesa_tdma_desc * 354 cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr) 355 { 356 357 return (cesa_tdma_copy(sc, sc->sc_sram_base_pa + 358 sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr, 359 sizeof(struct cesa_sa_data))); 360 } 361 362 static struct cesa_tdma_desc * 363 cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr) 364 { 365 366 return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base_pa + 367 sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data))); 368 } 369 370 static struct cesa_tdma_desc * 371 cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd) 372 { 373 374 return (cesa_tdma_copy(sc, sc->sc_sram_base_pa, csd->csd_cshd_paddr, 375 sizeof(struct cesa_sa_hdesc))); 376 } 377 378 static void 379 cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd) 380 { 381 struct cesa_tdma_desc *ctd_prev; 382 383 if (!STAILQ_EMPTY(&cr->cr_tdesc)) { 384 ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq); 385 ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr; 386 } 387 388 ctd->ctd_cthd->cthd_next = 0; 389 STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq); 390 } 391 392 static int 393 cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr, 394 struct cesa_packet *cp, struct cesa_sa_desc *csd) 395 { 396 struct cesa_tdma_desc *ctd, *tmp; 397 398 /* Copy SA descriptor for this packet */ 399 ctd = cesa_tdma_copy_sdesc(sc, csd); 400 if (!ctd) 401 return (ENOMEM); 402 403 cesa_append_tdesc(cr, ctd); 404 405 /* Copy data to be processed */ 406 STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp) 407 cesa_append_tdesc(cr, ctd); 408 STAILQ_INIT(&cp->cp_copyin); 409 410 /* Insert control descriptor */ 411 ctd = cesa_tdma_copy(sc, 0, 0, 0); 412 if (!ctd) 413 return (ENOMEM); 414 415 cesa_append_tdesc(cr, ctd); 416 417 /* Copy back results */ 418 STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp) 419 cesa_append_tdesc(cr, ctd); 420 STAILQ_INIT(&cp->cp_copyout); 421 422 return (0); 423 } 424 425 static void 426 cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen) 427 { 428 union authctx auth_ctx; 429 uint32_t *hout; 430 uint32_t *hin; 431 int i; 432 433 hin = (uint32_t *)cs->cs_hiv_in; 434 hout = (uint32_t *)cs->cs_hiv_out; 435 436 switch (alg) { 437 case CRYPTO_MD5_HMAC: 438 hmac_init_ipad(&auth_hash_hmac_md5, mkey, mklen, &auth_ctx); 439 memcpy(hin, auth_ctx.md5ctx.state, 440 sizeof(auth_ctx.md5ctx.state)); 441 hmac_init_opad(&auth_hash_hmac_md5, mkey, mklen, &auth_ctx); 442 memcpy(hout, auth_ctx.md5ctx.state, 443 sizeof(auth_ctx.md5ctx.state)); 444 break; 445 case CRYPTO_SHA1_HMAC: 446 hmac_init_ipad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx); 447 memcpy(hin, auth_ctx.sha1ctx.h.b32, 448 sizeof(auth_ctx.sha1ctx.h.b32)); 449 hmac_init_opad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx); 450 memcpy(hout, auth_ctx.sha1ctx.h.b32, 451 sizeof(auth_ctx.sha1ctx.h.b32)); 452 break; 453 case CRYPTO_SHA2_256_HMAC: 454 hmac_init_ipad(&auth_hash_hmac_sha2_256, mkey, mklen, 455 &auth_ctx); 456 memcpy(hin, auth_ctx.sha256ctx.state, 457 sizeof(auth_ctx.sha256ctx.state)); 458 hmac_init_opad(&auth_hash_hmac_sha2_256, mkey, mklen, 459 &auth_ctx); 460 memcpy(hout, auth_ctx.sha256ctx.state, 461 sizeof(auth_ctx.sha256ctx.state)); 462 break; 463 default: 464 panic("shouldn't get here"); 465 } 466 467 for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) { 468 hin[i] = htobe32(hin[i]); 469 hout[i] = htobe32(hout[i]); 470 } 471 } 472 473 static int 474 cesa_prep_aes_key(struct cesa_session *cs, 475 const struct crypto_session_params *csp) 476 { 477 uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; 478 uint32_t *dkey; 479 int i; 480 481 rijndaelKeySetupEnc(ek, cs->cs_key, csp->csp_cipher_klen * 8); 482 483 cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK; 484 dkey = (uint32_t *)cs->cs_aes_dkey; 485 486 switch (csp->csp_cipher_klen) { 487 case 16: 488 cs->cs_config |= CESA_CSH_AES_KLEN_128; 489 for (i = 0; i < 4; i++) 490 *dkey++ = htobe32(ek[4 * 10 + i]); 491 break; 492 case 24: 493 cs->cs_config |= CESA_CSH_AES_KLEN_192; 494 for (i = 0; i < 4; i++) 495 *dkey++ = htobe32(ek[4 * 12 + i]); 496 for (i = 0; i < 2; i++) 497 *dkey++ = htobe32(ek[4 * 11 + 2 + i]); 498 break; 499 case 32: 500 cs->cs_config |= CESA_CSH_AES_KLEN_256; 501 for (i = 0; i < 4; i++) 502 *dkey++ = htobe32(ek[4 * 14 + i]); 503 for (i = 0; i < 4; i++) 504 *dkey++ = htobe32(ek[4 * 13 + i]); 505 break; 506 default: 507 return (EINVAL); 508 } 509 510 return (0); 511 } 512 513 static void 514 cesa_start_packet(struct cesa_packet *cp, unsigned int size) 515 { 516 517 cp->cp_size = size; 518 cp->cp_offset = 0; 519 STAILQ_INIT(&cp->cp_copyin); 520 STAILQ_INIT(&cp->cp_copyout); 521 } 522 523 static int 524 cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp, 525 bus_dma_segment_t *seg) 526 { 527 struct cesa_tdma_desc *ctd; 528 unsigned int bsize; 529 530 /* Calculate size of block copy */ 531 bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset); 532 533 if (bsize > 0) { 534 ctd = cesa_tdma_copy(sc, sc->sc_sram_base_pa + 535 CESA_DATA(cp->cp_offset), seg->ds_addr, bsize); 536 if (!ctd) 537 return (-ENOMEM); 538 539 STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq); 540 541 ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base_pa + 542 CESA_DATA(cp->cp_offset), bsize); 543 if (!ctd) 544 return (-ENOMEM); 545 546 STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq); 547 548 seg->ds_len -= bsize; 549 seg->ds_addr += bsize; 550 cp->cp_offset += bsize; 551 } 552 553 return (bsize); 554 } 555 556 static void 557 cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 558 { 559 unsigned int mpsize, fragmented; 560 unsigned int mlen, mskip, tmlen; 561 struct cesa_chain_info *cci; 562 unsigned int elen, eskip; 563 unsigned int skip, len; 564 struct cesa_sa_desc *csd; 565 struct cesa_request *cr; 566 struct cryptop *crp; 567 struct cesa_softc *sc; 568 struct cesa_packet cp; 569 bus_dma_segment_t seg; 570 uint32_t config; 571 int size; 572 573 cci = arg; 574 sc = cci->cci_sc; 575 cr = cci->cci_cr; 576 crp = cr->cr_crp; 577 578 if (error) { 579 cci->cci_error = error; 580 return; 581 } 582 583 /* 584 * Only do a combined op if the AAD is adjacent to the payload 585 * and the AAD length is a multiple of the IV length. The 586 * checks against 'config' are to avoid recursing when the 587 * logic below invokes separate operations. 588 */ 589 config = cci->cci_config; 590 if (((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC || 591 (config & CESA_CSHD_OP_MASK) == CESA_CSHD_ENC_AND_MAC) && 592 crp->crp_aad_length != 0 && 593 (crp->crp_aad_length & (cr->cr_cs->cs_ivlen - 1)) != 0) { 594 /* 595 * Data alignment in the request does not meet CESA requiremnts 596 * for combined encryption/decryption and hashing. We have to 597 * split the request to separate operations and process them 598 * one by one. 599 */ 600 if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) { 601 config &= ~CESA_CSHD_OP_MASK; 602 603 cci->cci_config = config | CESA_CSHD_MAC; 604 cesa_create_chain_cb(cci, segs, nseg, 0); 605 606 cci->cci_config = config | CESA_CSHD_ENC; 607 cesa_create_chain_cb(cci, segs, nseg, 0); 608 } else { 609 config &= ~CESA_CSHD_OP_MASK; 610 611 cci->cci_config = config | CESA_CSHD_ENC; 612 cesa_create_chain_cb(cci, segs, nseg, 0); 613 614 cci->cci_config = config | CESA_CSHD_MAC; 615 cesa_create_chain_cb(cci, segs, nseg, 0); 616 } 617 618 return; 619 } 620 621 mskip = mlen = eskip = elen = 0; 622 623 if (crp->crp_aad_length == 0) { 624 skip = crp->crp_payload_start; 625 len = crp->crp_payload_length; 626 switch (config & CESA_CSHD_OP_MASK) { 627 case CESA_CSHD_ENC: 628 eskip = skip; 629 elen = len; 630 break; 631 case CESA_CSHD_MAC: 632 mskip = skip; 633 mlen = len; 634 break; 635 default: 636 eskip = skip; 637 elen = len; 638 mskip = skip; 639 mlen = len; 640 break; 641 } 642 } else { 643 /* 644 * For an encryption-only separate request, only 645 * process the payload. For combined requests and 646 * hash-only requests, process the entire region. 647 */ 648 switch (config & CESA_CSHD_OP_MASK) { 649 case CESA_CSHD_ENC: 650 skip = crp->crp_payload_start; 651 len = crp->crp_payload_length; 652 eskip = skip; 653 elen = len; 654 break; 655 case CESA_CSHD_MAC: 656 skip = crp->crp_aad_start; 657 len = crp->crp_aad_length + crp->crp_payload_length; 658 mskip = skip; 659 mlen = len; 660 break; 661 default: 662 skip = crp->crp_aad_start; 663 len = crp->crp_aad_length + crp->crp_payload_length; 664 mskip = skip; 665 mlen = len; 666 eskip = crp->crp_payload_start; 667 elen = crp->crp_payload_length; 668 break; 669 } 670 } 671 672 tmlen = mlen; 673 fragmented = 0; 674 mpsize = CESA_MAX_PACKET_SIZE; 675 mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1)); 676 677 /* Start first packet in chain */ 678 cesa_start_packet(&cp, MIN(mpsize, len)); 679 680 while (nseg-- && len > 0) { 681 seg = *(segs++); 682 683 /* 684 * Skip data in buffer on which neither ENC nor MAC operation 685 * is requested. 686 */ 687 if (skip > 0) { 688 size = MIN(skip, seg.ds_len); 689 skip -= size; 690 691 seg.ds_addr += size; 692 seg.ds_len -= size; 693 694 if (eskip > 0) 695 eskip -= size; 696 697 if (mskip > 0) 698 mskip -= size; 699 700 if (seg.ds_len == 0) 701 continue; 702 } 703 704 while (1) { 705 /* 706 * Fill in current packet with data. Break if there is 707 * no more data in current DMA segment or an error 708 * occurred. 709 */ 710 size = cesa_fill_packet(sc, &cp, &seg); 711 if (size <= 0) { 712 error = -size; 713 break; 714 } 715 716 len -= size; 717 718 /* If packet is full, append it to the chain */ 719 if (cp.cp_size == cp.cp_offset) { 720 csd = cesa_alloc_sdesc(sc, cr); 721 if (!csd) { 722 error = ENOMEM; 723 break; 724 } 725 726 /* Create SA descriptor for this packet */ 727 csd->csd_cshd->cshd_config = cci->cci_config; 728 csd->csd_cshd->cshd_mac_total_dlen = tmlen; 729 730 /* 731 * Enable fragmentation if request will not fit 732 * into one packet. 733 */ 734 if (len > 0) { 735 if (!fragmented) { 736 fragmented = 1; 737 csd->csd_cshd->cshd_config |= 738 CESA_CSHD_FRAG_FIRST; 739 } else 740 csd->csd_cshd->cshd_config |= 741 CESA_CSHD_FRAG_MIDDLE; 742 } else if (fragmented) 743 csd->csd_cshd->cshd_config |= 744 CESA_CSHD_FRAG_LAST; 745 746 if (eskip < cp.cp_size && elen > 0) { 747 csd->csd_cshd->cshd_enc_src = 748 CESA_DATA(eskip); 749 csd->csd_cshd->cshd_enc_dst = 750 CESA_DATA(eskip); 751 csd->csd_cshd->cshd_enc_dlen = 752 MIN(elen, cp.cp_size - eskip); 753 } 754 755 if (mskip < cp.cp_size && mlen > 0) { 756 csd->csd_cshd->cshd_mac_src = 757 CESA_DATA(mskip); 758 csd->csd_cshd->cshd_mac_dlen = 759 MIN(mlen, cp.cp_size - mskip); 760 } 761 762 elen -= csd->csd_cshd->cshd_enc_dlen; 763 eskip -= MIN(eskip, cp.cp_size); 764 mlen -= csd->csd_cshd->cshd_mac_dlen; 765 mskip -= MIN(mskip, cp.cp_size); 766 767 cesa_dump_cshd(sc, csd->csd_cshd); 768 769 /* Append packet to the request */ 770 error = cesa_append_packet(sc, cr, &cp, csd); 771 if (error) 772 break; 773 774 /* Start a new packet, as current is full */ 775 cesa_start_packet(&cp, MIN(mpsize, len)); 776 } 777 } 778 779 if (error) 780 break; 781 } 782 783 if (error) { 784 /* 785 * Move all allocated resources to the request. They will be 786 * freed later. 787 */ 788 STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin); 789 STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout); 790 cci->cci_error = error; 791 } 792 } 793 794 static int 795 cesa_create_chain(struct cesa_softc *sc, 796 const struct crypto_session_params *csp, struct cesa_request *cr) 797 { 798 struct cesa_chain_info cci; 799 struct cesa_tdma_desc *ctd; 800 uint32_t config; 801 int error; 802 803 error = 0; 804 CESA_LOCK_ASSERT(sc, sessions); 805 806 /* Create request metadata */ 807 if (csp->csp_cipher_klen != 0) { 808 if (csp->csp_cipher_alg == CRYPTO_AES_CBC && 809 !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op)) 810 memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey, 811 csp->csp_cipher_klen); 812 else 813 memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key, 814 csp->csp_cipher_klen); 815 } 816 817 if (csp->csp_auth_klen != 0) { 818 memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in, 819 CESA_MAX_HASH_LEN); 820 memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out, 821 CESA_MAX_HASH_LEN); 822 } 823 824 ctd = cesa_tdma_copyin_sa_data(sc, cr); 825 if (!ctd) 826 return (ENOMEM); 827 828 cesa_append_tdesc(cr, ctd); 829 830 /* Prepare SA configuration */ 831 config = cr->cr_cs->cs_config; 832 833 if (csp->csp_cipher_alg != 0 && 834 !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op)) 835 config |= CESA_CSHD_DECRYPT; 836 switch (csp->csp_mode) { 837 case CSP_MODE_CIPHER: 838 config |= CESA_CSHD_ENC; 839 break; 840 case CSP_MODE_DIGEST: 841 config |= CESA_CSHD_MAC; 842 break; 843 case CSP_MODE_ETA: 844 config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC : 845 CESA_CSHD_ENC_AND_MAC; 846 break; 847 } 848 849 /* Create data packets */ 850 cci.cci_sc = sc; 851 cci.cci_cr = cr; 852 cci.cci_config = config; 853 cci.cci_error = 0; 854 855 error = bus_dmamap_load_crp(sc->sc_data_dtag, cr->cr_dmap, cr->cr_crp, 856 cesa_create_chain_cb, &cci, BUS_DMA_NOWAIT); 857 858 if (!error) 859 cr->cr_dmap_loaded = 1; 860 861 if (cci.cci_error) 862 error = cci.cci_error; 863 864 if (error) 865 return (error); 866 867 /* Read back request metadata */ 868 ctd = cesa_tdma_copyout_sa_data(sc, cr); 869 if (!ctd) 870 return (ENOMEM); 871 872 cesa_append_tdesc(cr, ctd); 873 874 return (0); 875 } 876 877 static void 878 cesa_execute(struct cesa_softc *sc) 879 { 880 struct cesa_tdma_desc *prev_ctd, *ctd; 881 struct cesa_request *prev_cr, *cr; 882 883 CESA_LOCK(sc, requests); 884 885 /* 886 * If ready list is empty, there is nothing to execute. If queued list 887 * is not empty, the hardware is busy and we cannot start another 888 * execution. 889 */ 890 if (STAILQ_EMPTY(&sc->sc_ready_requests) || 891 !STAILQ_EMPTY(&sc->sc_queued_requests)) { 892 CESA_UNLOCK(sc, requests); 893 return; 894 } 895 896 /* Move all ready requests to queued list */ 897 STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests); 898 STAILQ_INIT(&sc->sc_ready_requests); 899 900 /* Create one execution chain from all requests on the list */ 901 if (STAILQ_FIRST(&sc->sc_queued_requests) != 902 STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) { 903 prev_cr = NULL; 904 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD | 905 BUS_DMASYNC_POSTWRITE); 906 907 STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) { 908 if (prev_cr) { 909 ctd = STAILQ_FIRST(&cr->cr_tdesc); 910 prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc, 911 cesa_tdma_desc, ctd_stq); 912 913 prev_ctd->ctd_cthd->cthd_next = 914 ctd->ctd_cthd_paddr; 915 } 916 917 prev_cr = cr; 918 } 919 920 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD | 921 BUS_DMASYNC_PREWRITE); 922 } 923 924 /* Start chain execution in hardware */ 925 cr = STAILQ_FIRST(&sc->sc_queued_requests); 926 ctd = STAILQ_FIRST(&cr->cr_tdesc); 927 928 CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr); 929 930 if (sc->sc_soc_id == MV_DEV_88F6828 || 931 sc->sc_soc_id == MV_DEV_88F6820 || 932 sc->sc_soc_id == MV_DEV_88F6810) 933 CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2); 934 else 935 CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE); 936 937 CESA_UNLOCK(sc, requests); 938 } 939 940 static int 941 cesa_setup_sram(struct cesa_softc *sc) 942 { 943 phandle_t sram_node; 944 ihandle_t sram_ihandle; 945 pcell_t sram_handle, sram_reg[2]; 946 void *sram_va; 947 int rv; 948 949 rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle", 950 (void *)&sram_handle, sizeof(sram_handle)); 951 if (rv <= 0) 952 return (rv); 953 954 sram_ihandle = (ihandle_t)sram_handle; 955 sram_node = OF_instance_to_package(sram_ihandle); 956 957 rv = OF_getencprop(sram_node, "reg", (void *)sram_reg, sizeof(sram_reg)); 958 if (rv <= 0) 959 return (rv); 960 961 sc->sc_sram_base_pa = sram_reg[0]; 962 /* Store SRAM size to be able to unmap in detach() */ 963 sc->sc_sram_size = sram_reg[1]; 964 965 if (sc->sc_soc_id != MV_DEV_88F6828 && 966 sc->sc_soc_id != MV_DEV_88F6820 && 967 sc->sc_soc_id != MV_DEV_88F6810) 968 return (0); 969 970 /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ 971 sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); 972 if (sram_va == NULL) 973 return (ENOMEM); 974 sc->sc_sram_base_va = (vm_offset_t)sram_va; 975 976 return (0); 977 } 978 979 /* 980 * Function: device_from_node 981 * This function returns appropriate device_t to phandle_t 982 * Parameters: 983 * root - device where you want to start search 984 * if you provide NULL here, function will take 985 * "root0" device as root. 986 * node - we are checking every device_t to be 987 * appropriate with this. 988 */ 989 static device_t 990 device_from_node(device_t root, phandle_t node) 991 { 992 device_t *children, retval; 993 int nkid, i; 994 995 /* Nothing matches no node */ 996 if (node == -1) 997 return (NULL); 998 999 if (root == NULL) 1000 /* Get root of device tree */ 1001 if ((root = device_lookup_by_name("root0")) == NULL) 1002 return (NULL); 1003 1004 if (device_get_children(root, &children, &nkid) != 0) 1005 return (NULL); 1006 1007 retval = NULL; 1008 for (i = 0; i < nkid; i++) { 1009 /* Check if device and node matches */ 1010 if (OFW_BUS_GET_NODE(root, children[i]) == node) { 1011 retval = children[i]; 1012 break; 1013 } 1014 /* or go deeper */ 1015 if ((retval = device_from_node(children[i], node)) != NULL) 1016 break; 1017 } 1018 free(children, M_TEMP); 1019 1020 return (retval); 1021 } 1022 1023 static int 1024 cesa_setup_sram_armada(struct cesa_softc *sc) 1025 { 1026 phandle_t sram_node; 1027 ihandle_t sram_ihandle; 1028 pcell_t sram_handle[2]; 1029 void *sram_va; 1030 int rv, j; 1031 struct resource_list rl; 1032 struct resource_list_entry *rle; 1033 struct simplebus_softc *ssc; 1034 device_t sdev; 1035 1036 /* Get refs to SRAMS from CESA node */ 1037 rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "marvell,crypto-srams", 1038 (void *)sram_handle, sizeof(sram_handle)); 1039 if (rv <= 0) 1040 return (rv); 1041 1042 if (sc->sc_cesa_engine_id >= 2) 1043 return (ENXIO); 1044 1045 /* Get SRAM node on the basis of sc_cesa_engine_id */ 1046 sram_ihandle = (ihandle_t)sram_handle[sc->sc_cesa_engine_id]; 1047 sram_node = OF_instance_to_package(sram_ihandle); 1048 1049 /* Get device_t of simplebus (sram_node parent) */ 1050 sdev = device_from_node(NULL, OF_parent(sram_node)); 1051 if (!sdev) 1052 return (ENXIO); 1053 1054 ssc = device_get_softc(sdev); 1055 1056 resource_list_init(&rl); 1057 /* Parse reg property to resource list */ 1058 ofw_bus_reg_to_rl(sdev, sram_node, ssc->acells, 1059 ssc->scells, &rl); 1060 1061 /* We expect only one resource */ 1062 rle = resource_list_find(&rl, SYS_RES_MEMORY, 0); 1063 if (rle == NULL) 1064 return (ENXIO); 1065 1066 /* Remap through ranges property */ 1067 for (j = 0; j < ssc->nranges; j++) { 1068 if (rle->start >= ssc->ranges[j].bus && 1069 rle->end < ssc->ranges[j].bus + ssc->ranges[j].size) { 1070 rle->start -= ssc->ranges[j].bus; 1071 rle->start += ssc->ranges[j].host; 1072 rle->end -= ssc->ranges[j].bus; 1073 rle->end += ssc->ranges[j].host; 1074 } 1075 } 1076 1077 sc->sc_sram_base_pa = rle->start; 1078 sc->sc_sram_size = rle->count; 1079 1080 /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ 1081 sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); 1082 if (sram_va == NULL) 1083 return (ENOMEM); 1084 sc->sc_sram_base_va = (vm_offset_t)sram_va; 1085 1086 return (0); 1087 } 1088 1089 struct ofw_compat_data cesa_devices[] = { 1090 { "mrvl,cesa", (uintptr_t)true }, 1091 { "marvell,armada-38x-crypto", (uintptr_t)true }, 1092 { NULL, 0 } 1093 }; 1094 1095 static int 1096 cesa_probe(device_t dev) 1097 { 1098 1099 if (!ofw_bus_status_okay(dev)) 1100 return (ENXIO); 1101 1102 if (!ofw_bus_search_compatible(dev, cesa_devices)->ocd_data) 1103 return (ENXIO); 1104 1105 device_set_desc(dev, "Marvell Cryptographic Engine and Security " 1106 "Accelerator"); 1107 1108 return (BUS_PROBE_DEFAULT); 1109 } 1110 1111 static int 1112 cesa_attach(device_t dev) 1113 { 1114 static int engine_idx = 0; 1115 struct simplebus_devinfo *ndi; 1116 struct resource_list *rl; 1117 struct cesa_softc *sc; 1118 1119 if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) 1120 return (cesa_attach_late(dev)); 1121 1122 /* 1123 * Get simplebus_devinfo which contains 1124 * resource list filled with adresses and 1125 * interrupts read form FDT. 1126 * Let's correct it by splitting resources 1127 * for each engine. 1128 */ 1129 if ((ndi = device_get_ivars(dev)) == NULL) 1130 return (ENXIO); 1131 1132 rl = &ndi->rl; 1133 1134 switch (engine_idx) { 1135 case 0: 1136 /* Update regs values */ 1137 resource_list_add(rl, SYS_RES_MEMORY, 0, CESA0_TDMA_ADDR, 1138 CESA0_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); 1139 resource_list_add(rl, SYS_RES_MEMORY, 1, CESA0_CESA_ADDR, 1140 CESA0_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); 1141 1142 /* Remove unused interrupt */ 1143 resource_list_delete(rl, SYS_RES_IRQ, 1); 1144 break; 1145 1146 case 1: 1147 /* Update regs values */ 1148 resource_list_add(rl, SYS_RES_MEMORY, 0, CESA1_TDMA_ADDR, 1149 CESA1_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); 1150 resource_list_add(rl, SYS_RES_MEMORY, 1, CESA1_CESA_ADDR, 1151 CESA1_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); 1152 1153 /* Remove unused interrupt */ 1154 resource_list_delete(rl, SYS_RES_IRQ, 0); 1155 resource_list_find(rl, SYS_RES_IRQ, 1)->rid = 0; 1156 break; 1157 1158 default: 1159 device_printf(dev, "Bad cesa engine_idx\n"); 1160 return (ENXIO); 1161 } 1162 1163 sc = device_get_softc(dev); 1164 sc->sc_cesa_engine_id = engine_idx; 1165 1166 /* 1167 * Call simplebus_add_device only once. 1168 * It will create second cesa driver instance 1169 * with the same FDT node as first instance. 1170 * When second driver reach this function, 1171 * it will be configured to use second cesa engine 1172 */ 1173 if (engine_idx == 0) 1174 simplebus_add_device(device_get_parent(dev), ofw_bus_get_node(dev), 1175 0, "cesa", 1, NULL); 1176 1177 engine_idx++; 1178 1179 return (cesa_attach_late(dev)); 1180 } 1181 1182 static int 1183 cesa_attach_late(device_t dev) 1184 { 1185 struct cesa_softc *sc; 1186 uint32_t d, r, val; 1187 int error; 1188 int i; 1189 1190 sc = device_get_softc(dev); 1191 sc->sc_blocked = 0; 1192 sc->sc_error = 0; 1193 sc->sc_dev = dev; 1194 1195 soc_id(&d, &r); 1196 1197 switch (d) { 1198 case MV_DEV_88F6281: 1199 case MV_DEV_88F6282: 1200 /* Check if CESA peripheral device has power turned on */ 1201 if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) == 1202 CPU_PM_CTRL_CRYPTO) { 1203 device_printf(dev, "not powered on\n"); 1204 return (ENXIO); 1205 } 1206 sc->sc_tperr = 0; 1207 break; 1208 case MV_DEV_88F6828: 1209 case MV_DEV_88F6820: 1210 case MV_DEV_88F6810: 1211 sc->sc_tperr = 0; 1212 break; 1213 case MV_DEV_MV78100: 1214 case MV_DEV_MV78100_Z0: 1215 /* Check if CESA peripheral device has power turned on */ 1216 if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) != 1217 CPU_PM_CTRL_CRYPTO) { 1218 device_printf(dev, "not powered on\n"); 1219 return (ENXIO); 1220 } 1221 sc->sc_tperr = CESA_ICR_TPERR; 1222 break; 1223 default: 1224 return (ENXIO); 1225 } 1226 1227 sc->sc_soc_id = d; 1228 1229 /* Initialize mutexes */ 1230 mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev), 1231 "CESA Shared Data", MTX_DEF); 1232 mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev), 1233 "CESA TDMA Descriptors Pool", MTX_DEF); 1234 mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev), 1235 "CESA SA Descriptors Pool", MTX_DEF); 1236 mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev), 1237 "CESA Requests Pool", MTX_DEF); 1238 mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev), 1239 "CESA Sessions Pool", MTX_DEF); 1240 1241 /* Allocate I/O and IRQ resources */ 1242 error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res); 1243 if (error) { 1244 device_printf(dev, "could not allocate resources\n"); 1245 goto err0; 1246 } 1247 1248 /* Acquire SRAM base address */ 1249 if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) 1250 error = cesa_setup_sram(sc); 1251 else 1252 error = cesa_setup_sram_armada(sc); 1253 1254 if (error) { 1255 device_printf(dev, "could not setup SRAM\n"); 1256 goto err1; 1257 } 1258 1259 /* Setup interrupt handler */ 1260 error = bus_setup_intr(dev, sc->sc_res[RES_CESA_IRQ], INTR_TYPE_NET | 1261 INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie)); 1262 if (error) { 1263 device_printf(dev, "could not setup engine completion irq\n"); 1264 goto err2; 1265 } 1266 1267 /* Create DMA tag for processed data */ 1268 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1269 1, 0, /* alignment, boundary */ 1270 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1271 BUS_SPACE_MAXADDR, /* highaddr */ 1272 NULL, NULL, /* filtfunc, filtfuncarg */ 1273 CESA_MAX_REQUEST_SIZE, /* maxsize */ 1274 CESA_MAX_FRAGMENTS, /* nsegments */ 1275 CESA_MAX_REQUEST_SIZE, 0, /* maxsegsz, flags */ 1276 NULL, NULL, /* lockfunc, lockfuncarg */ 1277 &sc->sc_data_dtag); /* dmat */ 1278 if (error) 1279 goto err3; 1280 1281 /* Initialize data structures: TDMA Descriptors Pool */ 1282 error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm, 1283 CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc)); 1284 if (error) 1285 goto err4; 1286 1287 STAILQ_INIT(&sc->sc_free_tdesc); 1288 for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) { 1289 sc->sc_tdesc[i].ctd_cthd = 1290 (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i; 1291 sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr + 1292 (i * sizeof(struct cesa_tdma_hdesc)); 1293 STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i], 1294 ctd_stq); 1295 } 1296 1297 /* Initialize data structures: SA Descriptors Pool */ 1298 error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm, 1299 CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc)); 1300 if (error) 1301 goto err5; 1302 1303 STAILQ_INIT(&sc->sc_free_sdesc); 1304 for (i = 0; i < CESA_SA_DESCRIPTORS; i++) { 1305 sc->sc_sdesc[i].csd_cshd = 1306 (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i; 1307 sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr + 1308 (i * sizeof(struct cesa_sa_hdesc)); 1309 STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i], 1310 csd_stq); 1311 } 1312 1313 /* Initialize data structures: Requests Pool */ 1314 error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm, 1315 CESA_REQUESTS * sizeof(struct cesa_sa_data)); 1316 if (error) 1317 goto err6; 1318 1319 STAILQ_INIT(&sc->sc_free_requests); 1320 STAILQ_INIT(&sc->sc_ready_requests); 1321 STAILQ_INIT(&sc->sc_queued_requests); 1322 for (i = 0; i < CESA_REQUESTS; i++) { 1323 sc->sc_requests[i].cr_csd = 1324 (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i; 1325 sc->sc_requests[i].cr_csd_paddr = 1326 sc->sc_requests_cdm.cdm_paddr + 1327 (i * sizeof(struct cesa_sa_data)); 1328 1329 /* Preallocate DMA maps */ 1330 error = bus_dmamap_create(sc->sc_data_dtag, 0, 1331 &sc->sc_requests[i].cr_dmap); 1332 if (error && i > 0) { 1333 i--; 1334 do { 1335 bus_dmamap_destroy(sc->sc_data_dtag, 1336 sc->sc_requests[i].cr_dmap); 1337 } while (i--); 1338 1339 goto err7; 1340 } 1341 1342 STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i], 1343 cr_stq); 1344 } 1345 1346 /* 1347 * Initialize TDMA: 1348 * - Burst limit: 128 bytes, 1349 * - Outstanding reads enabled, 1350 * - No byte-swap. 1351 */ 1352 val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 | 1353 CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE; 1354 1355 if (sc->sc_soc_id == MV_DEV_88F6828 || 1356 sc->sc_soc_id == MV_DEV_88F6820 || 1357 sc->sc_soc_id == MV_DEV_88F6810) 1358 val |= CESA_TDMA_NUM_OUTSTAND; 1359 1360 CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val); 1361 1362 /* 1363 * Initialize SA: 1364 * - SA descriptor is present at beginning of CESA SRAM, 1365 * - Multi-packet chain mode, 1366 * - Cooperation with TDMA enabled. 1367 */ 1368 CESA_REG_WRITE(sc, CESA_SA_DPR, 0); 1369 CESA_REG_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA | 1370 CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE); 1371 1372 /* Unmask interrupts */ 1373 CESA_REG_WRITE(sc, CESA_ICR, 0); 1374 CESA_REG_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr); 1375 CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); 1376 CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS | 1377 CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT | 1378 CESA_TDMA_EMR_DATA_ERROR); 1379 1380 /* Register in OCF */ 1381 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct cesa_session), 1382 CRYPTOCAP_F_HARDWARE); 1383 if (sc->sc_cid < 0) { 1384 device_printf(dev, "could not get crypto driver id\n"); 1385 goto err8; 1386 } 1387 1388 return (0); 1389 err8: 1390 for (i = 0; i < CESA_REQUESTS; i++) 1391 bus_dmamap_destroy(sc->sc_data_dtag, 1392 sc->sc_requests[i].cr_dmap); 1393 err7: 1394 cesa_free_dma_mem(&sc->sc_requests_cdm); 1395 err6: 1396 cesa_free_dma_mem(&sc->sc_sdesc_cdm); 1397 err5: 1398 cesa_free_dma_mem(&sc->sc_tdesc_cdm); 1399 err4: 1400 bus_dma_tag_destroy(sc->sc_data_dtag); 1401 err3: 1402 bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); 1403 err2: 1404 if (sc->sc_soc_id == MV_DEV_88F6828 || 1405 sc->sc_soc_id == MV_DEV_88F6820 || 1406 sc->sc_soc_id == MV_DEV_88F6810) 1407 pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); 1408 err1: 1409 bus_release_resources(dev, cesa_res_spec, sc->sc_res); 1410 err0: 1411 mtx_destroy(&sc->sc_sessions_lock); 1412 mtx_destroy(&sc->sc_requests_lock); 1413 mtx_destroy(&sc->sc_sdesc_lock); 1414 mtx_destroy(&sc->sc_tdesc_lock); 1415 mtx_destroy(&sc->sc_sc_lock); 1416 return (ENXIO); 1417 } 1418 1419 static int 1420 cesa_detach(device_t dev) 1421 { 1422 struct cesa_softc *sc; 1423 int i; 1424 1425 sc = device_get_softc(dev); 1426 1427 /* TODO: Wait for queued requests completion before shutdown. */ 1428 1429 /* Mask interrupts */ 1430 CESA_REG_WRITE(sc, CESA_ICM, 0); 1431 CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, 0); 1432 1433 /* Unregister from OCF */ 1434 crypto_unregister_all(sc->sc_cid); 1435 1436 /* Free DMA Maps */ 1437 for (i = 0; i < CESA_REQUESTS; i++) 1438 bus_dmamap_destroy(sc->sc_data_dtag, 1439 sc->sc_requests[i].cr_dmap); 1440 1441 /* Free DMA Memory */ 1442 cesa_free_dma_mem(&sc->sc_requests_cdm); 1443 cesa_free_dma_mem(&sc->sc_sdesc_cdm); 1444 cesa_free_dma_mem(&sc->sc_tdesc_cdm); 1445 1446 /* Free DMA Tag */ 1447 bus_dma_tag_destroy(sc->sc_data_dtag); 1448 1449 /* Stop interrupt */ 1450 bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); 1451 1452 /* Relase I/O and IRQ resources */ 1453 bus_release_resources(dev, cesa_res_spec, sc->sc_res); 1454 1455 /* Unmap SRAM memory */ 1456 if (sc->sc_soc_id == MV_DEV_88F6828 || 1457 sc->sc_soc_id == MV_DEV_88F6820 || 1458 sc->sc_soc_id == MV_DEV_88F6810) 1459 pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); 1460 1461 /* Destroy mutexes */ 1462 mtx_destroy(&sc->sc_sessions_lock); 1463 mtx_destroy(&sc->sc_requests_lock); 1464 mtx_destroy(&sc->sc_sdesc_lock); 1465 mtx_destroy(&sc->sc_tdesc_lock); 1466 mtx_destroy(&sc->sc_sc_lock); 1467 1468 return (0); 1469 } 1470 1471 static void 1472 cesa_intr(void *arg) 1473 { 1474 STAILQ_HEAD(, cesa_request) requests; 1475 struct cesa_request *cr, *tmp; 1476 struct cesa_softc *sc; 1477 uint32_t ecr, icr; 1478 uint8_t hash[HASH_MAX_LEN]; 1479 int blocked; 1480 1481 sc = arg; 1482 1483 /* Ack interrupt */ 1484 ecr = CESA_TDMA_READ(sc, CESA_TDMA_ECR); 1485 CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); 1486 icr = CESA_REG_READ(sc, CESA_ICR); 1487 CESA_REG_WRITE(sc, CESA_ICR, 0); 1488 1489 /* Check for TDMA errors */ 1490 if (ecr & CESA_TDMA_ECR_MISS) { 1491 device_printf(sc->sc_dev, "TDMA Miss error detected!\n"); 1492 sc->sc_error = EIO; 1493 } 1494 1495 if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) { 1496 device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n"); 1497 sc->sc_error = EIO; 1498 } 1499 1500 if (ecr & CESA_TDMA_ECR_BOTH_HIT) { 1501 device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n"); 1502 sc->sc_error = EIO; 1503 } 1504 1505 if (ecr & CESA_TDMA_ECR_DATA_ERROR) { 1506 device_printf(sc->sc_dev, "TDMA Data error detected!\n"); 1507 sc->sc_error = EIO; 1508 } 1509 1510 /* Check for CESA errors */ 1511 if (icr & sc->sc_tperr) { 1512 device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n"); 1513 sc->sc_error = EIO; 1514 } 1515 1516 /* If there is nothing more to do, return */ 1517 if ((icr & CESA_ICR_ACCTDMA) == 0) 1518 return; 1519 1520 /* Get all finished requests */ 1521 CESA_LOCK(sc, requests); 1522 STAILQ_INIT(&requests); 1523 STAILQ_CONCAT(&requests, &sc->sc_queued_requests); 1524 STAILQ_INIT(&sc->sc_queued_requests); 1525 CESA_UNLOCK(sc, requests); 1526 1527 /* Execute all ready requests */ 1528 cesa_execute(sc); 1529 1530 /* Process completed requests */ 1531 cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD | 1532 BUS_DMASYNC_POSTWRITE); 1533 1534 STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) { 1535 bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, 1536 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1537 1538 cr->cr_crp->crp_etype = sc->sc_error; 1539 if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0) { 1540 if (cr->cr_crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { 1541 crypto_copydata(cr->cr_crp, 1542 cr->cr_crp->crp_digest_start, 1543 cr->cr_cs->cs_hlen, hash); 1544 if (timingsafe_bcmp(hash, cr->cr_csd->csd_hash, 1545 cr->cr_cs->cs_hlen) != 0) 1546 cr->cr_crp->crp_etype = EBADMSG; 1547 } else 1548 crypto_copyback(cr->cr_crp, 1549 cr->cr_crp->crp_digest_start, 1550 cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash); 1551 } 1552 crypto_done(cr->cr_crp); 1553 cesa_free_request(sc, cr); 1554 } 1555 1556 cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD | 1557 BUS_DMASYNC_PREWRITE); 1558 1559 sc->sc_error = 0; 1560 1561 /* Unblock driver if it ran out of resources */ 1562 CESA_LOCK(sc, sc); 1563 blocked = sc->sc_blocked; 1564 sc->sc_blocked = 0; 1565 CESA_UNLOCK(sc, sc); 1566 1567 if (blocked) 1568 crypto_unblock(sc->sc_cid, blocked); 1569 } 1570 1571 static bool 1572 cesa_cipher_supported(const struct crypto_session_params *csp) 1573 { 1574 1575 switch (csp->csp_cipher_alg) { 1576 case CRYPTO_AES_CBC: 1577 if (csp->csp_ivlen != AES_BLOCK_LEN) 1578 return (false); 1579 break; 1580 case CRYPTO_DES_CBC: 1581 if (csp->csp_ivlen != DES_BLOCK_LEN) 1582 return (false); 1583 break; 1584 case CRYPTO_3DES_CBC: 1585 if (csp->csp_ivlen != DES3_BLOCK_LEN) 1586 return (false); 1587 break; 1588 default: 1589 return (false); 1590 } 1591 1592 if (csp->csp_cipher_klen > CESA_MAX_KEY_LEN) 1593 return (false); 1594 1595 return (true); 1596 } 1597 1598 static bool 1599 cesa_auth_supported(struct cesa_softc *sc, 1600 const struct crypto_session_params *csp) 1601 { 1602 1603 switch (csp->csp_auth_alg) { 1604 case CRYPTO_SHA2_256_HMAC: 1605 if (!(sc->sc_soc_id == MV_DEV_88F6828 || 1606 sc->sc_soc_id == MV_DEV_88F6820 || 1607 sc->sc_soc_id == MV_DEV_88F6810)) 1608 return (false); 1609 /* FALLTHROUGH */ 1610 case CRYPTO_MD5: 1611 case CRYPTO_MD5_HMAC: 1612 case CRYPTO_SHA1: 1613 case CRYPTO_SHA1_HMAC: 1614 break; 1615 default: 1616 return (false); 1617 } 1618 1619 if (csp->csp_auth_klen > CESA_MAX_MKEY_LEN) 1620 return (false); 1621 1622 return (true); 1623 } 1624 1625 static int 1626 cesa_probesession(device_t dev, const struct crypto_session_params *csp) 1627 { 1628 struct cesa_softc *sc; 1629 1630 sc = device_get_softc(dev); 1631 if (csp->csp_flags != 0) 1632 return (EINVAL); 1633 switch (csp->csp_mode) { 1634 case CSP_MODE_DIGEST: 1635 if (!cesa_auth_supported(sc, csp)) 1636 return (EINVAL); 1637 break; 1638 case CSP_MODE_CIPHER: 1639 if (!cesa_cipher_supported(csp)) 1640 return (EINVAL); 1641 break; 1642 case CSP_MODE_ETA: 1643 if (!cesa_auth_supported(sc, csp) || 1644 !cesa_cipher_supported(csp)) 1645 return (EINVAL); 1646 break; 1647 default: 1648 return (EINVAL); 1649 } 1650 return (CRYPTODEV_PROBE_HARDWARE); 1651 } 1652 1653 static int 1654 cesa_newsession(device_t dev, crypto_session_t cses, 1655 const struct crypto_session_params *csp) 1656 { 1657 struct cesa_session *cs; 1658 struct cesa_softc *sc; 1659 int error; 1660 1661 sc = device_get_softc(dev); 1662 error = 0; 1663 1664 /* Allocate session */ 1665 cs = crypto_get_driver_session(cses); 1666 1667 /* Prepare CESA configuration */ 1668 cs->cs_config = 0; 1669 cs->cs_ivlen = 1; 1670 cs->cs_mblen = 1; 1671 1672 switch (csp->csp_cipher_alg) { 1673 case CRYPTO_AES_CBC: 1674 cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC; 1675 cs->cs_ivlen = AES_BLOCK_LEN; 1676 break; 1677 case CRYPTO_DES_CBC: 1678 cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC; 1679 cs->cs_ivlen = DES_BLOCK_LEN; 1680 break; 1681 case CRYPTO_3DES_CBC: 1682 cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE | 1683 CESA_CSHD_CBC; 1684 cs->cs_ivlen = DES3_BLOCK_LEN; 1685 break; 1686 } 1687 1688 switch (csp->csp_auth_alg) { 1689 case CRYPTO_MD5: 1690 cs->cs_mblen = 1; 1691 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? MD5_HASH_LEN : 1692 csp->csp_auth_mlen; 1693 cs->cs_config |= CESA_CSHD_MD5; 1694 break; 1695 case CRYPTO_MD5_HMAC: 1696 cs->cs_mblen = MD5_BLOCK_LEN; 1697 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? MD5_HASH_LEN : 1698 csp->csp_auth_mlen; 1699 cs->cs_config |= CESA_CSHD_MD5_HMAC; 1700 if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN) 1701 cs->cs_config |= CESA_CSHD_96_BIT_HMAC; 1702 break; 1703 case CRYPTO_SHA1: 1704 cs->cs_mblen = 1; 1705 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN : 1706 csp->csp_auth_mlen; 1707 cs->cs_config |= CESA_CSHD_SHA1; 1708 break; 1709 case CRYPTO_SHA1_HMAC: 1710 cs->cs_mblen = SHA1_BLOCK_LEN; 1711 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN : 1712 csp->csp_auth_mlen; 1713 cs->cs_config |= CESA_CSHD_SHA1_HMAC; 1714 if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN) 1715 cs->cs_config |= CESA_CSHD_96_BIT_HMAC; 1716 break; 1717 case CRYPTO_SHA2_256_HMAC: 1718 cs->cs_mblen = SHA2_256_BLOCK_LEN; 1719 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA2_256_HASH_LEN : 1720 csp->csp_auth_mlen; 1721 cs->cs_config |= CESA_CSHD_SHA2_256_HMAC; 1722 break; 1723 } 1724 1725 /* Save cipher key */ 1726 if (csp->csp_cipher_key != NULL) { 1727 memcpy(cs->cs_key, csp->csp_cipher_key, 1728 csp->csp_cipher_klen); 1729 if (csp->csp_cipher_alg == CRYPTO_AES_CBC) 1730 error = cesa_prep_aes_key(cs, csp); 1731 } 1732 1733 /* Save digest key */ 1734 if (csp->csp_auth_key != NULL) 1735 cesa_set_mkey(cs, csp->csp_auth_alg, csp->csp_auth_key, 1736 csp->csp_auth_klen); 1737 1738 return (error); 1739 } 1740 1741 static int 1742 cesa_process(device_t dev, struct cryptop *crp, int hint) 1743 { 1744 const struct crypto_session_params *csp; 1745 struct cesa_request *cr; 1746 struct cesa_session *cs; 1747 struct cesa_softc *sc; 1748 int error; 1749 1750 sc = device_get_softc(dev); 1751 error = 0; 1752 1753 cs = crypto_get_driver_session(crp->crp_session); 1754 csp = crypto_get_params(crp->crp_session); 1755 1756 /* Check and parse input */ 1757 if (crp->crp_ilen > CESA_MAX_REQUEST_SIZE) { 1758 crp->crp_etype = E2BIG; 1759 crypto_done(crp); 1760 return (0); 1761 } 1762 1763 /* 1764 * For requests with AAD, only requests where the AAD is 1765 * immediately adjacent to the payload are supported. 1766 */ 1767 if (crp->crp_aad_length != 0 && 1768 (crp->crp_aad_start + crp->crp_aad_length) != 1769 crp->crp_payload_start) { 1770 crp->crp_etype = EINVAL; 1771 crypto_done(crp); 1772 return (0); 1773 } 1774 1775 /* 1776 * Get request descriptor. Block driver if there is no free 1777 * descriptors in pool. 1778 */ 1779 cr = cesa_alloc_request(sc); 1780 if (!cr) { 1781 CESA_LOCK(sc, sc); 1782 sc->sc_blocked = CRYPTO_SYMQ; 1783 CESA_UNLOCK(sc, sc); 1784 return (ERESTART); 1785 } 1786 1787 /* Prepare request */ 1788 cr->cr_crp = crp; 1789 cr->cr_cs = cs; 1790 1791 CESA_LOCK(sc, sessions); 1792 cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1793 1794 if (csp->csp_cipher_alg != 0) { 1795 if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { 1796 arc4rand(cr->cr_csd->csd_iv, csp->csp_ivlen, 0); 1797 crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, 1798 cr->cr_csd->csd_iv); 1799 } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) 1800 memcpy(cr->cr_csd->csd_iv, crp->crp_iv, csp->csp_ivlen); 1801 else 1802 crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, 1803 cr->cr_csd->csd_iv); 1804 } 1805 1806 if (crp->crp_cipher_key != NULL) { 1807 memcpy(cs->cs_key, crp->crp_cipher_key, 1808 csp->csp_cipher_klen); 1809 if (csp->csp_cipher_alg == CRYPTO_AES_CBC) 1810 error = cesa_prep_aes_key(cs, csp); 1811 } 1812 1813 if (!error && crp->crp_auth_key != NULL) 1814 cesa_set_mkey(cs, csp->csp_auth_alg, crp->crp_auth_key, 1815 csp->csp_auth_klen); 1816 1817 /* Convert request to chain of TDMA and SA descriptors */ 1818 if (!error) 1819 error = cesa_create_chain(sc, csp, cr); 1820 1821 cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1822 CESA_UNLOCK(sc, sessions); 1823 1824 if (error) { 1825 cesa_free_request(sc, cr); 1826 crp->crp_etype = error; 1827 crypto_done(crp); 1828 return (0); 1829 } 1830 1831 bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD | 1832 BUS_DMASYNC_PREWRITE); 1833 1834 /* Enqueue request to execution */ 1835 cesa_enqueue_request(sc, cr); 1836 1837 /* Start execution, if we have no more requests in queue */ 1838 if ((hint & CRYPTO_HINT_MORE) == 0) 1839 cesa_execute(sc); 1840 1841 return (0); 1842 } 1843