1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (C) 2009-2011 Semihalf. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 */

/*
 * CESA SRAM Memory Map:
 *
 * +------------------------+ <= sc->sc_sram_base_va + CESA_SRAM_SIZE
 * |                        |
 * |          DATA          |
 * |                        |
 * +------------------------+ <= sc->sc_sram_base_va + CESA_DATA(0)
 * |  struct cesa_sa_data   |
 * +------------------------+
 * |  struct cesa_sa_hdesc  |
 * +------------------------+ <= sc->sc_sram_base_va
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/resource.h>
#include <machine/fdt.h>

#include <dev/fdt/simplebus.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <sys/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2/sha256.h>
#include <crypto/rijndael/rijndael.h>
#include <opencrypto/cryptodev.h>
#include "cryptodev_if.h"

#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>
#include "cesa.h"

static int	cesa_probe(device_t);
static int	cesa_attach(device_t);
static int	cesa_attach_late(device_t);
static int	cesa_detach(device_t);
static void	cesa_intr(void *);
static int	cesa_newsession(device_t, u_int32_t *, struct cryptoini *);
static int	cesa_freesession(device_t, u_int64_t);
static int	cesa_process(device_t, struct cryptop *, int);

/*
 * Resources claimed in attach: TDMA registers, CESA registers and one
 * (shareable) interrupt line.
 */
static struct resource_spec cesa_res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_MEMORY, 1, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static device_method_t cesa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cesa_probe),
	DEVMETHOD(device_attach,	cesa_attach),
	DEVMETHOD(device_detach,	cesa_detach),

	/* Crypto device methods */
	DEVMETHOD(cryptodev_newsession,	cesa_newsession),
	DEVMETHOD(cryptodev_freesession,cesa_freesession),
	DEVMETHOD(cryptodev_process,	cesa_process),

	DEVMETHOD_END
};

static driver_t cesa_driver = {
	"cesa",
	cesa_methods,
	sizeof (struct cesa_softc)
};
static devclass_t cesa_devclass;

DRIVER_MODULE(cesa, simplebus, cesa_driver, cesa_devclass, 0, 0);
MODULE_DEPEND(cesa, crypto, 1, 1, 1);

/*
 * Dump the contents of a CESA SA hardware descriptor to the console.
 * Compiles to an empty function unless the driver is built with DEBUG.
 */
static void
cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd)
{
#ifdef DEBUG
	device_t dev;

	dev = sc->sc_dev;
	device_printf(dev, "CESA SA Hardware Descriptor:\n");
	device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config);
	device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src);
	device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst);
	device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen);
	device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key);
	device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv);
	device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf);
	device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src);
	device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst);
	device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen);
	device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen);
	device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in);
	device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out);
#endif
}

/*
 * busdma load callback: store the bus address of the (single) DMA segment
 * in the cesa_dma_mem descriptor passed as the callback argument.
 */
static void
cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct cesa_dma_mem *cdm;

	if (error)
		return;

	KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1."));
	cdm = arg;
	cdm->cdm_paddr = segs->ds_addr;
}

/*
 * Allocate 'size' bytes of DMA-safe, zeroed, page-aligned memory in a
 * single segment below 4GB and record its KVA/bus address in 'cdm'.
 * Uses goto-based unwind so each failure path releases exactly what was
 * acquired before it.  Returns 0 or a busdma error number.
 */
static int
cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm,
    bus_size_t size)
{
	int error;

	KASSERT(cdm->cdm_vaddr == NULL,
	    ("%s(): DMA memory descriptor in use.", __func__));

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    size, 1,				/* maxsize, nsegments */
	    size, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &cdm->cdm_tag);			/* dmat */
	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);

		goto err1;
	}

	error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map);
	if (error) {
		device_printf(sc->sc_dev, "failed to allocate DMA safe"
		    " memory, error %i!\n", error);

		goto err2;
	}

	error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr,
	    size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i\n", error);

		goto err3;
	}

	return (0);
err3:
	bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
err2:
	bus_dma_tag_destroy(cdm->cdm_tag);
err1:
	/* Mark the descriptor unused again so it may be reallocated. */
	cdm->cdm_vaddr = NULL;
	return (error);
}

/* Release DMA memory obtained from cesa_alloc_dma_mem(). */
static void
cesa_free_dma_mem(struct cesa_dma_mem *cdm)
{

	bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map);
	bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
	bus_dma_tag_destroy(cdm->cdm_tag);
	cdm->cdm_vaddr = NULL;
}

/* Perform a busdma sync on the descriptor, if it is currently allocated. */
static void
cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op)
{

	/* Sync only if dma memory is valid */
	if (cdm->cdm_vaddr != NULL)
		bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op);
}

/* Sync all three descriptor pools (TDMA, SA, requests) in one call. */
static void
cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t
    op)
{

	cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op);
	cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op);
	cesa_sync_dma_mem(&sc->sc_requests_cdm, op);
}

/*
 * Take a session object from the free-sessions pool.
 * Returns NULL when the pool is exhausted.
 */
static struct cesa_session *
cesa_alloc_session(struct cesa_softc *sc)
{
	struct cesa_session *cs;

	CESA_GENERIC_ALLOC_LOCKED(sc, cs, sessions);

	return (cs);
}

/*
 * Translate a session id into the session object, or NULL when the id
 * is out of range.  No liveness check is done here; callers are expected
 * to hand in ids previously returned by cesa_alloc_session().
 */
static struct cesa_session *
cesa_get_session(struct cesa_softc *sc, uint32_t sid)
{

	if (sid >= CESA_SESSIONS)
		return (NULL);

	return (&sc->sc_sessions[sid]);
}

/* Return a session object to the free-sessions pool. */
static void
cesa_free_session(struct cesa_softc *sc, struct cesa_session *cs)
{

	CESA_GENERIC_FREE_LOCKED(sc, cs, sessions);
}

/*
 * Take a request object from the free-requests pool and reset its TDMA
 * and SA descriptor queues.  Returns NULL when the pool is exhausted.
 */
static struct cesa_request *
cesa_alloc_request(struct cesa_softc *sc)
{
	struct cesa_request *cr;

	CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests);
	if (!cr)
		return (NULL);

	STAILQ_INIT(&cr->cr_tdesc);
	STAILQ_INIT(&cr->cr_sdesc);

	return (cr);
}

/*
 * Return a request and everything it owns (TDMA descriptors, SA
 * descriptors, loaded DMA map) to the respective pools.
 */
static void
cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr)
{

	/* Free TDMA descriptors assigned to this request */
	CESA_LOCK(sc, tdesc);
	STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc);
	CESA_UNLOCK(sc, tdesc);

	/* Free SA descriptors assigned to this request */
	CESA_LOCK(sc, sdesc);
	STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc);
	CESA_UNLOCK(sc, sdesc);

	/* Unload DMA memory associated with request */
	if (cr->cr_dmap_loaded) {
		bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap);
		cr->cr_dmap_loaded = 0;
	}

	CESA_GENERIC_FREE_LOCKED(sc, cr, requests);
}

/* Queue a fully-built request for execution by cesa_execute(). */
static void
cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr)
{

	CESA_LOCK(sc, requests);
	STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq);
	CESA_UNLOCK(sc, requests);
}

/*
 * Take a TDMA descriptor from the pool; complain (but still return NULL)
 * when the pool runs dry, as the pool size is a compile-time tunable.
 */
static struct cesa_tdma_desc *
cesa_alloc_tdesc(struct cesa_softc *sc)
{
	struct cesa_tdma_desc *ctd;

	CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc);

	if (!ctd)
		device_printf(sc->sc_dev, "TDMA descriptors pool exhaused. "
		    "Consider increasing CESA_TDMA_DESCRIPTORS.\n");

	return (ctd);
}

/*
 * Take an SA descriptor from the pool, attach it to the request and
 * pre-fill its hardware descriptor with the default in-SRAM offsets of
 * key, IVs and hash state.  Returns NULL when the pool is exhausted.
 */
static struct cesa_sa_desc *
cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr)
{
	struct cesa_sa_desc *csd;

	CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc);
	if (!csd) {
		device_printf(sc->sc_dev, "SA descriptors pool exhaused. "
		    "Consider increasing CESA_SA_DESCRIPTORS.\n");
		return (NULL);
	}

	STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq);

	/* Fill-in SA descriptor with default values */
	csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key);
	csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv);
	csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv);
	csd->csd_cshd->cshd_enc_src = 0;
	csd->csd_cshd->cshd_enc_dst = 0;
	csd->csd_cshd->cshd_enc_dlen = 0;
	csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash);
	csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in);
	csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out);
	csd->csd_cshd->cshd_mac_src = 0;
	csd->csd_cshd->cshd_mac_dlen = 0;

	return (csd);
}

/*
 * Build a TDMA descriptor that copies 'size' bytes from bus address 'src'
 * to 'dst'.  A zero size produces a special control descriptor (flags 0)
 * which tells the engine to start crypto processing.
 */
static struct cesa_tdma_desc *
cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src,
    bus_size_t size)
{
	struct cesa_tdma_desc *ctd;

	ctd = cesa_alloc_tdesc(sc);
	if (!ctd)
		return (NULL);

	ctd->ctd_cthd->cthd_dst = dst;
	ctd->ctd_cthd->cthd_src = src;
	ctd->ctd_cthd->cthd_byte_count = size;

	/* Handle special control packet */
	if (size != 0)
		ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED;
	else
		ctd->ctd_cthd->cthd_flags = 0;

	return (ctd);
}

/*
 * Build a TDMA descriptor copying the request's SA data (key, IVs, hash
 * state) into SRAM, just past the SA hardware descriptor.
 */
static struct cesa_tdma_desc *
cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
{

	return (cesa_tdma_copy(sc, sc->sc_sram_base_pa +
sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr, 383 sizeof(struct cesa_sa_data))); 384 } 385 386 static struct cesa_tdma_desc * 387 cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr) 388 { 389 390 return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base_pa + 391 sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data))); 392 } 393 394 static struct cesa_tdma_desc * 395 cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd) 396 { 397 398 return (cesa_tdma_copy(sc, sc->sc_sram_base_pa, csd->csd_cshd_paddr, 399 sizeof(struct cesa_sa_hdesc))); 400 } 401 402 static void 403 cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd) 404 { 405 struct cesa_tdma_desc *ctd_prev; 406 407 if (!STAILQ_EMPTY(&cr->cr_tdesc)) { 408 ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq); 409 ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr; 410 } 411 412 ctd->ctd_cthd->cthd_next = 0; 413 STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq); 414 } 415 416 static int 417 cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr, 418 struct cesa_packet *cp, struct cesa_sa_desc *csd) 419 { 420 struct cesa_tdma_desc *ctd, *tmp; 421 422 /* Copy SA descriptor for this packet */ 423 ctd = cesa_tdma_copy_sdesc(sc, csd); 424 if (!ctd) 425 return (ENOMEM); 426 427 cesa_append_tdesc(cr, ctd); 428 429 /* Copy data to be processed */ 430 STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp) 431 cesa_append_tdesc(cr, ctd); 432 STAILQ_INIT(&cp->cp_copyin); 433 434 /* Insert control descriptor */ 435 ctd = cesa_tdma_copy(sc, 0, 0, 0); 436 if (!ctd) 437 return (ENOMEM); 438 439 cesa_append_tdesc(cr, ctd); 440 441 /* Copy back results */ 442 STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp) 443 cesa_append_tdesc(cr, ctd); 444 STAILQ_INIT(&cp->cp_copyout); 445 446 return (0); 447 } 448 449 static int 450 cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen) 451 { 452 uint8_t 
ipad[CESA_MAX_HMAC_BLOCK_LEN]; 453 uint8_t opad[CESA_MAX_HMAC_BLOCK_LEN]; 454 SHA1_CTX sha1ctx; 455 SHA256_CTX sha256ctx; 456 MD5_CTX md5ctx; 457 uint32_t *hout; 458 uint32_t *hin; 459 int i; 460 461 memset(ipad, HMAC_IPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN); 462 memset(opad, HMAC_OPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN); 463 for (i = 0; i < mklen; i++) { 464 ipad[i] ^= mkey[i]; 465 opad[i] ^= mkey[i]; 466 } 467 468 hin = (uint32_t *)cs->cs_hiv_in; 469 hout = (uint32_t *)cs->cs_hiv_out; 470 471 switch (alg) { 472 case CRYPTO_MD5_HMAC: 473 MD5Init(&md5ctx); 474 MD5Update(&md5ctx, ipad, MD5_HMAC_BLOCK_LEN); 475 memcpy(hin, md5ctx.state, sizeof(md5ctx.state)); 476 MD5Init(&md5ctx); 477 MD5Update(&md5ctx, opad, MD5_HMAC_BLOCK_LEN); 478 memcpy(hout, md5ctx.state, sizeof(md5ctx.state)); 479 break; 480 case CRYPTO_SHA1_HMAC: 481 SHA1Init(&sha1ctx); 482 SHA1Update(&sha1ctx, ipad, SHA1_HMAC_BLOCK_LEN); 483 memcpy(hin, sha1ctx.h.b32, sizeof(sha1ctx.h.b32)); 484 SHA1Init(&sha1ctx); 485 SHA1Update(&sha1ctx, opad, SHA1_HMAC_BLOCK_LEN); 486 memcpy(hout, sha1ctx.h.b32, sizeof(sha1ctx.h.b32)); 487 break; 488 case CRYPTO_SHA2_256_HMAC: 489 SHA256_Init(&sha256ctx); 490 SHA256_Update(&sha256ctx, ipad, SHA2_256_HMAC_BLOCK_LEN); 491 memcpy(hin, sha256ctx.state, sizeof(sha256ctx.state)); 492 SHA256_Init(&sha256ctx); 493 SHA256_Update(&sha256ctx, opad, SHA2_256_HMAC_BLOCK_LEN); 494 memcpy(hout, sha256ctx.state, sizeof(sha256ctx.state)); 495 break; 496 default: 497 return (EINVAL); 498 } 499 500 for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) { 501 hin[i] = htobe32(hin[i]); 502 hout[i] = htobe32(hout[i]); 503 } 504 505 return (0); 506 } 507 508 static int 509 cesa_prep_aes_key(struct cesa_session *cs) 510 { 511 uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; 512 uint32_t *dkey; 513 int i; 514 515 rijndaelKeySetupEnc(ek, cs->cs_key, cs->cs_klen * 8); 516 517 cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK; 518 dkey = (uint32_t *)cs->cs_aes_dkey; 519 520 switch (cs->cs_klen) { 521 case 16: 522 cs->cs_config 
		    |= CESA_CSH_AES_KLEN_128;
		for (i = 0; i < 4; i++)
			*dkey++ = htobe32(ek[4 * 10 + i]);
		break;
	case 24:
		cs->cs_config |= CESA_CSH_AES_KLEN_192;
		for (i = 0; i < 4; i++)
			*dkey++ = htobe32(ek[4 * 12 + i]);
		for (i = 0; i < 2; i++)
			*dkey++ = htobe32(ek[4 * 11 + 2 + i]);
		break;
	case 32:
		cs->cs_config |= CESA_CSH_AES_KLEN_256;
		for (i = 0; i < 4; i++)
			*dkey++ = htobe32(ek[4 * 14 + i]);
		for (i = 0; i < 4; i++)
			*dkey++ = htobe32(ek[4 * 13 + i]);
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

/* Return non-zero when 'alg' is one of the hash/HMAC algorithms we accept. */
static int
cesa_is_hash(int alg)
{

	switch (alg) {
	case CRYPTO_MD5:
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
		return (1);
	default:
		return (0);
	}
}

/* Reset packet state: target size, zero offset, empty copy-in/out queues. */
static void
cesa_start_packet(struct cesa_packet *cp, unsigned int size)
{

	cp->cp_size = size;
	cp->cp_offset = 0;
	STAILQ_INIT(&cp->cp_copyin);
	STAILQ_INIT(&cp->cp_copyout);
}

/*
 * Consume as much of the DMA segment as fits in the current packet,
 * building matching copy-in (to SRAM) and copy-out (from SRAM) TDMA
 * descriptors and advancing both the segment and the packet offset.
 * Returns the number of bytes consumed, or -ENOMEM (negative to be
 * distinguishable from a byte count) on descriptor exhaustion.
 */
static int
cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp,
    bus_dma_segment_t *seg)
{
	struct cesa_tdma_desc *ctd;
	unsigned int bsize;

	/* Calculate size of block copy */
	bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset);

	if (bsize > 0) {
		ctd = cesa_tdma_copy(sc, sc->sc_sram_base_pa +
		    CESA_DATA(cp->cp_offset), seg->ds_addr, bsize);
		if (!ctd)
			return (-ENOMEM);

		STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq);

		ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base_pa +
		    CESA_DATA(cp->cp_offset), bsize);
		if (!ctd)
			return (-ENOMEM);

		STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq);

		seg->ds_len -= bsize;
		seg->ds_addr += bsize;
		cp->cp_offset += bsize;
	}

	return (bsize);
}

/*
 * busdma callback that builds the full TDMA chain for a request from the
 * loaded data segments: the request is cut into SRAM-sized packets, each
 * with its own SA descriptor, and fragmentation flags are set when the
 * data spans more than one packet.  Errors are reported through
 * cci->cci_error (busdma callbacks cannot return a value).
 */
static void
cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	unsigned int mpsize, fragmented;
	unsigned int mlen, mskip, tmlen;
	struct cesa_chain_info *cci;
	unsigned int elen, eskip;
	unsigned int skip, len;
	struct cesa_sa_desc *csd;
	struct cesa_request *cr;
	struct cesa_softc *sc;
	struct cesa_packet cp;
	bus_dma_segment_t seg;
	uint32_t config;
	int size;

	cci = arg;
	sc = cci->cci_sc;
	cr = cci->cci_cr;

	if (error) {
		cci->cci_error = error;
		return;
	}

	/* Lengths/offsets of the enc and mac regions; zero when absent. */
	elen = cci->cci_enc ? cci->cci_enc->crd_len : 0;
	eskip = cci->cci_enc ? cci->cci_enc->crd_skip : 0;
	mlen = cci->cci_mac ? cci->cci_mac->crd_len : 0;
	mskip = cci->cci_mac ? cci->cci_mac->crd_skip : 0;

	if (elen && mlen &&
	    ((eskip > mskip && ((eskip - mskip) & (cr->cr_cs->cs_ivlen - 1))) ||
	    (mskip > eskip && ((mskip - eskip) & (cr->cr_cs->cs_mblen - 1))) ||
	    (eskip > (mskip + mlen)) || (mskip > (eskip + elen)))) {
		/*
		 * Data alignment in the request does not meet CESA requirements
		 * for combined encryption/decryption and hashing. We have to
		 * split the request to separate operations and process them
		 * one by one.
		 *
		 * Recurse twice with only one of {enc, mac} set; operation
		 * order depends on direction (MAC-then-decrypt vs
		 * encrypt-then-MAC).  Each recursion passes the accumulated
		 * cci_error so a failure in the first pass short-circuits
		 * the second.
		 */
		config = cci->cci_config;
		if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) {
			config &= ~CESA_CSHD_OP_MASK;

			cci->cci_config = config | CESA_CSHD_MAC;
			cci->cci_enc = NULL;
			cci->cci_mac = cr->cr_mac;
			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);

			cci->cci_config = config | CESA_CSHD_ENC;
			cci->cci_enc = cr->cr_enc;
			cci->cci_mac = NULL;
			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
		} else {
			config &= ~CESA_CSHD_OP_MASK;

			cci->cci_config = config | CESA_CSHD_ENC;
			cci->cci_enc = cr->cr_enc;
			cci->cci_mac = NULL;
			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);

			cci->cci_config = config | CESA_CSHD_MAC;
			cci->cci_enc = NULL;
			cci->cci_mac = cr->cr_mac;
			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
		}

		return;
	}

	tmlen = mlen;
	fragmented = 0;
	/* Round the packet size down to IV and MAC block alignment. */
	mpsize = CESA_MAX_PACKET_SIZE;
	mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1));

	/* Overall window of the buffer that any operation touches. */
	if (elen && mlen) {
		skip = MIN(eskip, mskip);
		len = MAX(elen + eskip, mlen + mskip) - skip;
	} else if (elen) {
		skip = eskip;
		len = elen;
	} else {
		skip = mskip;
		len = mlen;
	}

	/* Start first packet in chain */
	cesa_start_packet(&cp, MIN(mpsize, len));

	while (nseg-- && len > 0) {
		seg = *(segs++);

		/*
		 * Skip data in buffer on which neither ENC nor MAC operation
		 * is requested.
		 */
		if (skip > 0) {
			size = MIN(skip, seg.ds_len);
			skip -= size;

			seg.ds_addr += size;
			seg.ds_len -= size;

			if (eskip > 0)
				eskip -= size;

			if (mskip > 0)
				mskip -= size;

			if (seg.ds_len == 0)
				continue;
		}

		while (1) {
			/*
			 * Fill in current packet with data. Break if there is
			 * no more data in current DMA segment or an error
			 * occurred.
			 */
			size = cesa_fill_packet(sc, &cp, &seg);
			if (size <= 0) {
				/* size is 0 (segment drained) or -ENOMEM. */
				error = -size;
				break;
			}

			len -= size;

			/* If packet is full, append it to the chain */
			if (cp.cp_size == cp.cp_offset) {
				csd = cesa_alloc_sdesc(sc, cr);
				if (!csd) {
					error = ENOMEM;
					break;
				}

				/* Create SA descriptor for this packet */
				csd->csd_cshd->cshd_config = cci->cci_config;
				csd->csd_cshd->cshd_mac_total_dlen = tmlen;

				/*
				 * Enable fragmentation if request will not fit
				 * into one packet.
				 */
				if (len > 0) {
					if (!fragmented) {
						fragmented = 1;
						csd->csd_cshd->cshd_config |=
						    CESA_CSHD_FRAG_FIRST;
					} else
						csd->csd_cshd->cshd_config |=
						    CESA_CSHD_FRAG_MIDDLE;
				} else if (fragmented)
					csd->csd_cshd->cshd_config |=
					    CESA_CSHD_FRAG_LAST;

				/* ENC region overlapping this packet. */
				if (eskip < cp.cp_size && elen > 0) {
					csd->csd_cshd->cshd_enc_src =
					    CESA_DATA(eskip);
					csd->csd_cshd->cshd_enc_dst =
					    CESA_DATA(eskip);
					csd->csd_cshd->cshd_enc_dlen =
					    MIN(elen, cp.cp_size - eskip);
				}

				/* MAC region overlapping this packet. */
				if (mskip < cp.cp_size && mlen > 0) {
					csd->csd_cshd->cshd_mac_src =
					    CESA_DATA(mskip);
					csd->csd_cshd->cshd_mac_dlen =
					    MIN(mlen, cp.cp_size - mskip);
				}

				/*
				 * Advance region bookkeeping past this
				 * packet (dlen fields are 0 when the branch
				 * above was not taken).
				 */
				elen -= csd->csd_cshd->cshd_enc_dlen;
				eskip -= MIN(eskip, cp.cp_size);
				mlen -= csd->csd_cshd->cshd_mac_dlen;
				mskip -= MIN(mskip, cp.cp_size);

				cesa_dump_cshd(sc, csd->csd_cshd);

				/* Append packet to the request */
				error = cesa_append_packet(sc, cr, &cp, csd);
				if (error)
					break;

				/* Start a new packet, as current is full */
				cesa_start_packet(&cp, MIN(mpsize, len));
			}
		}

		if (error)
			break;
	}

	if (error) {
		/*
		 * Move all allocated resources to the request. They will be
		 * freed later.
		 */
		STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin);
		STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout);
		cci->cci_error = error;
	}
}

/*
 * bus_dmamap_load_uio/mbuf callback variant: same as cesa_create_chain_cb
 * but with the extra mapped-size argument those loaders pass, which we
 * do not need.
 */
static void
cesa_create_chain_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t size, int error)
{

	cesa_create_chain_cb(arg, segs, nseg, error);
}

/*
 * Build the complete TDMA chain for a request: SA-data copy-in, the data
 * packets (built in the busdma callback above), and SA-data copy-out.
 * Returns 0 or an errno; on success the request's DMA map is loaded and
 * marked so cesa_free_request() unloads it.
 */
static int
cesa_create_chain(struct cesa_softc *sc, struct cesa_request *cr)
{
	struct cesa_chain_info cci;
	struct cesa_tdma_desc *ctd;
	uint32_t config;
	int error;

	error = 0;
	CESA_LOCK_ASSERT(sc, sessions);

	/* Create request metadata */
	if (cr->cr_enc) {
		/* AES decryption uses the derived (last-round) key. */
		if (cr->cr_enc->crd_alg == CRYPTO_AES_CBC &&
		    (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
			memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey,
			    cr->cr_cs->cs_klen);
		else
			memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key,
			    cr->cr_cs->cs_klen);
	}

	if (cr->cr_mac) {
		memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in,
		    CESA_MAX_HASH_LEN);
		memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out,
		    CESA_MAX_HASH_LEN);
	}

	ctd = cesa_tdma_copyin_sa_data(sc, cr);
	if (!ctd)
		return (ENOMEM);

	cesa_append_tdesc(cr, ctd);

	/* Prepare SA configuration */
	config = cr->cr_cs->cs_config;

	if (cr->cr_enc && (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
		config |= CESA_CSHD_DECRYPT;
	if (cr->cr_enc && !cr->cr_mac)
		config |= CESA_CSHD_ENC;
	if (!cr->cr_enc && cr->cr_mac)
		config |= CESA_CSHD_MAC;
	if (cr->cr_enc && cr->cr_mac)
		config |= (config & CESA_CSHD_DECRYPT) ?
		    CESA_CSHD_MAC_AND_ENC :
		    CESA_CSHD_ENC_AND_MAC;

	/* Create data packets */
	cci.cci_sc = sc;
	cci.cci_cr = cr;
	cci.cci_enc = cr->cr_enc;
	cci.cci_mac = cr->cr_mac;
	cci.cci_config = config;
	cci.cci_error = 0;

	/* Pick the busdma loader matching the crypto buffer type. */
	if (cr->cr_crp->crp_flags & CRYPTO_F_IOV)
		error = bus_dmamap_load_uio(sc->sc_data_dtag,
		    cr->cr_dmap, (struct uio *)cr->cr_crp->crp_buf,
		    cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
	else if (cr->cr_crp->crp_flags & CRYPTO_F_IMBUF)
		error = bus_dmamap_load_mbuf(sc->sc_data_dtag,
		    cr->cr_dmap, (struct mbuf *)cr->cr_crp->crp_buf,
		    cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
	else
		error = bus_dmamap_load(sc->sc_data_dtag,
		    cr->cr_dmap, cr->cr_crp->crp_buf,
		    cr->cr_crp->crp_ilen, cesa_create_chain_cb, &cci,
		    BUS_DMA_NOWAIT);

	if (!error)
		cr->cr_dmap_loaded = 1;

	/* Callback-reported errors take precedence over load errors. */
	if (cci.cci_error)
		error = cci.cci_error;

	if (error)
		return (error);

	/* Read back request metadata */
	ctd = cesa_tdma_copyout_sa_data(sc, cr);
	if (!ctd)
		return (ENOMEM);

	cesa_append_tdesc(cr, ctd);

	return (0);
}

/*
 * Kick the hardware: move all ready requests to the queued list, splice
 * their TDMA chains into one, and start the engine on the first
 * descriptor.  A no-op while the engine is already busy (non-empty
 * queued list) or when nothing is ready.
 */
static void
cesa_execute(struct cesa_softc *sc)
{
	struct cesa_tdma_desc *prev_ctd, *ctd;
	struct cesa_request *prev_cr, *cr;

	CESA_LOCK(sc, requests);

	/*
	 * If ready list is empty, there is nothing to execute. If queued list
	 * is not empty, the hardware is busy and we cannot start another
	 * execution.
	 */
	if (STAILQ_EMPTY(&sc->sc_ready_requests) ||
	    !STAILQ_EMPTY(&sc->sc_queued_requests)) {
		CESA_UNLOCK(sc, requests);
		return;
	}

	/* Move all ready requests to queued list */
	STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests);
	STAILQ_INIT(&sc->sc_ready_requests);

	/* Create one execution chain from all requests on the list */
	if (STAILQ_FIRST(&sc->sc_queued_requests) !=
	    STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) {
		prev_cr = NULL;
		cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		/* Link each request's chain tail to the next chain's head. */
		STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) {
			if (prev_cr) {
				ctd = STAILQ_FIRST(&cr->cr_tdesc);
				prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc,
				    cesa_tdma_desc, ctd_stq);

				prev_ctd->ctd_cthd->cthd_next =
				    ctd->ctd_cthd_paddr;
			}

			prev_cr = cr;
		}

		cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
	}

	/* Start chain execution in hardware */
	cr = STAILQ_FIRST(&sc->sc_queued_requests);
	ctd = STAILQ_FIRST(&cr->cr_tdesc);

	CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr);

	/* Armada 38x family engines additionally need the SHA2 bit. */
	if (sc->sc_soc_id == MV_DEV_88F6828 ||
	    sc->sc_soc_id == MV_DEV_88F6820 ||
	    sc->sc_soc_id == MV_DEV_88F6810)
		CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2);
	else
		CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE);

	CESA_UNLOCK(sc, requests);
}

/*
 * Locate the CESA SRAM via the "sram-handle" FDT property and record its
 * physical base and size; on Armada 38x SoCs also map it into KVA (other
 * platforms map it in platform_sram_devmap()).
 * NOTE(review): when OF_getencprop() fails this returns its raw result
 * (0 or -1), not an errno — presumably callers only test for non-zero;
 * verify against cesa_attach_late().
 */
static int
cesa_setup_sram(struct cesa_softc *sc)
{
	phandle_t sram_node;
	ihandle_t sram_ihandle;
	pcell_t sram_handle, sram_reg[2];
	void *sram_va;
	int rv;

	rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle",
	    (void *)&sram_handle, sizeof(sram_handle));
	if (rv <= 0)
		return (rv);

	sram_ihandle = (ihandle_t)sram_handle;
	sram_node = OF_instance_to_package(sram_ihandle);

	rv = OF_getencprop(sram_node,
"reg", (void *)sram_reg, sizeof(sram_reg)); 987 if (rv <= 0) 988 return (rv); 989 990 sc->sc_sram_base_pa = sram_reg[0]; 991 /* Store SRAM size to be able to unmap in detach() */ 992 sc->sc_sram_size = sram_reg[1]; 993 994 if (sc->sc_soc_id != MV_DEV_88F6828 && 995 sc->sc_soc_id != MV_DEV_88F6820 && 996 sc->sc_soc_id != MV_DEV_88F6810) 997 return (0); 998 999 /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ 1000 sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); 1001 if (sram_va == NULL) 1002 return (ENOMEM); 1003 sc->sc_sram_base_va = (vm_offset_t)sram_va; 1004 1005 return (0); 1006 } 1007 1008 /* 1009 * Function: device_from_node 1010 * This function returns appropriate device_t to phandle_t 1011 * Parameters: 1012 * root - device where you want to start search 1013 * if you provide NULL here, function will take 1014 * "root0" device as root. 1015 * node - we are checking every device_t to be 1016 * appropriate with this. 1017 */ 1018 static device_t 1019 device_from_node(device_t root, phandle_t node) 1020 { 1021 device_t *children, retval; 1022 int nkid, i; 1023 1024 /* Nothing matches no node */ 1025 if (node == -1) 1026 return (NULL); 1027 1028 if (root == NULL) 1029 /* Get root of device tree */ 1030 if ((root = device_lookup_by_name("root0")) == NULL) 1031 return (NULL); 1032 1033 if (device_get_children(root, &children, &nkid) != 0) 1034 return (NULL); 1035 1036 retval = NULL; 1037 for (i = 0; i < nkid; i++) { 1038 /* Check if device and node matches */ 1039 if (OFW_BUS_GET_NODE(root, children[i]) == node) { 1040 retval = children[i]; 1041 break; 1042 } 1043 /* or go deeper */ 1044 if ((retval = device_from_node(children[i], node)) != NULL) 1045 break; 1046 } 1047 free(children, M_TEMP); 1048 1049 return (retval); 1050 } 1051 1052 static int 1053 cesa_setup_sram_armada(struct cesa_softc *sc) 1054 { 1055 phandle_t sram_node; 1056 ihandle_t sram_ihandle; 1057 pcell_t sram_handle[2]; 1058 void *sram_va; 1059 int rv, j; 1060 
struct resource_list rl; 1061 struct resource_list_entry *rle; 1062 struct simplebus_softc *ssc; 1063 device_t sdev; 1064 1065 /* Get refs to SRAMS from CESA node */ 1066 rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "marvell,crypto-srams", 1067 (void *)sram_handle, sizeof(sram_handle)); 1068 if (rv <= 0) 1069 return (rv); 1070 1071 if (sc->sc_cesa_engine_id >= 2) 1072 return (ENXIO); 1073 1074 /* Get SRAM node on the basis of sc_cesa_engine_id */ 1075 sram_ihandle = (ihandle_t)sram_handle[sc->sc_cesa_engine_id]; 1076 sram_node = OF_instance_to_package(sram_ihandle); 1077 1078 /* Get device_t of simplebus (sram_node parent) */ 1079 sdev = device_from_node(NULL, OF_parent(sram_node)); 1080 if (!sdev) 1081 return (ENXIO); 1082 1083 ssc = device_get_softc(sdev); 1084 1085 resource_list_init(&rl); 1086 /* Parse reg property to resource list */ 1087 ofw_bus_reg_to_rl(sdev, sram_node, ssc->acells, 1088 ssc->scells, &rl); 1089 1090 /* We expect only one resource */ 1091 rle = resource_list_find(&rl, SYS_RES_MEMORY, 0); 1092 if (rle == NULL) 1093 return (ENXIO); 1094 1095 /* Remap through ranges property */ 1096 for (j = 0; j < ssc->nranges; j++) { 1097 if (rle->start >= ssc->ranges[j].bus && 1098 rle->end < ssc->ranges[j].bus + ssc->ranges[j].size) { 1099 rle->start -= ssc->ranges[j].bus; 1100 rle->start += ssc->ranges[j].host; 1101 rle->end -= ssc->ranges[j].bus; 1102 rle->end += ssc->ranges[j].host; 1103 } 1104 } 1105 1106 sc->sc_sram_base_pa = rle->start; 1107 sc->sc_sram_size = rle->count; 1108 1109 /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ 1110 sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); 1111 if (sram_va == NULL) 1112 return (ENOMEM); 1113 sc->sc_sram_base_va = (vm_offset_t)sram_va; 1114 1115 return (0); 1116 } 1117 1118 struct ofw_compat_data cesa_devices[] = { 1119 { "mrvl,cesa", (uintptr_t)true }, 1120 { "marvell,armada-38x-crypto", (uintptr_t)true }, 1121 { NULL, 0 } 1122 }; 1123 1124 static int 1125 
cesa_probe(device_t dev) 1126 { 1127 1128 if (!ofw_bus_status_okay(dev)) 1129 return (ENXIO); 1130 1131 if (!ofw_bus_search_compatible(dev, cesa_devices)->ocd_data) 1132 return (ENXIO); 1133 1134 device_set_desc(dev, "Marvell Cryptographic Engine and Security " 1135 "Accelerator"); 1136 1137 return (BUS_PROBE_DEFAULT); 1138 } 1139 1140 static int 1141 cesa_attach(device_t dev) 1142 { 1143 static int engine_idx = 0; 1144 struct simplebus_devinfo *ndi; 1145 struct resource_list *rl; 1146 struct cesa_softc *sc; 1147 1148 if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) 1149 return (cesa_attach_late(dev)); 1150 1151 /* 1152 * Get simplebus_devinfo which contains 1153 * resource list filled with adresses and 1154 * interrupts read form FDT. 1155 * Let's correct it by splitting resources 1156 * for each engine. 1157 */ 1158 if ((ndi = device_get_ivars(dev)) == NULL) 1159 return (ENXIO); 1160 1161 rl = &ndi->rl; 1162 1163 switch (engine_idx) { 1164 case 0: 1165 /* Update regs values */ 1166 resource_list_add(rl, SYS_RES_MEMORY, 0, CESA0_TDMA_ADDR, 1167 CESA0_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); 1168 resource_list_add(rl, SYS_RES_MEMORY, 1, CESA0_CESA_ADDR, 1169 CESA0_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); 1170 1171 /* Remove unused interrupt */ 1172 resource_list_delete(rl, SYS_RES_IRQ, 1); 1173 break; 1174 1175 case 1: 1176 /* Update regs values */ 1177 resource_list_add(rl, SYS_RES_MEMORY, 0, CESA1_TDMA_ADDR, 1178 CESA1_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); 1179 resource_list_add(rl, SYS_RES_MEMORY, 1, CESA1_CESA_ADDR, 1180 CESA1_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); 1181 1182 /* Remove unused interrupt */ 1183 resource_list_delete(rl, SYS_RES_IRQ, 0); 1184 resource_list_find(rl, SYS_RES_IRQ, 1)->rid = 0; 1185 break; 1186 1187 default: 1188 device_printf(dev, "Bad cesa engine_idx\n"); 1189 return (ENXIO); 1190 } 1191 1192 sc = device_get_softc(dev); 1193 sc->sc_cesa_engine_id = engine_idx; 1194 1195 /* 1196 * Call 
simplebus_add_device only once. 1197 * It will create second cesa driver instance 1198 * with the same FDT node as first instance. 1199 * When second driver reach this function, 1200 * it will be configured to use second cesa engine 1201 */ 1202 if (engine_idx == 0) 1203 simplebus_add_device(device_get_parent(dev), ofw_bus_get_node(dev), 1204 0, "cesa", 1, NULL); 1205 1206 engine_idx++; 1207 1208 return (cesa_attach_late(dev)); 1209 } 1210 1211 static int 1212 cesa_attach_late(device_t dev) 1213 { 1214 struct cesa_softc *sc; 1215 uint32_t d, r, val; 1216 int error; 1217 int i; 1218 1219 sc = device_get_softc(dev); 1220 sc->sc_blocked = 0; 1221 sc->sc_error = 0; 1222 sc->sc_dev = dev; 1223 1224 soc_id(&d, &r); 1225 1226 switch (d) { 1227 case MV_DEV_88F6281: 1228 case MV_DEV_88F6282: 1229 /* Check if CESA peripheral device has power turned on */ 1230 if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) == 1231 CPU_PM_CTRL_CRYPTO) { 1232 device_printf(dev, "not powered on\n"); 1233 return (ENXIO); 1234 } 1235 sc->sc_tperr = 0; 1236 break; 1237 case MV_DEV_88F6828: 1238 case MV_DEV_88F6820: 1239 case MV_DEV_88F6810: 1240 sc->sc_tperr = 0; 1241 break; 1242 case MV_DEV_MV78100: 1243 case MV_DEV_MV78100_Z0: 1244 /* Check if CESA peripheral device has power turned on */ 1245 if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) != 1246 CPU_PM_CTRL_CRYPTO) { 1247 device_printf(dev, "not powered on\n"); 1248 return (ENXIO); 1249 } 1250 sc->sc_tperr = CESA_ICR_TPERR; 1251 break; 1252 default: 1253 return (ENXIO); 1254 } 1255 1256 sc->sc_soc_id = d; 1257 1258 /* Initialize mutexes */ 1259 mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev), 1260 "CESA Shared Data", MTX_DEF); 1261 mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev), 1262 "CESA TDMA Descriptors Pool", MTX_DEF); 1263 mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev), 1264 "CESA SA Descriptors Pool", MTX_DEF); 1265 mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev), 1266 "CESA Requests Pool", MTX_DEF); 1267 
mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev), 1268 "CESA Sessions Pool", MTX_DEF); 1269 1270 /* Allocate I/O and IRQ resources */ 1271 error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res); 1272 if (error) { 1273 device_printf(dev, "could not allocate resources\n"); 1274 goto err0; 1275 } 1276 1277 /* Acquire SRAM base address */ 1278 if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) 1279 error = cesa_setup_sram(sc); 1280 else 1281 error = cesa_setup_sram_armada(sc); 1282 1283 if (error) { 1284 device_printf(dev, "could not setup SRAM\n"); 1285 goto err1; 1286 } 1287 1288 /* Setup interrupt handler */ 1289 error = bus_setup_intr(dev, sc->sc_res[RES_CESA_IRQ], INTR_TYPE_NET | 1290 INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie)); 1291 if (error) { 1292 device_printf(dev, "could not setup engine completion irq\n"); 1293 goto err2; 1294 } 1295 1296 /* Create DMA tag for processed data */ 1297 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1298 1, 0, /* alignment, boundary */ 1299 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1300 BUS_SPACE_MAXADDR, /* highaddr */ 1301 NULL, NULL, /* filtfunc, filtfuncarg */ 1302 CESA_MAX_REQUEST_SIZE, /* maxsize */ 1303 CESA_MAX_FRAGMENTS, /* nsegments */ 1304 CESA_MAX_REQUEST_SIZE, 0, /* maxsegsz, flags */ 1305 NULL, NULL, /* lockfunc, lockfuncarg */ 1306 &sc->sc_data_dtag); /* dmat */ 1307 if (error) 1308 goto err3; 1309 1310 /* Initialize data structures: TDMA Descriptors Pool */ 1311 error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm, 1312 CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc)); 1313 if (error) 1314 goto err4; 1315 1316 STAILQ_INIT(&sc->sc_free_tdesc); 1317 for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) { 1318 sc->sc_tdesc[i].ctd_cthd = 1319 (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i; 1320 sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr + 1321 (i * sizeof(struct cesa_tdma_hdesc)); 1322 STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i], 1323 
ctd_stq); 1324 } 1325 1326 /* Initialize data structures: SA Descriptors Pool */ 1327 error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm, 1328 CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc)); 1329 if (error) 1330 goto err5; 1331 1332 STAILQ_INIT(&sc->sc_free_sdesc); 1333 for (i = 0; i < CESA_SA_DESCRIPTORS; i++) { 1334 sc->sc_sdesc[i].csd_cshd = 1335 (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i; 1336 sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr + 1337 (i * sizeof(struct cesa_sa_hdesc)); 1338 STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i], 1339 csd_stq); 1340 } 1341 1342 /* Initialize data structures: Requests Pool */ 1343 error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm, 1344 CESA_REQUESTS * sizeof(struct cesa_sa_data)); 1345 if (error) 1346 goto err6; 1347 1348 STAILQ_INIT(&sc->sc_free_requests); 1349 STAILQ_INIT(&sc->sc_ready_requests); 1350 STAILQ_INIT(&sc->sc_queued_requests); 1351 for (i = 0; i < CESA_REQUESTS; i++) { 1352 sc->sc_requests[i].cr_csd = 1353 (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i; 1354 sc->sc_requests[i].cr_csd_paddr = 1355 sc->sc_requests_cdm.cdm_paddr + 1356 (i * sizeof(struct cesa_sa_data)); 1357 1358 /* Preallocate DMA maps */ 1359 error = bus_dmamap_create(sc->sc_data_dtag, 0, 1360 &sc->sc_requests[i].cr_dmap); 1361 if (error && i > 0) { 1362 i--; 1363 do { 1364 bus_dmamap_destroy(sc->sc_data_dtag, 1365 sc->sc_requests[i].cr_dmap); 1366 } while (i--); 1367 1368 goto err7; 1369 } 1370 1371 STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i], 1372 cr_stq); 1373 } 1374 1375 /* Initialize data structures: Sessions Pool */ 1376 STAILQ_INIT(&sc->sc_free_sessions); 1377 for (i = 0; i < CESA_SESSIONS; i++) { 1378 sc->sc_sessions[i].cs_sid = i; 1379 STAILQ_INSERT_TAIL(&sc->sc_free_sessions, &sc->sc_sessions[i], 1380 cs_stq); 1381 } 1382 1383 /* 1384 * Initialize TDMA: 1385 * - Burst limit: 128 bytes, 1386 * - Outstanding reads enabled, 1387 * - No byte-swap. 
1388 */ 1389 val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 | 1390 CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE; 1391 1392 if (sc->sc_soc_id == MV_DEV_88F6828 || 1393 sc->sc_soc_id == MV_DEV_88F6820 || 1394 sc->sc_soc_id == MV_DEV_88F6810) 1395 val |= CESA_TDMA_NUM_OUTSTAND; 1396 1397 CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val); 1398 1399 /* 1400 * Initialize SA: 1401 * - SA descriptor is present at beginning of CESA SRAM, 1402 * - Multi-packet chain mode, 1403 * - Cooperation with TDMA enabled. 1404 */ 1405 CESA_REG_WRITE(sc, CESA_SA_DPR, 0); 1406 CESA_REG_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA | 1407 CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE); 1408 1409 /* Unmask interrupts */ 1410 CESA_REG_WRITE(sc, CESA_ICR, 0); 1411 CESA_REG_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr); 1412 CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); 1413 CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS | 1414 CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT | 1415 CESA_TDMA_EMR_DATA_ERROR); 1416 1417 /* Register in OCF */ 1418 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); 1419 if (sc->sc_cid < 0) { 1420 device_printf(dev, "could not get crypto driver id\n"); 1421 goto err8; 1422 } 1423 1424 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); 1425 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); 1426 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); 1427 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); 1428 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); 1429 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); 1430 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); 1431 if (sc->sc_soc_id == MV_DEV_88F6828 || 1432 sc->sc_soc_id == MV_DEV_88F6820 || 1433 sc->sc_soc_id == MV_DEV_88F6810) 1434 crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0); 1435 1436 return (0); 1437 err8: 1438 for (i = 0; i < CESA_REQUESTS; i++) 1439 bus_dmamap_destroy(sc->sc_data_dtag, 1440 sc->sc_requests[i].cr_dmap); 1441 err7: 1442 
cesa_free_dma_mem(&sc->sc_requests_cdm); 1443 err6: 1444 cesa_free_dma_mem(&sc->sc_sdesc_cdm); 1445 err5: 1446 cesa_free_dma_mem(&sc->sc_tdesc_cdm); 1447 err4: 1448 bus_dma_tag_destroy(sc->sc_data_dtag); 1449 err3: 1450 bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); 1451 err2: 1452 if (sc->sc_soc_id == MV_DEV_88F6828 || 1453 sc->sc_soc_id == MV_DEV_88F6820 || 1454 sc->sc_soc_id == MV_DEV_88F6810) 1455 pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); 1456 err1: 1457 bus_release_resources(dev, cesa_res_spec, sc->sc_res); 1458 err0: 1459 mtx_destroy(&sc->sc_sessions_lock); 1460 mtx_destroy(&sc->sc_requests_lock); 1461 mtx_destroy(&sc->sc_sdesc_lock); 1462 mtx_destroy(&sc->sc_tdesc_lock); 1463 mtx_destroy(&sc->sc_sc_lock); 1464 return (ENXIO); 1465 } 1466 1467 static int 1468 cesa_detach(device_t dev) 1469 { 1470 struct cesa_softc *sc; 1471 int i; 1472 1473 sc = device_get_softc(dev); 1474 1475 /* TODO: Wait for queued requests completion before shutdown. */ 1476 1477 /* Mask interrupts */ 1478 CESA_REG_WRITE(sc, CESA_ICM, 0); 1479 CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, 0); 1480 1481 /* Unregister from OCF */ 1482 crypto_unregister_all(sc->sc_cid); 1483 1484 /* Free DMA Maps */ 1485 for (i = 0; i < CESA_REQUESTS; i++) 1486 bus_dmamap_destroy(sc->sc_data_dtag, 1487 sc->sc_requests[i].cr_dmap); 1488 1489 /* Free DMA Memory */ 1490 cesa_free_dma_mem(&sc->sc_requests_cdm); 1491 cesa_free_dma_mem(&sc->sc_sdesc_cdm); 1492 cesa_free_dma_mem(&sc->sc_tdesc_cdm); 1493 1494 /* Free DMA Tag */ 1495 bus_dma_tag_destroy(sc->sc_data_dtag); 1496 1497 /* Stop interrupt */ 1498 bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); 1499 1500 /* Relase I/O and IRQ resources */ 1501 bus_release_resources(dev, cesa_res_spec, sc->sc_res); 1502 1503 /* Unmap SRAM memory */ 1504 if (sc->sc_soc_id == MV_DEV_88F6828 || 1505 sc->sc_soc_id == MV_DEV_88F6820 || 1506 sc->sc_soc_id == MV_DEV_88F6810) 1507 pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); 1508 1509 
/* Destroy mutexes */ 1510 mtx_destroy(&sc->sc_sessions_lock); 1511 mtx_destroy(&sc->sc_requests_lock); 1512 mtx_destroy(&sc->sc_sdesc_lock); 1513 mtx_destroy(&sc->sc_tdesc_lock); 1514 mtx_destroy(&sc->sc_sc_lock); 1515 1516 return (0); 1517 } 1518 1519 static void 1520 cesa_intr(void *arg) 1521 { 1522 STAILQ_HEAD(, cesa_request) requests; 1523 struct cesa_request *cr, *tmp; 1524 struct cesa_softc *sc; 1525 uint32_t ecr, icr; 1526 int blocked; 1527 1528 sc = arg; 1529 1530 /* Ack interrupt */ 1531 ecr = CESA_TDMA_READ(sc, CESA_TDMA_ECR); 1532 CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); 1533 icr = CESA_REG_READ(sc, CESA_ICR); 1534 CESA_REG_WRITE(sc, CESA_ICR, 0); 1535 1536 /* Check for TDMA errors */ 1537 if (ecr & CESA_TDMA_ECR_MISS) { 1538 device_printf(sc->sc_dev, "TDMA Miss error detected!\n"); 1539 sc->sc_error = EIO; 1540 } 1541 1542 if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) { 1543 device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n"); 1544 sc->sc_error = EIO; 1545 } 1546 1547 if (ecr & CESA_TDMA_ECR_BOTH_HIT) { 1548 device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n"); 1549 sc->sc_error = EIO; 1550 } 1551 1552 if (ecr & CESA_TDMA_ECR_DATA_ERROR) { 1553 device_printf(sc->sc_dev, "TDMA Data error detected!\n"); 1554 sc->sc_error = EIO; 1555 } 1556 1557 /* Check for CESA errors */ 1558 if (icr & sc->sc_tperr) { 1559 device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n"); 1560 sc->sc_error = EIO; 1561 } 1562 1563 /* If there is nothing more to do, return */ 1564 if ((icr & CESA_ICR_ACCTDMA) == 0) 1565 return; 1566 1567 /* Get all finished requests */ 1568 CESA_LOCK(sc, requests); 1569 STAILQ_INIT(&requests); 1570 STAILQ_CONCAT(&requests, &sc->sc_queued_requests); 1571 STAILQ_INIT(&sc->sc_queued_requests); 1572 CESA_UNLOCK(sc, requests); 1573 1574 /* Execute all ready requests */ 1575 cesa_execute(sc); 1576 1577 /* Process completed requests */ 1578 cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD | 1579 BUS_DMASYNC_POSTWRITE); 1580 1581 
	/* Complete each finished request and return it to the free pool */
	STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) {
		bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cr->cr_crp->crp_etype = sc->sc_error;
		/* Copy computed digest back into the crypto buffer */
		if (cr->cr_mac)
			crypto_copyback(cr->cr_crp->crp_flags,
			    cr->cr_crp->crp_buf, cr->cr_mac->crd_inject,
			    cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash);

		crypto_done(cr->cr_crp);
		cesa_free_request(sc, cr);
	}

	cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	sc->sc_error = 0;

	/* Unblock driver if it ran out of resources */
	CESA_LOCK(sc, sc);
	blocked = sc->sc_blocked;
	sc->sc_blocked = 0;
	CESA_UNLOCK(sc, sc);

	if (blocked)
		crypto_unblock(sc->sc_cid, blocked);
}

/*
 * OCF newsession entry point.  Accepts at most one cipher and one hash
 * transform (in either order), validates key lengths, allocates a
 * session and precomputes its CESA configuration word, IV length, HMAC
 * block length and keys.
 *
 * Returns 0 on success; EINVAL on malformed transform chains or key
 * setup failure; E2BIG on oversized keys; ENOMEM if no session is free.
 */
static int
cesa_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct cesa_session *cs;
	struct cesa_softc *sc;
	struct cryptoini *enc;
	struct cryptoini *mac;
	int error;

	sc = device_get_softc(dev);
	enc = NULL;
	mac = NULL;
	error = 0;

	/* Check and parse input */
	if (cesa_is_hash(cri->cri_alg))
		mac = cri;
	else
		enc = cri;

	cri = cri->cri_next;

	if (cri) {
		if (!enc && !cesa_is_hash(cri->cri_alg))
			enc = cri;

		if (!mac && cesa_is_hash(cri->cri_alg))
			mac = cri;

		/* At most two transforms: exactly one cipher + one hash */
		if (cri->cri_next || !(enc && mac))
			return (EINVAL);
	}

	if ((enc && (enc->cri_klen / 8) > CESA_MAX_KEY_LEN) ||
	    (mac && (mac->cri_klen / 8) > CESA_MAX_MKEY_LEN))
		return (E2BIG);

	/* Allocate session */
	cs = cesa_alloc_session(sc);
	if (!cs)
		return (ENOMEM);

	/* Prepare CESA configuration */
	cs->cs_config = 0;
	cs->cs_ivlen = 1;
	cs->cs_mblen = 1;

	if (enc) {
		switch (enc->cri_alg) {
		case CRYPTO_AES_CBC:
			cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC;
			cs->cs_ivlen = AES_BLOCK_LEN;
			break;
		case CRYPTO_DES_CBC:
			cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC;
			cs->cs_ivlen = DES_BLOCK_LEN;
			break;
		case CRYPTO_3DES_CBC:
			cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE |
			    CESA_CSHD_CBC;
			cs->cs_ivlen = DES3_BLOCK_LEN;
			break;
		default:
			error = EINVAL;
			break;
		}
	}

	if (!error && mac) {
		switch (mac->cri_alg) {
		case CRYPTO_MD5:
			cs->cs_mblen = 1;
			cs->cs_hlen = (mac->cri_mlen == 0) ? MD5_HASH_LEN :
			    mac->cri_mlen;
			cs->cs_config |= CESA_CSHD_MD5;
			break;
		case CRYPTO_MD5_HMAC:
			cs->cs_mblen = MD5_HMAC_BLOCK_LEN;
			cs->cs_hlen = (mac->cri_mlen == 0) ? MD5_HASH_LEN :
			    mac->cri_mlen;
			cs->cs_config |= CESA_CSHD_MD5_HMAC;
			if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
				cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
			break;
		case CRYPTO_SHA1:
			cs->cs_mblen = 1;
			cs->cs_hlen = (mac->cri_mlen == 0) ? SHA1_HASH_LEN :
			    mac->cri_mlen;
			cs->cs_config |= CESA_CSHD_SHA1;
			break;
		case CRYPTO_SHA1_HMAC:
			cs->cs_mblen = SHA1_HMAC_BLOCK_LEN;
			cs->cs_hlen = (mac->cri_mlen == 0) ? SHA1_HASH_LEN :
			    mac->cri_mlen;
			cs->cs_config |= CESA_CSHD_SHA1_HMAC;
			if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
				cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
			break;
		case CRYPTO_SHA2_256_HMAC:
			cs->cs_mblen = SHA2_256_HMAC_BLOCK_LEN;
			cs->cs_hlen = (mac->cri_mlen == 0) ? SHA2_256_HASH_LEN :
			    mac->cri_mlen;
			cs->cs_config |= CESA_CSHD_SHA2_256_HMAC;
			break;
		default:
			error = EINVAL;
			break;
		}
	}

	/* Save cipher key */
	if (!error && enc && enc->cri_key) {
		cs->cs_klen = enc->cri_klen / 8;
		memcpy(cs->cs_key, enc->cri_key, cs->cs_klen);
		if (enc->cri_alg == CRYPTO_AES_CBC)
			error = cesa_prep_aes_key(cs);
	}

	/* Save digest key */
	if (!error && mac && mac->cri_key)
		error = cesa_set_mkey(cs, mac->cri_alg, mac->cri_key,
		    mac->cri_klen / 8);

	/*
	 * NOTE(review): a key-setup failure is reported as EINVAL even if
	 * the helper returned a different errno — confirm intended.
	 */
	if (error) {
		cesa_free_session(sc, cs);
		return (EINVAL);
	}

	*sidp = cs->cs_sid;

	return (0);
}

/* OCF freesession entry point: return the session to the free pool. */
static int
cesa_freesession(device_t dev, uint64_t tid)
{
	struct cesa_session *cs;
	struct cesa_softc *sc;

	sc = device_get_softc(dev);
	cs = cesa_get_session(sc, CRYPTO_SESID2LID(tid));
	if (!cs)
		return (EINVAL);

	/* Free session */
	cesa_free_session(sc, cs);

	return (0);
}

/*
 * OCF process entry point.  Validates the operation against the
 * session, prepares the IV and any explicit keys, builds the TDMA/SA
 * descriptor chain, and enqueues the request for execution.  Returns
 * ERESTART (with sc_blocked set) when the request pool is exhausted so
 * OCF re-queues the operation; all other errors complete the crp with
 * crp_etype set.
 */
static int
cesa_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cesa_request *cr;
	struct cesa_session *cs;
	struct cryptodesc *crd;
	struct cryptodesc *enc;
	struct cryptodesc *mac;
	struct cesa_softc *sc;
	int error;

	sc = device_get_softc(dev);
	crd = crp->crp_desc;
	enc = NULL;
	mac = NULL;
	error = 0;

	/* Check session ID */
	cs = cesa_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
	if (!cs) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return (0);
	}

	/* Check and parse input */
	if (crp->crp_ilen > CESA_MAX_REQUEST_SIZE) {
		crp->crp_etype = E2BIG;
		crypto_done(crp);
		return (0);
	}

	if (cesa_is_hash(crd->crd_alg))
		mac = crd;
	else
		enc = crd;

	crd = crd->crd_next;

	if (crd) {
		if (!enc && !cesa_is_hash(crd->crd_alg))
			enc = crd;

		if (!mac && cesa_is_hash(crd->crd_alg))
			mac = crd;

		/* At most two descriptors: exactly one cipher + one hash */
		if (crd->crd_next || !(enc && mac)) {
			crp->crp_etype = EINVAL;
			crypto_done(crp);
			return (0);
		}
	}

	/*
	 * Get request descriptor. Block driver if there is no free
	 * descriptors in pool.
	 */
	cr = cesa_alloc_request(sc);
	if (!cr) {
		CESA_LOCK(sc, sc);
		sc->sc_blocked = CRYPTO_SYMQ;
		CESA_UNLOCK(sc, sc);
		return (ERESTART);
	}

	/* Prepare request */
	cr->cr_crp = crp;
	cr->cr_enc = enc;
	cr->cr_mac = mac;
	cr->cr_cs = cs;

	/* Sessions lock brackets the descriptor sync/build below */
	CESA_LOCK(sc, sessions);
	cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
		/* Encrypt: use caller's IV or generate a random one */
		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
		else
			arc4rand(cr->cr_csd->csd_iv, cs->cs_ivlen, 0);

		if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
	} else if (enc) {
		/* Decrypt: use caller's IV or read it from the buffer */
		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
	}

	/* Per-operation cipher key override */
	if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
		if ((enc->crd_klen / 8) <= CESA_MAX_KEY_LEN) {
			cs->cs_klen = enc->crd_klen / 8;
			memcpy(cs->cs_key, enc->crd_key, cs->cs_klen);
			if (enc->crd_alg == CRYPTO_AES_CBC)
				error = cesa_prep_aes_key(cs);
		} else
			error = E2BIG;
	}

	/* Per-operation MAC key override */
	if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
		if ((mac->crd_klen / 8) <= CESA_MAX_MKEY_LEN)
			error = cesa_set_mkey(cs, mac->crd_alg, mac->crd_key,
			    mac->crd_klen / 8);
		else
			error = E2BIG;
	}

	/* Convert request to chain of TDMA and SA descriptors */
	if (!error)
		error = cesa_create_chain(sc, cr);

	cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CESA_UNLOCK(sc, sessions);

	if (error) {
		cesa_free_request(sc, cr);
		crp->crp_etype = error;
		crypto_done(crp);
		return (0);
	}

	bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* Enqueue request to execution */
	cesa_enqueue_request(sc, cr);

	/* Start execution, if we have no more requests in queue */
	if ((hint & CRYPTO_HINT_MORE) == 0)
		cesa_execute(sc);

	return (0);
}