1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (C) 2009-2011 Semihalf. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 /* 30 * CESA SRAM Memory Map: 31 * 32 * +------------------------+ <= sc->sc_sram_base_va + CESA_SRAM_SIZE 33 * | | 34 * | DATA | 35 * | | 36 * +------------------------+ <= sc->sc_sram_base_va + CESA_DATA(0) 37 * | struct cesa_sa_data | 38 * +------------------------+ 39 * | struct cesa_sa_hdesc | 40 * +------------------------+ <= sc->sc_sram_base_va 41 */ 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/bus.h> 46 #include <sys/endian.h> 47 #include <sys/kernel.h> 48 #include <sys/lock.h> 49 #include <sys/mbuf.h> 50 #include <sys/module.h> 51 #include <sys/mutex.h> 52 #include <sys/rman.h> 53 54 #include <machine/bus.h> 55 #include <machine/intr.h> 56 #include <machine/resource.h> 57 #include <machine/fdt.h> 58 59 #include <dev/fdt/simplebus.h> 60 #include <dev/fdt/fdt_common.h> 61 #include <dev/ofw/ofw_bus.h> 62 #include <dev/ofw/ofw_bus_subr.h> 63 64 #include <crypto/sha1.h> 65 #include <crypto/sha2/sha256.h> 66 #include <crypto/rijndael/rijndael.h> 67 #include <opencrypto/cryptodev.h> 68 #include <opencrypto/xform.h> 69 #include "cryptodev_if.h" 70 71 #include <arm/mv/mvreg.h> 72 #include <arm/mv/mvvar.h> 73 #include "cesa.h" 74 75 static int cesa_probe(device_t); 76 static int cesa_attach(device_t); 77 static int cesa_attach_late(device_t); 78 static int cesa_detach(device_t); 79 static void cesa_intr(void *); 80 static int cesa_probesession(device_t, 81 const struct crypto_session_params *); 82 static int cesa_newsession(device_t, crypto_session_t, 83 const struct crypto_session_params *); 84 static int cesa_process(device_t, struct cryptop *, int); 85 86 static struct resource_spec cesa_res_spec[] = { 87 { SYS_RES_MEMORY, 0, RF_ACTIVE }, 88 { SYS_RES_MEMORY, 1, RF_ACTIVE }, 89 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 90 { -1, 0 } 91 }; 92 93 static device_method_t cesa_methods[] = { 94 /* Device interface */ 95 DEVMETHOD(device_probe, cesa_probe), 96 DEVMETHOD(device_attach, cesa_attach), 97 DEVMETHOD(device_detach, cesa_detach), 98 99 /* Crypto device methods */ 100 DEVMETHOD(cryptodev_probesession, cesa_probesession), 101 DEVMETHOD(cryptodev_newsession, cesa_newsession), 102 DEVMETHOD(cryptodev_process, cesa_process), 103 104 DEVMETHOD_END 105 }; 106 107 static driver_t cesa_driver = { 108 "cesa", 109 cesa_methods, 110 sizeof (struct cesa_softc) 111 }; 112 113 DRIVER_MODULE(cesa, simplebus, cesa_driver, 0, 0); 114 MODULE_DEPEND(cesa, crypto, 1, 1, 1); 115 116 static void 117 cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd) 118 { 119 #ifdef DEBUG 120 device_t dev; 121 122 dev = sc->sc_dev; 123 device_printf(dev, "CESA SA Hardware Descriptor:\n"); 124 device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config); 125 device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src); 126 device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst); 127 device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen); 128 device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key); 129 device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv); 130 device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf); 131 device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src); 132 device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst); 133 device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen); 134 device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen); 135 device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in); 136 device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out); 137 #endif 138 } 139 140 static void 141 cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 142 { 143 struct cesa_dma_mem *cdm; 144 145 if (error) 146 return; 147 148 KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1.")); 149 cdm = arg; 150 cdm->cdm_paddr = segs->ds_addr; 151 } 152 153 static int 154 cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm, 155 bus_size_t size) 156 { 157 int error; 158 159 KASSERT(cdm->cdm_vaddr == NULL, 160 ("%s(): DMA memory descriptor in use.", __func__)); 161 162 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 163 PAGE_SIZE, 0, /* alignment, boundary */ 164 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 165 BUS_SPACE_MAXADDR, /* highaddr */ 166 NULL, NULL, /* filtfunc, filtfuncarg */ 167 size, 1, /* maxsize, nsegments */ 168 size, 0, /* maxsegsz, flags */ 169 NULL, NULL, /* lockfunc, lockfuncarg */ 170 &cdm->cdm_tag); /* dmat */ 171 if (error) { 172 device_printf(sc->sc_dev, "failed to allocate busdma tag, error" 173 " %i!\n", error); 174 175 goto err1; 176 } 177 178 error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr, 179 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map); 180 if (error) { 181 device_printf(sc->sc_dev, "failed to allocate DMA safe" 182 " memory, error %i!\n", error); 183 184 goto err2; 185 } 186 187 error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr, 188 size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT); 189 if (error) { 190 device_printf(sc->sc_dev, "cannot get address of the DMA" 191 " memory, error %i\n", error); 192 193 goto err3; 194 } 195 196 return (0); 197 err3: 198 bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); 199 err2: 200 bus_dma_tag_destroy(cdm->cdm_tag); 201 err1: 202 cdm->cdm_vaddr = NULL; 203 return (error); 204 } 205 206 static void 207 cesa_free_dma_mem(struct cesa_dma_mem *cdm) 208 { 209 210 bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map); 211 bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); 212 bus_dma_tag_destroy(cdm->cdm_tag); 213 cdm->cdm_vaddr = NULL; 214 } 215 216 static void 217 cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op) 218 { 219 220 /* Sync only if dma memory is valid */ 221 if (cdm->cdm_vaddr != NULL) 222 bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op); 223 } 224 225 static void 226 cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op) 227 { 228 229 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op); 230 cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op); 231 cesa_sync_dma_mem(&sc->sc_requests_cdm, op); 232 } 233 234 static struct cesa_request * 235 cesa_alloc_request(struct cesa_softc *sc) 236 { 237 struct cesa_request *cr; 238 239 CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests); 240 if (!cr) 241 return (NULL); 242 243 STAILQ_INIT(&cr->cr_tdesc); 244 STAILQ_INIT(&cr->cr_sdesc); 245 246 return (cr); 247 } 248 249 static void 250 cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr) 251 { 252 253 /* Free TDMA descriptors assigned to this request */ 254 CESA_LOCK(sc, tdesc); 255 STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc); 256 CESA_UNLOCK(sc, tdesc); 257 258 /* Free SA descriptors assigned to this request */ 259 CESA_LOCK(sc, sdesc); 260 STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc); 261 CESA_UNLOCK(sc, sdesc); 262 263 /* Unload DMA memory associated with request */ 264 if (cr->cr_dmap_loaded) { 265 bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap); 266 cr->cr_dmap_loaded = 0; 267 } 268 269 CESA_GENERIC_FREE_LOCKED(sc, cr, requests); 270 } 271 272 static void 273 cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr) 274 { 275 276 CESA_LOCK(sc, requests); 277 STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq); 278 CESA_UNLOCK(sc, requests); 279 } 280 281 static struct cesa_tdma_desc * 282 cesa_alloc_tdesc(struct cesa_softc *sc) 283 { 284 struct cesa_tdma_desc *ctd; 285 286 CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc); 287 288 if (!ctd) 289 device_printf(sc->sc_dev, "TDMA descriptors pool exhaused. " 290 "Consider increasing CESA_TDMA_DESCRIPTORS.\n"); 291 292 return (ctd); 293 } 294 295 static struct cesa_sa_desc * 296 cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr) 297 { 298 struct cesa_sa_desc *csd; 299 300 CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc); 301 if (!csd) { 302 device_printf(sc->sc_dev, "SA descriptors pool exhaused. " 303 "Consider increasing CESA_SA_DESCRIPTORS.\n"); 304 return (NULL); 305 } 306 307 STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq); 308 309 /* Fill-in SA descriptor with default values */ 310 csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key); 311 csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv); 312 csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv); 313 csd->csd_cshd->cshd_enc_src = 0; 314 csd->csd_cshd->cshd_enc_dst = 0; 315 csd->csd_cshd->cshd_enc_dlen = 0; 316 csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash); 317 csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in); 318 csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out); 319 csd->csd_cshd->cshd_mac_src = 0; 320 csd->csd_cshd->cshd_mac_dlen = 0; 321 322 return (csd); 323 } 324 325 static struct cesa_tdma_desc * 326 cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src, 327 bus_size_t size) 328 { 329 struct cesa_tdma_desc *ctd; 330 331 ctd = cesa_alloc_tdesc(sc); 332 if (!ctd) 333 return (NULL); 334 335 ctd->ctd_cthd->cthd_dst = dst; 336 ctd->ctd_cthd->cthd_src = src; 337 ctd->ctd_cthd->cthd_byte_count = size; 338 339 /* Handle special control packet */ 340 if (size != 0) 341 ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED; 342 else 343 ctd->ctd_cthd->cthd_flags = 0; 344 345 return (ctd); 346 } 347 348 static struct cesa_tdma_desc * 349 cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr) 350 { 351 352 return (cesa_tdma_copy(sc, sc->sc_sram_base_pa + 353 sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr, 354 sizeof(struct cesa_sa_data))); 355 } 356 357 static struct cesa_tdma_desc * 358 cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr) 359 { 360 361 return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base_pa + 362 sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data))); 363 } 364 365 static struct cesa_tdma_desc * 366 cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd) 367 { 368 369 return (cesa_tdma_copy(sc, sc->sc_sram_base_pa, csd->csd_cshd_paddr, 370 sizeof(struct cesa_sa_hdesc))); 371 } 372 373 static void 374 cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd) 375 { 376 struct cesa_tdma_desc *ctd_prev; 377 378 if (!STAILQ_EMPTY(&cr->cr_tdesc)) { 379 ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq); 380 ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr; 381 } 382 383 ctd->ctd_cthd->cthd_next = 0; 384 STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq); 385 } 386 387 static int 388 cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr, 389 struct cesa_packet *cp, struct cesa_sa_desc *csd) 390 { 391 struct cesa_tdma_desc *ctd, *tmp; 392 393 /* Copy SA descriptor for this packet */ 394 ctd = cesa_tdma_copy_sdesc(sc, csd); 395 if (!ctd) 396 return (ENOMEM); 397 398 cesa_append_tdesc(cr, ctd); 399 400 /* Copy data to be processed */ 401 STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp) 402 cesa_append_tdesc(cr, ctd); 403 STAILQ_INIT(&cp->cp_copyin); 404 405 /* Insert control descriptor */ 406 ctd = cesa_tdma_copy(sc, 0, 0, 0); 407 if (!ctd) 408 return (ENOMEM); 409 410 cesa_append_tdesc(cr, ctd); 411 412 /* Copy back results */ 413 STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp) 414 cesa_append_tdesc(cr, ctd); 415 STAILQ_INIT(&cp->cp_copyout); 416 417 return (0); 418 } 419 420 static void 421 cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen) 422 { 423 union authctx auth_ctx; 424 uint32_t *hout; 425 uint32_t *hin; 426 int i; 427 428 hin = (uint32_t *)cs->cs_hiv_in; 429 hout = (uint32_t *)cs->cs_hiv_out; 430 431 switch (alg) { 432 case CRYPTO_SHA1_HMAC: 433 hmac_init_ipad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx); 434 memcpy(hin, auth_ctx.sha1ctx.h.b32, 435 sizeof(auth_ctx.sha1ctx.h.b32)); 436 hmac_init_opad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx); 437 memcpy(hout, auth_ctx.sha1ctx.h.b32, 438 sizeof(auth_ctx.sha1ctx.h.b32)); 439 break; 440 case CRYPTO_SHA2_256_HMAC: 441 hmac_init_ipad(&auth_hash_hmac_sha2_256, mkey, mklen, 442 &auth_ctx); 443 memcpy(hin, auth_ctx.sha256ctx.state, 444 sizeof(auth_ctx.sha256ctx.state)); 445 hmac_init_opad(&auth_hash_hmac_sha2_256, mkey, mklen, 446 &auth_ctx); 447 memcpy(hout, auth_ctx.sha256ctx.state, 448 sizeof(auth_ctx.sha256ctx.state)); 449 break; 450 default: 451 panic("shouldn't get here"); 452 } 453 454 for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) { 455 hin[i] = htobe32(hin[i]); 456 hout[i] = htobe32(hout[i]); 457 } 458 explicit_bzero(&auth_ctx, sizeof(auth_ctx)); 459 } 460 461 static int 462 cesa_prep_aes_key(struct cesa_session *cs, 463 const struct crypto_session_params *csp) 464 { 465 uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; 466 uint32_t *dkey; 467 int i; 468 469 rijndaelKeySetupEnc(ek, cs->cs_key, csp->csp_cipher_klen * 8); 470 471 cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK; 472 dkey = (uint32_t *)cs->cs_aes_dkey; 473 474 switch (csp->csp_cipher_klen) { 475 case 16: 476 cs->cs_config |= CESA_CSH_AES_KLEN_128; 477 for (i = 0; i < 4; i++) 478 *dkey++ = htobe32(ek[4 * 10 + i]); 479 break; 480 case 24: 481 cs->cs_config |= CESA_CSH_AES_KLEN_192; 482 for (i = 0; i < 4; i++) 483 *dkey++ = htobe32(ek[4 * 12 + i]); 484 for (i = 0; i < 2; i++) 485 *dkey++ = htobe32(ek[4 * 11 + 2 + i]); 486 break; 487 case 32: 488 cs->cs_config |= CESA_CSH_AES_KLEN_256; 489 for (i = 0; i < 4; i++) 490 *dkey++ = htobe32(ek[4 * 14 + i]); 491 for (i = 0; i < 4; i++) 492 *dkey++ = htobe32(ek[4 * 13 + i]); 493 break; 494 default: 495 return (EINVAL); 496 } 497 498 return (0); 499 } 500 501 static void 502 cesa_start_packet(struct cesa_packet *cp, unsigned int size) 503 { 504 505 cp->cp_size = size; 506 cp->cp_offset = 0; 507 STAILQ_INIT(&cp->cp_copyin); 508 STAILQ_INIT(&cp->cp_copyout); 509 } 510 511 static int 512 cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp, 513 bus_dma_segment_t *seg) 514 { 515 struct cesa_tdma_desc *ctd; 516 unsigned int bsize; 517 518 /* Calculate size of block copy */ 519 bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset); 520 521 if (bsize > 0) { 522 ctd = cesa_tdma_copy(sc, sc->sc_sram_base_pa + 523 CESA_DATA(cp->cp_offset), seg->ds_addr, bsize); 524 if (!ctd) 525 return (-ENOMEM); 526 527 STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq); 528 529 ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base_pa + 530 CESA_DATA(cp->cp_offset), bsize); 531 if (!ctd) 532 return (-ENOMEM); 533 534 STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq); 535 536 seg->ds_len -= bsize; 537 seg->ds_addr += bsize; 538 cp->cp_offset += bsize; 539 } 540 541 return (bsize); 542 } 543 544 static void 545 cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 546 { 547 unsigned int mpsize, fragmented; 548 unsigned int mlen, mskip, tmlen; 549 struct cesa_chain_info *cci; 550 unsigned int elen, eskip; 551 unsigned int skip, len; 552 struct cesa_sa_desc *csd; 553 struct cesa_request *cr; 554 struct cryptop *crp; 555 struct cesa_softc *sc; 556 struct cesa_packet cp; 557 bus_dma_segment_t seg; 558 uint32_t config; 559 int size; 560 561 cci = arg; 562 sc = cci->cci_sc; 563 cr = cci->cci_cr; 564 crp = cr->cr_crp; 565 566 if (error) { 567 cci->cci_error = error; 568 return; 569 } 570 571 /* 572 * Only do a combined op if the AAD is adjacent to the payload 573 * and the AAD length is a multiple of the IV length. The 574 * checks against 'config' are to avoid recursing when the 575 * logic below invokes separate operations. 576 */ 577 config = cci->cci_config; 578 if (((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC || 579 (config & CESA_CSHD_OP_MASK) == CESA_CSHD_ENC_AND_MAC) && 580 crp->crp_aad_length != 0 && 581 (crp->crp_aad_length & (cr->cr_cs->cs_ivlen - 1)) != 0) { 582 /* 583 * Data alignment in the request does not meet CESA requiremnts 584 * for combined encryption/decryption and hashing. We have to 585 * split the request to separate operations and process them 586 * one by one. 587 */ 588 if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) { 589 config &= ~CESA_CSHD_OP_MASK; 590 591 cci->cci_config = config | CESA_CSHD_MAC; 592 cesa_create_chain_cb(cci, segs, nseg, 0); 593 594 cci->cci_config = config | CESA_CSHD_ENC; 595 cesa_create_chain_cb(cci, segs, nseg, 0); 596 } else { 597 config &= ~CESA_CSHD_OP_MASK; 598 599 cci->cci_config = config | CESA_CSHD_ENC; 600 cesa_create_chain_cb(cci, segs, nseg, 0); 601 602 cci->cci_config = config | CESA_CSHD_MAC; 603 cesa_create_chain_cb(cci, segs, nseg, 0); 604 } 605 606 return; 607 } 608 609 mskip = mlen = eskip = elen = 0; 610 611 if (crp->crp_aad_length == 0) { 612 skip = crp->crp_payload_start; 613 len = crp->crp_payload_length; 614 switch (config & CESA_CSHD_OP_MASK) { 615 case CESA_CSHD_ENC: 616 eskip = skip; 617 elen = len; 618 break; 619 case CESA_CSHD_MAC: 620 mskip = skip; 621 mlen = len; 622 break; 623 default: 624 eskip = skip; 625 elen = len; 626 mskip = skip; 627 mlen = len; 628 break; 629 } 630 } else { 631 /* 632 * For an encryption-only separate request, only 633 * process the payload. For combined requests and 634 * hash-only requests, process the entire region. 635 */ 636 switch (config & CESA_CSHD_OP_MASK) { 637 case CESA_CSHD_ENC: 638 skip = crp->crp_payload_start; 639 len = crp->crp_payload_length; 640 eskip = skip; 641 elen = len; 642 break; 643 case CESA_CSHD_MAC: 644 skip = crp->crp_aad_start; 645 len = crp->crp_aad_length + crp->crp_payload_length; 646 mskip = skip; 647 mlen = len; 648 break; 649 default: 650 skip = crp->crp_aad_start; 651 len = crp->crp_aad_length + crp->crp_payload_length; 652 mskip = skip; 653 mlen = len; 654 eskip = crp->crp_payload_start; 655 elen = crp->crp_payload_length; 656 break; 657 } 658 } 659 660 tmlen = mlen; 661 fragmented = 0; 662 mpsize = CESA_MAX_PACKET_SIZE; 663 mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1)); 664 665 /* Start first packet in chain */ 666 cesa_start_packet(&cp, MIN(mpsize, len)); 667 668 while (nseg-- && len > 0) { 669 seg = *(segs++); 670 671 /* 672 * Skip data in buffer on which neither ENC nor MAC operation 673 * is requested. 674 */ 675 if (skip > 0) { 676 size = MIN(skip, seg.ds_len); 677 skip -= size; 678 679 seg.ds_addr += size; 680 seg.ds_len -= size; 681 682 if (eskip > 0) 683 eskip -= size; 684 685 if (mskip > 0) 686 mskip -= size; 687 688 if (seg.ds_len == 0) 689 continue; 690 } 691 692 while (1) { 693 /* 694 * Fill in current packet with data. Break if there is 695 * no more data in current DMA segment or an error 696 * occurred. 697 */ 698 size = cesa_fill_packet(sc, &cp, &seg); 699 if (size <= 0) { 700 error = -size; 701 break; 702 } 703 704 len -= size; 705 706 /* If packet is full, append it to the chain */ 707 if (cp.cp_size == cp.cp_offset) { 708 csd = cesa_alloc_sdesc(sc, cr); 709 if (!csd) { 710 error = ENOMEM; 711 break; 712 } 713 714 /* Create SA descriptor for this packet */ 715 csd->csd_cshd->cshd_config = cci->cci_config; 716 csd->csd_cshd->cshd_mac_total_dlen = tmlen; 717 718 /* 719 * Enable fragmentation if request will not fit 720 * into one packet. 721 */ 722 if (len > 0) { 723 if (!fragmented) { 724 fragmented = 1; 725 csd->csd_cshd->cshd_config |= 726 CESA_CSHD_FRAG_FIRST; 727 } else 728 csd->csd_cshd->cshd_config |= 729 CESA_CSHD_FRAG_MIDDLE; 730 } else if (fragmented) 731 csd->csd_cshd->cshd_config |= 732 CESA_CSHD_FRAG_LAST; 733 734 if (eskip < cp.cp_size && elen > 0) { 735 csd->csd_cshd->cshd_enc_src = 736 CESA_DATA(eskip); 737 csd->csd_cshd->cshd_enc_dst = 738 CESA_DATA(eskip); 739 csd->csd_cshd->cshd_enc_dlen = 740 MIN(elen, cp.cp_size - eskip); 741 } 742 743 if (mskip < cp.cp_size && mlen > 0) { 744 csd->csd_cshd->cshd_mac_src = 745 CESA_DATA(mskip); 746 csd->csd_cshd->cshd_mac_dlen = 747 MIN(mlen, cp.cp_size - mskip); 748 } 749 750 elen -= csd->csd_cshd->cshd_enc_dlen; 751 eskip -= MIN(eskip, cp.cp_size); 752 mlen -= csd->csd_cshd->cshd_mac_dlen; 753 mskip -= MIN(mskip, cp.cp_size); 754 755 cesa_dump_cshd(sc, csd->csd_cshd); 756 757 /* Append packet to the request */ 758 error = cesa_append_packet(sc, cr, &cp, csd); 759 if (error) 760 break; 761 762 /* Start a new packet, as current is full */ 763 cesa_start_packet(&cp, MIN(mpsize, len)); 764 } 765 } 766 767 if (error) 768 break; 769 } 770 771 if (error) { 772 /* 773 * Move all allocated resources to the request. They will be 774 * freed later. 775 */ 776 STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin); 777 STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout); 778 cci->cci_error = error; 779 } 780 } 781 782 static int 783 cesa_create_chain(struct cesa_softc *sc, 784 const struct crypto_session_params *csp, struct cesa_request *cr) 785 { 786 struct cesa_chain_info cci; 787 struct cesa_tdma_desc *ctd; 788 uint32_t config; 789 int error; 790 791 error = 0; 792 CESA_LOCK_ASSERT(sc, sessions); 793 794 /* Create request metadata */ 795 if (csp->csp_cipher_klen != 0) { 796 if (csp->csp_cipher_alg == CRYPTO_AES_CBC && 797 !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op)) 798 memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey, 799 csp->csp_cipher_klen); 800 else 801 memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key, 802 csp->csp_cipher_klen); 803 } 804 805 if (csp->csp_auth_klen != 0) { 806 memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in, 807 CESA_MAX_HASH_LEN); 808 memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out, 809 CESA_MAX_HASH_LEN); 810 } 811 812 ctd = cesa_tdma_copyin_sa_data(sc, cr); 813 if (!ctd) 814 return (ENOMEM); 815 816 cesa_append_tdesc(cr, ctd); 817 818 /* Prepare SA configuration */ 819 config = cr->cr_cs->cs_config; 820 821 if (csp->csp_cipher_alg != 0 && 822 !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op)) 823 config |= CESA_CSHD_DECRYPT; 824 switch (csp->csp_mode) { 825 case CSP_MODE_CIPHER: 826 config |= CESA_CSHD_ENC; 827 break; 828 case CSP_MODE_DIGEST: 829 config |= CESA_CSHD_MAC; 830 break; 831 case CSP_MODE_ETA: 832 config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC : 833 CESA_CSHD_ENC_AND_MAC; 834 break; 835 } 836 837 /* Create data packets */ 838 cci.cci_sc = sc; 839 cci.cci_cr = cr; 840 cci.cci_config = config; 841 cci.cci_error = 0; 842 843 error = bus_dmamap_load_crp(sc->sc_data_dtag, cr->cr_dmap, cr->cr_crp, 844 cesa_create_chain_cb, &cci, BUS_DMA_NOWAIT); 845 846 if (!error) 847 cr->cr_dmap_loaded = 1; 848 849 if (cci.cci_error) 850 error = cci.cci_error; 851 852 if (error) 853 return (error); 854 855 /* Read back request metadata */ 856 ctd = cesa_tdma_copyout_sa_data(sc, cr); 857 if (!ctd) 858 return (ENOMEM); 859 860 cesa_append_tdesc(cr, ctd); 861 862 return (0); 863 } 864 865 static void 866 cesa_execute(struct cesa_softc *sc) 867 { 868 struct cesa_tdma_desc *prev_ctd, *ctd; 869 struct cesa_request *prev_cr, *cr; 870 871 CESA_LOCK(sc, requests); 872 873 /* 874 * If ready list is empty, there is nothing to execute. If queued list 875 * is not empty, the hardware is busy and we cannot start another 876 * execution. 877 */ 878 if (STAILQ_EMPTY(&sc->sc_ready_requests) || 879 !STAILQ_EMPTY(&sc->sc_queued_requests)) { 880 CESA_UNLOCK(sc, requests); 881 return; 882 } 883 884 /* Move all ready requests to queued list */ 885 STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests); 886 STAILQ_INIT(&sc->sc_ready_requests); 887 888 /* Create one execution chain from all requests on the list */ 889 if (STAILQ_FIRST(&sc->sc_queued_requests) != 890 STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) { 891 prev_cr = NULL; 892 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD | 893 BUS_DMASYNC_POSTWRITE); 894 895 STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) { 896 if (prev_cr) { 897 ctd = STAILQ_FIRST(&cr->cr_tdesc); 898 prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc, 899 cesa_tdma_desc, ctd_stq); 900 901 prev_ctd->ctd_cthd->cthd_next = 902 ctd->ctd_cthd_paddr; 903 } 904 905 prev_cr = cr; 906 } 907 908 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD | 909 BUS_DMASYNC_PREWRITE); 910 } 911 912 /* Start chain execution in hardware */ 913 cr = STAILQ_FIRST(&sc->sc_queued_requests); 914 ctd = STAILQ_FIRST(&cr->cr_tdesc); 915 916 CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr); 917 918 if (sc->sc_soc_id == MV_DEV_88F6828 || 919 sc->sc_soc_id == MV_DEV_88F6820 || 920 sc->sc_soc_id == MV_DEV_88F6810) 921 CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2); 922 else 923 CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE); 924 925 CESA_UNLOCK(sc, requests); 926 } 927 928 static int 929 cesa_setup_sram(struct cesa_softc *sc) 930 { 931 phandle_t sram_node; 932 ihandle_t sram_ihandle; 933 pcell_t sram_handle, sram_reg[2]; 934 void *sram_va; 935 int rv; 936 937 rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle", 938 (void *)&sram_handle, sizeof(sram_handle)); 939 if (rv <= 0) 940 return (rv); 941 942 sram_ihandle = (ihandle_t)sram_handle; 943 sram_node = OF_instance_to_package(sram_ihandle); 944 945 rv = OF_getencprop(sram_node, "reg", (void *)sram_reg, sizeof(sram_reg)); 946 if (rv <= 0) 947 return (rv); 948 949 sc->sc_sram_base_pa = sram_reg[0]; 950 /* Store SRAM size to be able to unmap in detach() */ 951 sc->sc_sram_size = sram_reg[1]; 952 953 if (sc->sc_soc_id != MV_DEV_88F6828 && 954 sc->sc_soc_id != MV_DEV_88F6820 && 955 sc->sc_soc_id != MV_DEV_88F6810) 956 return (0); 957 958 /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ 959 sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); 960 if (sram_va == NULL) 961 return (ENOMEM); 962 sc->sc_sram_base_va = sram_va; 963 964 return (0); 965 } 966 967 /* 968 * Function: device_from_node 969 * This function returns appropriate device_t to phandle_t 970 * Parameters: 971 * root - device where you want to start search 972 * if you provide NULL here, function will take 973 * "root0" device as root. 974 * node - we are checking every device_t to be 975 * appropriate with this. 976 */ 977 static device_t 978 device_from_node(device_t root, phandle_t node) 979 { 980 device_t *children, retval; 981 int nkid, i; 982 983 /* Nothing matches no node */ 984 if (node == -1) 985 return (NULL); 986 987 if (root == NULL) 988 /* Get root of device tree */ 989 if ((root = device_lookup_by_name("root0")) == NULL) 990 return (NULL); 991 992 if (device_get_children(root, &children, &nkid) != 0) 993 return (NULL); 994 995 retval = NULL; 996 for (i = 0; i < nkid; i++) { 997 /* Check if device and node matches */ 998 if (OFW_BUS_GET_NODE(root, children[i]) == node) { 999 retval = children[i]; 1000 break; 1001 } 1002 /* or go deeper */ 1003 if ((retval = device_from_node(children[i], node)) != NULL) 1004 break; 1005 } 1006 free(children, M_TEMP); 1007 1008 return (retval); 1009 } 1010 1011 static int 1012 cesa_setup_sram_armada(struct cesa_softc *sc) 1013 { 1014 phandle_t sram_node; 1015 ihandle_t sram_ihandle; 1016 pcell_t sram_handle[2]; 1017 void *sram_va; 1018 int rv, j; 1019 struct resource_list rl; 1020 struct resource_list_entry *rle; 1021 struct simplebus_softc *ssc; 1022 device_t sdev; 1023 1024 /* Get refs to SRAMS from CESA node */ 1025 rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "marvell,crypto-srams", 1026 (void *)sram_handle, sizeof(sram_handle)); 1027 if (rv <= 0) 1028 return (rv); 1029 1030 if (sc->sc_cesa_engine_id >= 2) 1031 return (ENXIO); 1032 1033 /* Get SRAM node on the basis of sc_cesa_engine_id */ 1034 sram_ihandle = (ihandle_t)sram_handle[sc->sc_cesa_engine_id]; 1035 sram_node = OF_instance_to_package(sram_ihandle); 1036 1037 /* Get device_t of simplebus (sram_node parent) */ 1038 sdev = device_from_node(NULL, OF_parent(sram_node)); 1039 if (!sdev) 1040 return (ENXIO); 1041 1042 ssc = device_get_softc(sdev); 1043 1044 resource_list_init(&rl); 1045 /* Parse reg property to resource list */ 1046 ofw_bus_reg_to_rl(sdev, sram_node, ssc->acells, 1047 ssc->scells, &rl); 1048 1049 /* We expect only one resource */ 1050 rle = resource_list_find(&rl, SYS_RES_MEMORY, 0); 1051 if (rle == NULL) 1052 return (ENXIO); 1053 1054 /* Remap through ranges property */ 1055 for (j = 0; j < ssc->nranges; j++) { 1056 if (rle->start >= ssc->ranges[j].bus && 1057 rle->end < ssc->ranges[j].bus + ssc->ranges[j].size) { 1058 rle->start -= ssc->ranges[j].bus; 1059 rle->start += ssc->ranges[j].host; 1060 rle->end -= ssc->ranges[j].bus; 1061 rle->end += ssc->ranges[j].host; 1062 } 1063 } 1064 1065 sc->sc_sram_base_pa = rle->start; 1066 sc->sc_sram_size = rle->count; 1067 1068 /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ 1069 sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); 1070 if (sram_va == NULL) 1071 return (ENOMEM); 1072 sc->sc_sram_base_va = sram_va; 1073 1074 return (0); 1075 } 1076 1077 struct ofw_compat_data cesa_devices[] = { 1078 { "mrvl,cesa", (uintptr_t)true }, 1079 { "marvell,armada-38x-crypto", (uintptr_t)true }, 1080 { NULL, 0 } 1081 }; 1082 1083 static int 1084 cesa_probe(device_t dev) 1085 { 1086 1087 if (!ofw_bus_status_okay(dev)) 1088 return (ENXIO); 1089 1090 if (!ofw_bus_search_compatible(dev, cesa_devices)->ocd_data) 1091 return (ENXIO); 1092 1093 device_set_desc(dev, "Marvell Cryptographic Engine and Security " 1094 "Accelerator"); 1095 1096 return (BUS_PROBE_DEFAULT); 1097 } 1098 1099 static int 1100 cesa_attach(device_t dev) 1101 { 1102 static int engine_idx = 0; 1103 struct simplebus_devinfo *ndi; 1104 struct resource_list *rl; 1105 struct cesa_softc *sc; 1106 1107 if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) 1108 return (cesa_attach_late(dev)); 1109 1110 /* 1111 * Get simplebus_devinfo which contains 1112 * resource list filled with adresses and 1113 * interrupts read form FDT. 1114 * Let's correct it by splitting resources 1115 * for each engine. 1116 */ 1117 if ((ndi = device_get_ivars(dev)) == NULL) 1118 return (ENXIO); 1119 1120 rl = &ndi->rl; 1121 1122 switch (engine_idx) { 1123 case 0: 1124 /* Update regs values */ 1125 resource_list_add(rl, SYS_RES_MEMORY, 0, CESA0_TDMA_ADDR, 1126 CESA0_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); 1127 resource_list_add(rl, SYS_RES_MEMORY, 1, CESA0_CESA_ADDR, 1128 CESA0_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); 1129 1130 /* Remove unused interrupt */ 1131 resource_list_delete(rl, SYS_RES_IRQ, 1); 1132 break; 1133 1134 case 1: 1135 /* Update regs values */ 1136 resource_list_add(rl, SYS_RES_MEMORY, 0, CESA1_TDMA_ADDR, 1137 CESA1_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); 1138 resource_list_add(rl, SYS_RES_MEMORY, 1, CESA1_CESA_ADDR, 1139 CESA1_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); 1140 1141 /* Remove unused interrupt */ 1142 resource_list_delete(rl, SYS_RES_IRQ, 0); 1143 resource_list_find(rl, SYS_RES_IRQ, 1)->rid = 0; 1144 break; 1145 1146 default: 1147 device_printf(dev, "Bad cesa engine_idx\n"); 1148 return (ENXIO); 1149 } 1150 1151 sc = device_get_softc(dev); 1152 sc->sc_cesa_engine_id = engine_idx; 1153 1154 /* 1155 * Call simplebus_add_device only once. 1156 * It will create second cesa driver instance 1157 * with the same FDT node as first instance. 1158 * When second driver reach this function, 1159 * it will be configured to use second cesa engine 1160 */ 1161 if (engine_idx == 0) 1162 simplebus_add_device(device_get_parent(dev), ofw_bus_get_node(dev), 1163 0, "cesa", 1, NULL); 1164 1165 engine_idx++; 1166 1167 return (cesa_attach_late(dev)); 1168 } 1169 1170 static int 1171 cesa_attach_late(device_t dev) 1172 { 1173 struct cesa_softc *sc; 1174 uint32_t d, r, val; 1175 int error; 1176 int i; 1177 1178 sc = device_get_softc(dev); 1179 sc->sc_blocked = 0; 1180 sc->sc_error = 0; 1181 sc->sc_dev = dev; 1182 1183 soc_id(&d, &r); 1184 1185 switch (d) { 1186 case MV_DEV_88F6281: 1187 case MV_DEV_88F6282: 1188 /* Check if CESA peripheral device has power turned on */ 1189 if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) == 1190 CPU_PM_CTRL_CRYPTO) { 1191 device_printf(dev, "not powered on\n"); 1192 return (ENXIO); 1193 } 1194 sc->sc_tperr = 0; 1195 break; 1196 case MV_DEV_88F6828: 1197 case MV_DEV_88F6820: 1198 case MV_DEV_88F6810: 1199 sc->sc_tperr = 0; 1200 break; 1201 case MV_DEV_MV78100: 1202 case MV_DEV_MV78100_Z0: 1203 /* Check if CESA peripheral device has power turned on */ 1204 if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) != 1205 CPU_PM_CTRL_CRYPTO) { 1206 device_printf(dev, "not powered on\n"); 1207 return (ENXIO); 1208 } 1209 sc->sc_tperr = CESA_ICR_TPERR; 1210 break; 1211 default: 1212 return (ENXIO); 1213 } 1214 1215 sc->sc_soc_id = d; 1216 1217 /* Initialize mutexes */ 1218 mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev), 1219 "CESA Shared Data", MTX_DEF); 1220 mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev), 1221 "CESA TDMA Descriptors Pool", MTX_DEF); 1222 mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev), 1223 "CESA SA Descriptors Pool", MTX_DEF); 1224 mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev), 1225 "CESA Requests Pool", MTX_DEF); 1226 mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev), 1227 "CESA Sessions Pool", MTX_DEF); 1228 1229 /* Allocate I/O and IRQ resources */ 1230 error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res); 1231 if (error) { 1232 device_printf(dev, "could not allocate resources\n"); 1233 goto err0; 1234 } 1235 1236 /* Acquire SRAM base address */ 1237 if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) 1238 error = cesa_setup_sram(sc); 1239 else 1240 error = cesa_setup_sram_armada(sc); 1241 1242 if (error) { 1243 device_printf(dev, "could not setup SRAM\n"); 1244 goto err1; 1245 } 1246 1247 /* Setup interrupt handler */ 1248 error = bus_setup_intr(dev, sc->sc_res[RES_CESA_IRQ], INTR_TYPE_NET | 1249 INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie)); 1250 if (error) { 1251 device_printf(dev, "could not setup engine completion irq\n"); 1252 goto err2; 1253 } 1254 1255 /* Create DMA tag for processed data */ 1256 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1257 1, 0, /* alignment, boundary */ 1258 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1259 BUS_SPACE_MAXADDR, /* highaddr */ 1260 NULL, NULL, /* filtfunc, filtfuncarg */ 1261 CESA_MAX_REQUEST_SIZE, /* maxsize */ 1262 CESA_MAX_FRAGMENTS, /* nsegments */ 1263 CESA_MAX_REQUEST_SIZE, 0, /* maxsegsz, flags */ 1264 NULL, NULL, /* lockfunc, lockfuncarg */ 1265 &sc->sc_data_dtag); /* dmat */ 1266 if (error) 1267 goto err3; 1268 1269 /* Initialize data structures: TDMA Descriptors Pool */ 1270 error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm, 1271 CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc)); 1272 if (error) 1273 goto err4; 1274 1275 STAILQ_INIT(&sc->sc_free_tdesc); 1276 for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) { 1277 sc->sc_tdesc[i].ctd_cthd = 1278 (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i; 1279 sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr + 1280 (i * sizeof(struct cesa_tdma_hdesc)); 1281 STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i], 1282 ctd_stq); 1283 } 1284 1285 /* Initialize data structures: SA Descriptors Pool */ 1286 error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm, 1287 CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc)); 1288 if (error) 1289 goto err5; 1290 1291 STAILQ_INIT(&sc->sc_free_sdesc); 1292 for (i = 0; i < CESA_SA_DESCRIPTORS; i++) { 1293 sc->sc_sdesc[i].csd_cshd = 1294 (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i; 1295 sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr + 1296 (i * sizeof(struct cesa_sa_hdesc)); 1297 STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i], 1298 csd_stq); 1299 } 1300 1301 /* Initialize data structures: Requests Pool */ 1302 error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm, 1303 CESA_REQUESTS * sizeof(struct cesa_sa_data)); 1304 if (error) 1305 goto err6; 1306 1307 STAILQ_INIT(&sc->sc_free_requests); 1308 STAILQ_INIT(&sc->sc_ready_requests); 1309 STAILQ_INIT(&sc->sc_queued_requests); 1310 for (i = 0; i < CESA_REQUESTS; i++) { 1311 sc->sc_requests[i].cr_csd = 1312 (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i; 1313 sc->sc_requests[i].cr_csd_paddr = 1314 sc->sc_requests_cdm.cdm_paddr + 1315 (i * sizeof(struct cesa_sa_data)); 1316 1317 /* Preallocate DMA maps */ 1318 error = bus_dmamap_create(sc->sc_data_dtag, 0, 1319 &sc->sc_requests[i].cr_dmap); 1320 if (error && i > 0) { 1321 i--; 1322 do { 1323 bus_dmamap_destroy(sc->sc_data_dtag, 1324 sc->sc_requests[i].cr_dmap); 1325 } while (i--); 1326 1327 goto err7; 1328 } 1329 1330 STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i], 1331 cr_stq); 1332 } 1333 1334 /* 1335 * Initialize TDMA: 1336 * - Burst limit: 128 bytes, 1337 * - Outstanding reads enabled, 1338 * - No byte-swap. 1339 */ 1340 val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 | 1341 CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE; 1342 1343 if (sc->sc_soc_id == MV_DEV_88F6828 || 1344 sc->sc_soc_id == MV_DEV_88F6820 || 1345 sc->sc_soc_id == MV_DEV_88F6810) 1346 val |= CESA_TDMA_NUM_OUTSTAND; 1347 1348 CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val); 1349 1350 /* 1351 * Initialize SA: 1352 * - SA descriptor is present at beginning of CESA SRAM, 1353 * - Multi-packet chain mode, 1354 * - Cooperation with TDMA enabled. 1355 */ 1356 CESA_REG_WRITE(sc, CESA_SA_DPR, 0); 1357 CESA_REG_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA | 1358 CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE); 1359 1360 /* Unmask interrupts */ 1361 CESA_REG_WRITE(sc, CESA_ICR, 0); 1362 CESA_REG_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr); 1363 CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); 1364 CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS | 1365 CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT | 1366 CESA_TDMA_EMR_DATA_ERROR); 1367 1368 /* Register in OCF */ 1369 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct cesa_session), 1370 CRYPTOCAP_F_HARDWARE); 1371 if (sc->sc_cid < 0) { 1372 device_printf(dev, "could not get crypto driver id\n"); 1373 goto err8; 1374 } 1375 1376 return (0); 1377 err8: 1378 for (i = 0; i < CESA_REQUESTS; i++) 1379 bus_dmamap_destroy(sc->sc_data_dtag, 1380 sc->sc_requests[i].cr_dmap); 1381 err7: 1382 cesa_free_dma_mem(&sc->sc_requests_cdm); 1383 err6: 1384 cesa_free_dma_mem(&sc->sc_sdesc_cdm); 1385 err5: 1386 cesa_free_dma_mem(&sc->sc_tdesc_cdm); 1387 err4: 1388 bus_dma_tag_destroy(sc->sc_data_dtag); 1389 err3: 1390 bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); 1391 err2: 1392 if (sc->sc_soc_id == MV_DEV_88F6828 || 1393 sc->sc_soc_id == MV_DEV_88F6820 || 1394 sc->sc_soc_id == MV_DEV_88F6810) 1395 pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); 1396 err1: 1397 bus_release_resources(dev, cesa_res_spec, sc->sc_res); 1398 err0: 1399 mtx_destroy(&sc->sc_sessions_lock); 1400 mtx_destroy(&sc->sc_requests_lock); 1401 mtx_destroy(&sc->sc_sdesc_lock); 1402 mtx_destroy(&sc->sc_tdesc_lock); 1403 mtx_destroy(&sc->sc_sc_lock); 1404 return (ENXIO); 1405 } 1406 1407 static int 1408 cesa_detach(device_t dev) 1409 { 1410 struct cesa_softc *sc; 1411 int i; 1412 1413 sc = device_get_softc(dev); 1414 1415 /* TODO: Wait for queued requests completion before shutdown. */ 1416 1417 /* Mask interrupts */ 1418 CESA_REG_WRITE(sc, CESA_ICM, 0); 1419 CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, 0); 1420 1421 /* Unregister from OCF */ 1422 crypto_unregister_all(sc->sc_cid); 1423 1424 /* Free DMA Maps */ 1425 for (i = 0; i < CESA_REQUESTS; i++) 1426 bus_dmamap_destroy(sc->sc_data_dtag, 1427 sc->sc_requests[i].cr_dmap); 1428 1429 /* Free DMA Memory */ 1430 cesa_free_dma_mem(&sc->sc_requests_cdm); 1431 cesa_free_dma_mem(&sc->sc_sdesc_cdm); 1432 cesa_free_dma_mem(&sc->sc_tdesc_cdm); 1433 1434 /* Free DMA Tag */ 1435 bus_dma_tag_destroy(sc->sc_data_dtag); 1436 1437 /* Stop interrupt */ 1438 bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); 1439 1440 /* Relase I/O and IRQ resources */ 1441 bus_release_resources(dev, cesa_res_spec, sc->sc_res); 1442 1443 /* Unmap SRAM memory */ 1444 if (sc->sc_soc_id == MV_DEV_88F6828 || 1445 sc->sc_soc_id == MV_DEV_88F6820 || 1446 sc->sc_soc_id == MV_DEV_88F6810) 1447 pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); 1448 1449 /* Destroy mutexes */ 1450 mtx_destroy(&sc->sc_sessions_lock); 1451 mtx_destroy(&sc->sc_requests_lock); 1452 mtx_destroy(&sc->sc_sdesc_lock); 1453 mtx_destroy(&sc->sc_tdesc_lock); 1454 mtx_destroy(&sc->sc_sc_lock); 1455 1456 return (0); 1457 } 1458 1459 static void 1460 cesa_intr(void *arg) 1461 { 1462 STAILQ_HEAD(, cesa_request) requests; 1463 struct cesa_request *cr, *tmp; 1464 struct cesa_softc *sc; 1465 uint32_t ecr, icr; 1466 uint8_t hash[HASH_MAX_LEN]; 1467 int blocked; 1468 1469 sc = arg; 1470 1471 /* Ack interrupt */ 1472 ecr = CESA_TDMA_READ(sc, CESA_TDMA_ECR); 1473 CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); 1474 icr = CESA_REG_READ(sc, CESA_ICR); 1475 CESA_REG_WRITE(sc, CESA_ICR, 0); 1476 1477 /* Check for TDMA errors */ 1478 if (ecr & CESA_TDMA_ECR_MISS) { 1479 device_printf(sc->sc_dev, "TDMA Miss error detected!\n"); 1480 sc->sc_error = EIO; 1481 } 1482 1483 if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) { 1484 device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n"); 1485 sc->sc_error = EIO; 1486 } 1487 1488 if (ecr & CESA_TDMA_ECR_BOTH_HIT) { 1489 device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n"); 1490 sc->sc_error = EIO; 1491 } 1492 1493 if (ecr & CESA_TDMA_ECR_DATA_ERROR) { 1494 device_printf(sc->sc_dev, "TDMA Data error detected!\n"); 1495 sc->sc_error = EIO; 1496 } 1497 1498 /* Check for CESA errors */ 1499 if (icr & sc->sc_tperr) { 1500 device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n"); 1501 sc->sc_error = EIO; 1502 } 1503 1504 /* If there is nothing more to do, return */ 1505 if ((icr & CESA_ICR_ACCTDMA) == 0) 1506 return; 1507 1508 /* Get all finished requests */ 1509 CESA_LOCK(sc, requests); 1510 STAILQ_INIT(&requests); 1511 STAILQ_CONCAT(&requests, &sc->sc_queued_requests); 1512 STAILQ_INIT(&sc->sc_queued_requests); 1513 CESA_UNLOCK(sc, requests); 1514 1515 /* Execute all ready requests */ 1516 cesa_execute(sc); 1517 1518 /* Process completed requests */ 1519 cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD | 1520 BUS_DMASYNC_POSTWRITE); 1521 1522 STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) { 1523 bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, 1524 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1525 1526 cr->cr_crp->crp_etype = sc->sc_error; 1527 if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0) { 1528 if (cr->cr_crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { 1529 crypto_copydata(cr->cr_crp, 1530 cr->cr_crp->crp_digest_start, 1531 cr->cr_cs->cs_hlen, hash); 1532 if (timingsafe_bcmp(hash, cr->cr_csd->csd_hash, 1533 cr->cr_cs->cs_hlen) != 0) 1534 cr->cr_crp->crp_etype = EBADMSG; 1535 } else 1536 crypto_copyback(cr->cr_crp, 1537 cr->cr_crp->crp_digest_start, 1538 cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash); 1539 } 1540 crypto_done(cr->cr_crp); 1541 cesa_free_request(sc, cr); 1542 } 1543 1544 cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD | 1545 BUS_DMASYNC_PREWRITE); 1546 1547 sc->sc_error = 0; 1548 1549 /* Unblock driver if it ran out of resources */ 1550 CESA_LOCK(sc, sc); 1551 blocked = sc->sc_blocked; 1552 sc->sc_blocked = 0; 1553 CESA_UNLOCK(sc, sc); 1554 1555 if (blocked) 1556 crypto_unblock(sc->sc_cid, blocked); 1557 } 1558 1559 static bool 1560 cesa_cipher_supported(const struct crypto_session_params *csp) 1561 { 1562 1563 switch (csp->csp_cipher_alg) { 1564 case CRYPTO_AES_CBC: 1565 if (csp->csp_ivlen != AES_BLOCK_LEN) 1566 return (false); 1567 break; 1568 default: 1569 return (false); 1570 } 1571 1572 if (csp->csp_cipher_klen > CESA_MAX_KEY_LEN) 1573 return (false); 1574 1575 return (true); 1576 } 1577 1578 static bool 1579 cesa_auth_supported(struct cesa_softc *sc, 1580 const struct crypto_session_params *csp) 1581 { 1582 1583 switch (csp->csp_auth_alg) { 1584 case CRYPTO_SHA2_256_HMAC: 1585 if (!(sc->sc_soc_id == MV_DEV_88F6828 || 1586 sc->sc_soc_id == MV_DEV_88F6820 || 1587 sc->sc_soc_id == MV_DEV_88F6810)) 1588 return (false); 1589 /* FALLTHROUGH */ 1590 case CRYPTO_SHA1: 1591 case CRYPTO_SHA1_HMAC: 1592 break; 1593 default: 1594 return (false); 1595 } 1596 1597 if (csp->csp_auth_klen > CESA_MAX_MKEY_LEN) 1598 return (false); 1599 1600 return (true); 1601 } 1602 1603 static int 1604 cesa_probesession(device_t dev, const struct crypto_session_params *csp) 1605 { 1606 struct cesa_softc *sc; 1607 1608 sc = device_get_softc(dev); 1609 if (csp->csp_flags != 0) 1610 return (EINVAL); 1611 switch (csp->csp_mode) { 1612 case CSP_MODE_DIGEST: 1613 if (!cesa_auth_supported(sc, csp)) 1614 return (EINVAL); 1615 break; 1616 case CSP_MODE_CIPHER: 1617 if (!cesa_cipher_supported(csp)) 1618 return (EINVAL); 1619 break; 1620 case CSP_MODE_ETA: 1621 if (!cesa_auth_supported(sc, csp) || 1622 !cesa_cipher_supported(csp)) 1623 return (EINVAL); 1624 break; 1625 default: 1626 return (EINVAL); 1627 } 1628 return (CRYPTODEV_PROBE_HARDWARE); 1629 } 1630 1631 static int 1632 cesa_newsession(device_t dev, crypto_session_t cses, 1633 const struct crypto_session_params *csp) 1634 { 1635 struct cesa_session *cs; 1636 int error; 1637 1638 error = 0; 1639 1640 /* Allocate session */ 1641 cs = crypto_get_driver_session(cses); 1642 1643 /* Prepare CESA configuration */ 1644 cs->cs_config = 0; 1645 cs->cs_ivlen = 1; 1646 cs->cs_mblen = 1; 1647 1648 switch (csp->csp_cipher_alg) { 1649 case CRYPTO_AES_CBC: 1650 cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC; 1651 cs->cs_ivlen = AES_BLOCK_LEN; 1652 break; 1653 } 1654 1655 switch (csp->csp_auth_alg) { 1656 case CRYPTO_SHA1: 1657 cs->cs_mblen = 1; 1658 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN : 1659 csp->csp_auth_mlen; 1660 cs->cs_config |= CESA_CSHD_SHA1; 1661 break; 1662 case CRYPTO_SHA1_HMAC: 1663 cs->cs_mblen = SHA1_BLOCK_LEN; 1664 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN : 1665 csp->csp_auth_mlen; 1666 cs->cs_config |= CESA_CSHD_SHA1_HMAC; 1667 if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN) 1668 cs->cs_config |= CESA_CSHD_96_BIT_HMAC; 1669 break; 1670 case CRYPTO_SHA2_256_HMAC: 1671 cs->cs_mblen = SHA2_256_BLOCK_LEN; 1672 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA2_256_HASH_LEN : 1673 csp->csp_auth_mlen; 1674 cs->cs_config |= CESA_CSHD_SHA2_256_HMAC; 1675 break; 1676 } 1677 1678 /* Save cipher key */ 1679 if (csp->csp_cipher_key != NULL) { 1680 memcpy(cs->cs_key, csp->csp_cipher_key, 1681 csp->csp_cipher_klen); 1682 if (csp->csp_cipher_alg == CRYPTO_AES_CBC) 1683 error = cesa_prep_aes_key(cs, csp); 1684 } 1685 1686 /* Save digest key */ 1687 if (csp->csp_auth_key != NULL) 1688 cesa_set_mkey(cs, csp->csp_auth_alg, csp->csp_auth_key, 1689 csp->csp_auth_klen); 1690 1691 return (error); 1692 } 1693 1694 static int 1695 cesa_process(device_t dev, struct cryptop *crp, int hint) 1696 { 1697 const struct crypto_session_params *csp; 1698 struct cesa_request *cr; 1699 struct cesa_session *cs; 1700 struct cesa_softc *sc; 1701 int error; 1702 1703 sc = device_get_softc(dev); 1704 error = 0; 1705 1706 cs = crypto_get_driver_session(crp->crp_session); 1707 csp = crypto_get_params(crp->crp_session); 1708 1709 /* Check and parse input */ 1710 if (crypto_buffer_len(&crp->crp_buf) > CESA_MAX_REQUEST_SIZE) { 1711 crp->crp_etype = E2BIG; 1712 crypto_done(crp); 1713 return (0); 1714 } 1715 1716 /* 1717 * For requests with AAD, only requests where the AAD is 1718 * immediately adjacent to the payload are supported. 1719 */ 1720 if (crp->crp_aad_length != 0 && 1721 (crp->crp_aad_start + crp->crp_aad_length) != 1722 crp->crp_payload_start) { 1723 crp->crp_etype = EINVAL; 1724 crypto_done(crp); 1725 return (0); 1726 } 1727 1728 /* 1729 * Get request descriptor. Block driver if there is no free 1730 * descriptors in pool. 1731 */ 1732 cr = cesa_alloc_request(sc); 1733 if (!cr) { 1734 CESA_LOCK(sc, sc); 1735 sc->sc_blocked = CRYPTO_SYMQ; 1736 CESA_UNLOCK(sc, sc); 1737 return (ERESTART); 1738 } 1739 1740 /* Prepare request */ 1741 cr->cr_crp = crp; 1742 cr->cr_cs = cs; 1743 1744 CESA_LOCK(sc, sessions); 1745 cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1746 1747 if (csp->csp_cipher_alg != 0) 1748 crypto_read_iv(crp, cr->cr_csd->csd_iv); 1749 1750 if (crp->crp_cipher_key != NULL) { 1751 memcpy(cs->cs_key, crp->crp_cipher_key, 1752 csp->csp_cipher_klen); 1753 if (csp->csp_cipher_alg == CRYPTO_AES_CBC) 1754 error = cesa_prep_aes_key(cs, csp); 1755 } 1756 1757 if (!error && crp->crp_auth_key != NULL) 1758 cesa_set_mkey(cs, csp->csp_auth_alg, crp->crp_auth_key, 1759 csp->csp_auth_klen); 1760 1761 /* Convert request to chain of TDMA and SA descriptors */ 1762 if (!error) 1763 error = cesa_create_chain(sc, csp, cr); 1764 1765 cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1766 CESA_UNLOCK(sc, sessions); 1767 1768 if (error) { 1769 cesa_free_request(sc, cr); 1770 crp->crp_etype = error; 1771 crypto_done(crp); 1772 return (0); 1773 } 1774 1775 bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD | 1776 BUS_DMASYNC_PREWRITE); 1777 1778 /* Enqueue request to execution */ 1779 cesa_enqueue_request(sc, cr); 1780 1781 /* Start execution, if we have no more requests in queue */ 1782 if ((hint & CRYPTO_HINT_MORE) == 0) 1783 cesa_execute(sc); 1784 1785 return (0); 1786 } 1787