1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (C) 2009-2011 Semihalf. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 /* 30 * CESA SRAM Memory Map: 31 * 32 * +------------------------+ <= sc->sc_sram_base_va + CESA_SRAM_SIZE 33 * | | 34 * | DATA | 35 * | | 36 * +------------------------+ <= sc->sc_sram_base_va + CESA_DATA(0) 37 * | struct cesa_sa_data | 38 * +------------------------+ 39 * | struct cesa_sa_hdesc | 40 * +------------------------+ <= sc->sc_sram_base_va 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/bus.h> 49 #include <sys/endian.h> 50 #include <sys/kernel.h> 51 #include <sys/lock.h> 52 #include <sys/mbuf.h> 53 #include <sys/module.h> 54 #include <sys/mutex.h> 55 #include <sys/rman.h> 56 57 #include <machine/bus.h> 58 #include <machine/intr.h> 59 #include <machine/resource.h> 60 #include <machine/fdt.h> 61 62 #include <dev/fdt/simplebus.h> 63 #include <dev/fdt/fdt_common.h> 64 #include <dev/ofw/ofw_bus.h> 65 #include <dev/ofw/ofw_bus_subr.h> 66 67 #include <crypto/sha1.h> 68 #include <crypto/sha2/sha256.h> 69 #include <crypto/rijndael/rijndael.h> 70 #include <opencrypto/cryptodev.h> 71 #include <opencrypto/xform.h> 72 #include "cryptodev_if.h" 73 74 #include <arm/mv/mvreg.h> 75 #include <arm/mv/mvvar.h> 76 #include "cesa.h" 77 78 static int cesa_probe(device_t); 79 static int cesa_attach(device_t); 80 static int cesa_attach_late(device_t); 81 static int cesa_detach(device_t); 82 static void cesa_intr(void *); 83 static int cesa_probesession(device_t, 84 const struct crypto_session_params *); 85 static int cesa_newsession(device_t, crypto_session_t, 86 const struct crypto_session_params *); 87 static int cesa_process(device_t, struct cryptop *, int); 88 89 static struct resource_spec cesa_res_spec[] = { 90 { SYS_RES_MEMORY, 0, RF_ACTIVE }, 91 { SYS_RES_MEMORY, 1, RF_ACTIVE }, 92 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 93 { -1, 0 } 94 }; 95 96 static device_method_t cesa_methods[] = { 97 /* Device interface */ 98 DEVMETHOD(device_probe, cesa_probe), 99 DEVMETHOD(device_attach, cesa_attach), 100 DEVMETHOD(device_detach, cesa_detach), 101 102 /* Crypto device methods */ 103 DEVMETHOD(cryptodev_probesession, cesa_probesession), 104 DEVMETHOD(cryptodev_newsession, cesa_newsession), 105 DEVMETHOD(cryptodev_process, cesa_process), 106 107 DEVMETHOD_END 108 }; 109 110 static driver_t cesa_driver = { 111 "cesa", 112 cesa_methods, 113 sizeof (struct cesa_softc) 114 }; 115 116 DRIVER_MODULE(cesa, simplebus, cesa_driver, 0, 0); 117 MODULE_DEPEND(cesa, crypto, 1, 1, 1); 118 119 static void 120 cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd) 121 { 122 #ifdef DEBUG 123 device_t dev; 124 125 dev = sc->sc_dev; 126 device_printf(dev, "CESA SA Hardware Descriptor:\n"); 127 device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config); 128 device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src); 129 device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst); 130 device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen); 131 device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key); 132 device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv); 133 device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf); 134 device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src); 135 device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst); 136 device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen); 137 device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen); 138 device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in); 139 device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out); 140 #endif 141 } 142 143 static void 144 cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 145 { 146 struct cesa_dma_mem *cdm; 147 148 if (error) 149 return; 150 151 KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1.")); 152 cdm = arg; 153 cdm->cdm_paddr = segs->ds_addr; 154 } 155 156 static int 157 cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm, 158 bus_size_t size) 159 { 160 int error; 161 162 KASSERT(cdm->cdm_vaddr == NULL, 163 ("%s(): DMA memory descriptor in use.", __func__)); 164 165 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 166 PAGE_SIZE, 0, /* alignment, boundary */ 167 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 168 BUS_SPACE_MAXADDR, /* highaddr */ 169 NULL, NULL, /* filtfunc, filtfuncarg */ 170 size, 1, /* maxsize, nsegments */ 171 size, 0, /* maxsegsz, flags */ 172 NULL, NULL, /* lockfunc, lockfuncarg */ 173 &cdm->cdm_tag); /* dmat */ 174 if (error) { 175 device_printf(sc->sc_dev, "failed to allocate busdma tag, error" 176 " %i!\n", error); 177 178 goto err1; 179 } 180 181 error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr, 182 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map); 183 if (error) { 184 device_printf(sc->sc_dev, "failed to allocate DMA safe" 185 " memory, error %i!\n", error); 186 187 goto err2; 188 } 189 190 error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr, 191 size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT); 192 if (error) { 193 device_printf(sc->sc_dev, "cannot get address of the DMA" 194 " memory, error %i\n", error); 195 196 goto err3; 197 } 198 199 return (0); 200 err3: 201 bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); 202 err2: 203 bus_dma_tag_destroy(cdm->cdm_tag); 204 err1: 205 cdm->cdm_vaddr = NULL; 206 return (error); 207 } 208 209 static void 210 cesa_free_dma_mem(struct cesa_dma_mem *cdm) 211 { 212 213 bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map); 214 bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); 215 bus_dma_tag_destroy(cdm->cdm_tag); 216 cdm->cdm_vaddr = NULL; 217 } 218 219 static void 220 cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op) 221 { 222 223 /* Sync only if dma memory is valid */ 224 if (cdm->cdm_vaddr != NULL) 225 bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op); 226 } 227 228 static void 229 cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op) 230 { 231 232 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op); 233 cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op); 234 cesa_sync_dma_mem(&sc->sc_requests_cdm, op); 235 } 236 237 static struct cesa_request * 238 cesa_alloc_request(struct cesa_softc *sc) 239 { 240 struct cesa_request *cr; 241 242 CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests); 243 if (!cr) 244 return (NULL); 245 246 STAILQ_INIT(&cr->cr_tdesc); 247 STAILQ_INIT(&cr->cr_sdesc); 248 249 return (cr); 250 } 251 252 static void 253 cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr) 254 { 255 256 /* Free TDMA descriptors assigned to this request */ 257 CESA_LOCK(sc, tdesc); 258 STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc); 259 CESA_UNLOCK(sc, tdesc); 260 261 /* Free SA descriptors assigned to this request */ 262 CESA_LOCK(sc, sdesc); 263 STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc); 264 CESA_UNLOCK(sc, sdesc); 265 266 /* Unload DMA memory associated with request */ 267 if (cr->cr_dmap_loaded) { 268 bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap); 269 cr->cr_dmap_loaded = 0; 270 } 271 272 CESA_GENERIC_FREE_LOCKED(sc, cr, requests); 273 } 274 275 static void 276 cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr) 277 { 278 279 CESA_LOCK(sc, requests); 280 STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq); 281 CESA_UNLOCK(sc, requests); 282 } 283 284 static struct cesa_tdma_desc * 285 cesa_alloc_tdesc(struct cesa_softc *sc) 286 { 287 struct cesa_tdma_desc *ctd; 288 289 CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc); 290 291 if (!ctd) 292 device_printf(sc->sc_dev, "TDMA descriptors pool exhaused. " 293 "Consider increasing CESA_TDMA_DESCRIPTORS.\n"); 294 295 return (ctd); 296 } 297 298 static struct cesa_sa_desc * 299 cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr) 300 { 301 struct cesa_sa_desc *csd; 302 303 CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc); 304 if (!csd) { 305 device_printf(sc->sc_dev, "SA descriptors pool exhaused. " 306 "Consider increasing CESA_SA_DESCRIPTORS.\n"); 307 return (NULL); 308 } 309 310 STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq); 311 312 /* Fill-in SA descriptor with default values */ 313 csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key); 314 csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv); 315 csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv); 316 csd->csd_cshd->cshd_enc_src = 0; 317 csd->csd_cshd->cshd_enc_dst = 0; 318 csd->csd_cshd->cshd_enc_dlen = 0; 319 csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash); 320 csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in); 321 csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out); 322 csd->csd_cshd->cshd_mac_src = 0; 323 csd->csd_cshd->cshd_mac_dlen = 0; 324 325 return (csd); 326 } 327 328 static struct cesa_tdma_desc * 329 cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src, 330 bus_size_t size) 331 { 332 struct cesa_tdma_desc *ctd; 333 334 ctd = cesa_alloc_tdesc(sc); 335 if (!ctd) 336 return (NULL); 337 338 ctd->ctd_cthd->cthd_dst = dst; 339 ctd->ctd_cthd->cthd_src = src; 340 ctd->ctd_cthd->cthd_byte_count = size; 341 342 /* Handle special control packet */ 343 if (size != 0) 344 ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED; 345 else 346 ctd->ctd_cthd->cthd_flags = 0; 347 348 return (ctd); 349 } 350 351 static struct cesa_tdma_desc * 352 cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr) 353 { 354 355 return (cesa_tdma_copy(sc, sc->sc_sram_base_pa + 356 sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr, 357 sizeof(struct cesa_sa_data))); 358 } 359 360 static struct cesa_tdma_desc * 361 cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr) 362 { 363 364 return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base_pa + 365 sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data))); 366 } 367 368 static struct cesa_tdma_desc * 369 cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd) 370 { 371 372 return (cesa_tdma_copy(sc, sc->sc_sram_base_pa, csd->csd_cshd_paddr, 373 sizeof(struct cesa_sa_hdesc))); 374 } 375 376 static void 377 cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd) 378 { 379 struct cesa_tdma_desc *ctd_prev; 380 381 if (!STAILQ_EMPTY(&cr->cr_tdesc)) { 382 ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq); 383 ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr; 384 } 385 386 ctd->ctd_cthd->cthd_next = 0; 387 STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq); 388 } 389 390 static int 391 cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr, 392 struct cesa_packet *cp, struct cesa_sa_desc *csd) 393 { 394 struct cesa_tdma_desc *ctd, *tmp; 395 396 /* Copy SA descriptor for this packet */ 397 ctd = cesa_tdma_copy_sdesc(sc, csd); 398 if (!ctd) 399 return (ENOMEM); 400 401 cesa_append_tdesc(cr, ctd); 402 403 /* Copy data to be processed */ 404 STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp) 405 cesa_append_tdesc(cr, ctd); 406 STAILQ_INIT(&cp->cp_copyin); 407 408 /* Insert control descriptor */ 409 ctd = cesa_tdma_copy(sc, 0, 0, 0); 410 if (!ctd) 411 return (ENOMEM); 412 413 cesa_append_tdesc(cr, ctd); 414 415 /* Copy back results */ 416 STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp) 417 cesa_append_tdesc(cr, ctd); 418 STAILQ_INIT(&cp->cp_copyout); 419 420 return (0); 421 } 422 423 static void 424 cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen) 425 { 426 union authctx auth_ctx; 427 uint32_t *hout; 428 uint32_t *hin; 429 int i; 430 431 hin = (uint32_t *)cs->cs_hiv_in; 432 hout = (uint32_t *)cs->cs_hiv_out; 433 434 switch (alg) { 435 case CRYPTO_SHA1_HMAC: 436 hmac_init_ipad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx); 437 memcpy(hin, auth_ctx.sha1ctx.h.b32, 438 sizeof(auth_ctx.sha1ctx.h.b32)); 439 hmac_init_opad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx); 440 memcpy(hout, auth_ctx.sha1ctx.h.b32, 441 sizeof(auth_ctx.sha1ctx.h.b32)); 442 break; 443 case CRYPTO_SHA2_256_HMAC: 444 hmac_init_ipad(&auth_hash_hmac_sha2_256, mkey, mklen, 445 &auth_ctx); 446 memcpy(hin, auth_ctx.sha256ctx.state, 447 sizeof(auth_ctx.sha256ctx.state)); 448 hmac_init_opad(&auth_hash_hmac_sha2_256, mkey, mklen, 449 &auth_ctx); 450 memcpy(hout, auth_ctx.sha256ctx.state, 451 sizeof(auth_ctx.sha256ctx.state)); 452 break; 453 default: 454 panic("shouldn't get here"); 455 } 456 457 for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) { 458 hin[i] = htobe32(hin[i]); 459 hout[i] = htobe32(hout[i]); 460 } 461 explicit_bzero(&auth_ctx, sizeof(auth_ctx)); 462 } 463 464 static int 465 cesa_prep_aes_key(struct cesa_session *cs, 466 const struct crypto_session_params *csp) 467 { 468 uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; 469 uint32_t *dkey; 470 int i; 471 472 rijndaelKeySetupEnc(ek, cs->cs_key, csp->csp_cipher_klen * 8); 473 474 cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK; 475 dkey = (uint32_t *)cs->cs_aes_dkey; 476 477 switch (csp->csp_cipher_klen) { 478 case 16: 479 cs->cs_config |= CESA_CSH_AES_KLEN_128; 480 for (i = 0; i < 4; i++) 481 *dkey++ = htobe32(ek[4 * 10 + i]); 482 break; 483 case 24: 484 cs->cs_config |= CESA_CSH_AES_KLEN_192; 485 for (i = 0; i < 4; i++) 486 *dkey++ = htobe32(ek[4 * 12 + i]); 487 for (i = 0; i < 2; i++) 488 *dkey++ = htobe32(ek[4 * 11 + 2 + i]); 489 break; 490 case 32: 491 cs->cs_config |= CESA_CSH_AES_KLEN_256; 492 for (i = 0; i < 4; i++) 493 *dkey++ = htobe32(ek[4 * 14 + i]); 494 for (i = 0; i < 4; i++) 495 *dkey++ = htobe32(ek[4 * 13 + i]); 496 break; 497 default: 498 return (EINVAL); 499 } 500 501 return (0); 502 } 503 504 static void 505 cesa_start_packet(struct cesa_packet *cp, unsigned int size) 506 { 507 508 cp->cp_size = size; 509 cp->cp_offset = 0; 510 STAILQ_INIT(&cp->cp_copyin); 511 STAILQ_INIT(&cp->cp_copyout); 512 } 513 514 static int 515 cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp, 516 bus_dma_segment_t *seg) 517 { 518 struct cesa_tdma_desc *ctd; 519 unsigned int bsize; 520 521 /* Calculate size of block copy */ 522 bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset); 523 524 if (bsize > 0) { 525 ctd = cesa_tdma_copy(sc, sc->sc_sram_base_pa + 526 CESA_DATA(cp->cp_offset), seg->ds_addr, bsize); 527 if (!ctd) 528 return (-ENOMEM); 529 530 STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq); 531 532 ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base_pa + 533 CESA_DATA(cp->cp_offset), bsize); 534 if (!ctd) 535 return (-ENOMEM); 536 537 STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq); 538 539 seg->ds_len -= bsize; 540 seg->ds_addr += bsize; 541 cp->cp_offset += bsize; 542 } 543 544 return (bsize); 545 } 546 547 static void 548 cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 549 { 550 unsigned int mpsize, fragmented; 551 unsigned int mlen, mskip, tmlen; 552 struct cesa_chain_info *cci; 553 unsigned int elen, eskip; 554 unsigned int skip, len; 555 struct cesa_sa_desc *csd; 556 struct cesa_request *cr; 557 struct cryptop *crp; 558 struct cesa_softc *sc; 559 struct cesa_packet cp; 560 bus_dma_segment_t seg; 561 uint32_t config; 562 int size; 563 564 cci = arg; 565 sc = cci->cci_sc; 566 cr = cci->cci_cr; 567 crp = cr->cr_crp; 568 569 if (error) { 570 cci->cci_error = error; 571 return; 572 } 573 574 /* 575 * Only do a combined op if the AAD is adjacent to the payload 576 * and the AAD length is a multiple of the IV length. The 577 * checks against 'config' are to avoid recursing when the 578 * logic below invokes separate operations. 579 */ 580 config = cci->cci_config; 581 if (((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC || 582 (config & CESA_CSHD_OP_MASK) == CESA_CSHD_ENC_AND_MAC) && 583 crp->crp_aad_length != 0 && 584 (crp->crp_aad_length & (cr->cr_cs->cs_ivlen - 1)) != 0) { 585 /* 586 * Data alignment in the request does not meet CESA requiremnts 587 * for combined encryption/decryption and hashing. We have to 588 * split the request to separate operations and process them 589 * one by one. 590 */ 591 if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) { 592 config &= ~CESA_CSHD_OP_MASK; 593 594 cci->cci_config = config | CESA_CSHD_MAC; 595 cesa_create_chain_cb(cci, segs, nseg, 0); 596 597 cci->cci_config = config | CESA_CSHD_ENC; 598 cesa_create_chain_cb(cci, segs, nseg, 0); 599 } else { 600 config &= ~CESA_CSHD_OP_MASK; 601 602 cci->cci_config = config | CESA_CSHD_ENC; 603 cesa_create_chain_cb(cci, segs, nseg, 0); 604 605 cci->cci_config = config | CESA_CSHD_MAC; 606 cesa_create_chain_cb(cci, segs, nseg, 0); 607 } 608 609 return; 610 } 611 612 mskip = mlen = eskip = elen = 0; 613 614 if (crp->crp_aad_length == 0) { 615 skip = crp->crp_payload_start; 616 len = crp->crp_payload_length; 617 switch (config & CESA_CSHD_OP_MASK) { 618 case CESA_CSHD_ENC: 619 eskip = skip; 620 elen = len; 621 break; 622 case CESA_CSHD_MAC: 623 mskip = skip; 624 mlen = len; 625 break; 626 default: 627 eskip = skip; 628 elen = len; 629 mskip = skip; 630 mlen = len; 631 break; 632 } 633 } else { 634 /* 635 * For an encryption-only separate request, only 636 * process the payload. For combined requests and 637 * hash-only requests, process the entire region. 638 */ 639 switch (config & CESA_CSHD_OP_MASK) { 640 case CESA_CSHD_ENC: 641 skip = crp->crp_payload_start; 642 len = crp->crp_payload_length; 643 eskip = skip; 644 elen = len; 645 break; 646 case CESA_CSHD_MAC: 647 skip = crp->crp_aad_start; 648 len = crp->crp_aad_length + crp->crp_payload_length; 649 mskip = skip; 650 mlen = len; 651 break; 652 default: 653 skip = crp->crp_aad_start; 654 len = crp->crp_aad_length + crp->crp_payload_length; 655 mskip = skip; 656 mlen = len; 657 eskip = crp->crp_payload_start; 658 elen = crp->crp_payload_length; 659 break; 660 } 661 } 662 663 tmlen = mlen; 664 fragmented = 0; 665 mpsize = CESA_MAX_PACKET_SIZE; 666 mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1)); 667 668 /* Start first packet in chain */ 669 cesa_start_packet(&cp, MIN(mpsize, len)); 670 671 while (nseg-- && len > 0) { 672 seg = *(segs++); 673 674 /* 675 * Skip data in buffer on which neither ENC nor MAC operation 676 * is requested. 677 */ 678 if (skip > 0) { 679 size = MIN(skip, seg.ds_len); 680 skip -= size; 681 682 seg.ds_addr += size; 683 seg.ds_len -= size; 684 685 if (eskip > 0) 686 eskip -= size; 687 688 if (mskip > 0) 689 mskip -= size; 690 691 if (seg.ds_len == 0) 692 continue; 693 } 694 695 while (1) { 696 /* 697 * Fill in current packet with data. Break if there is 698 * no more data in current DMA segment or an error 699 * occurred. 700 */ 701 size = cesa_fill_packet(sc, &cp, &seg); 702 if (size <= 0) { 703 error = -size; 704 break; 705 } 706 707 len -= size; 708 709 /* If packet is full, append it to the chain */ 710 if (cp.cp_size == cp.cp_offset) { 711 csd = cesa_alloc_sdesc(sc, cr); 712 if (!csd) { 713 error = ENOMEM; 714 break; 715 } 716 717 /* Create SA descriptor for this packet */ 718 csd->csd_cshd->cshd_config = cci->cci_config; 719 csd->csd_cshd->cshd_mac_total_dlen = tmlen; 720 721 /* 722 * Enable fragmentation if request will not fit 723 * into one packet. 724 */ 725 if (len > 0) { 726 if (!fragmented) { 727 fragmented = 1; 728 csd->csd_cshd->cshd_config |= 729 CESA_CSHD_FRAG_FIRST; 730 } else 731 csd->csd_cshd->cshd_config |= 732 CESA_CSHD_FRAG_MIDDLE; 733 } else if (fragmented) 734 csd->csd_cshd->cshd_config |= 735 CESA_CSHD_FRAG_LAST; 736 737 if (eskip < cp.cp_size && elen > 0) { 738 csd->csd_cshd->cshd_enc_src = 739 CESA_DATA(eskip); 740 csd->csd_cshd->cshd_enc_dst = 741 CESA_DATA(eskip); 742 csd->csd_cshd->cshd_enc_dlen = 743 MIN(elen, cp.cp_size - eskip); 744 } 745 746 if (mskip < cp.cp_size && mlen > 0) { 747 csd->csd_cshd->cshd_mac_src = 748 CESA_DATA(mskip); 749 csd->csd_cshd->cshd_mac_dlen = 750 MIN(mlen, cp.cp_size - mskip); 751 } 752 753 elen -= csd->csd_cshd->cshd_enc_dlen; 754 eskip -= MIN(eskip, cp.cp_size); 755 mlen -= csd->csd_cshd->cshd_mac_dlen; 756 mskip -= MIN(mskip, cp.cp_size); 757 758 cesa_dump_cshd(sc, csd->csd_cshd); 759 760 /* Append packet to the request */ 761 error = cesa_append_packet(sc, cr, &cp, csd); 762 if (error) 763 break; 764 765 /* Start a new packet, as current is full */ 766 cesa_start_packet(&cp, MIN(mpsize, len)); 767 } 768 } 769 770 if (error) 771 break; 772 } 773 774 if (error) { 775 /* 776 * Move all allocated resources to the request. They will be 777 * freed later. 778 */ 779 STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin); 780 STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout); 781 cci->cci_error = error; 782 } 783 } 784 785 static int 786 cesa_create_chain(struct cesa_softc *sc, 787 const struct crypto_session_params *csp, struct cesa_request *cr) 788 { 789 struct cesa_chain_info cci; 790 struct cesa_tdma_desc *ctd; 791 uint32_t config; 792 int error; 793 794 error = 0; 795 CESA_LOCK_ASSERT(sc, sessions); 796 797 /* Create request metadata */ 798 if (csp->csp_cipher_klen != 0) { 799 if (csp->csp_cipher_alg == CRYPTO_AES_CBC && 800 !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op)) 801 memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey, 802 csp->csp_cipher_klen); 803 else 804 memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key, 805 csp->csp_cipher_klen); 806 } 807 808 if (csp->csp_auth_klen != 0) { 809 memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in, 810 CESA_MAX_HASH_LEN); 811 memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out, 812 CESA_MAX_HASH_LEN); 813 } 814 815 ctd = cesa_tdma_copyin_sa_data(sc, cr); 816 if (!ctd) 817 return (ENOMEM); 818 819 cesa_append_tdesc(cr, ctd); 820 821 /* Prepare SA configuration */ 822 config = cr->cr_cs->cs_config; 823 824 if (csp->csp_cipher_alg != 0 && 825 !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op)) 826 config |= CESA_CSHD_DECRYPT; 827 switch (csp->csp_mode) { 828 case CSP_MODE_CIPHER: 829 config |= CESA_CSHD_ENC; 830 break; 831 case CSP_MODE_DIGEST: 832 config |= CESA_CSHD_MAC; 833 break; 834 case CSP_MODE_ETA: 835 config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC : 836 CESA_CSHD_ENC_AND_MAC; 837 break; 838 } 839 840 /* Create data packets */ 841 cci.cci_sc = sc; 842 cci.cci_cr = cr; 843 cci.cci_config = config; 844 cci.cci_error = 0; 845 846 error = bus_dmamap_load_crp(sc->sc_data_dtag, cr->cr_dmap, cr->cr_crp, 847 cesa_create_chain_cb, &cci, BUS_DMA_NOWAIT); 848 849 if (!error) 850 cr->cr_dmap_loaded = 1; 851 852 if (cci.cci_error) 853 error = cci.cci_error; 854 855 if (error) 856 return (error); 857 858 /* Read back request metadata */ 859 ctd = cesa_tdma_copyout_sa_data(sc, cr); 860 if (!ctd) 861 return (ENOMEM); 862 863 cesa_append_tdesc(cr, ctd); 864 865 return (0); 866 } 867 868 static void 869 cesa_execute(struct cesa_softc *sc) 870 { 871 struct cesa_tdma_desc *prev_ctd, *ctd; 872 struct cesa_request *prev_cr, *cr; 873 874 CESA_LOCK(sc, requests); 875 876 /* 877 * If ready list is empty, there is nothing to execute. If queued list 878 * is not empty, the hardware is busy and we cannot start another 879 * execution. 880 */ 881 if (STAILQ_EMPTY(&sc->sc_ready_requests) || 882 !STAILQ_EMPTY(&sc->sc_queued_requests)) { 883 CESA_UNLOCK(sc, requests); 884 return; 885 } 886 887 /* Move all ready requests to queued list */ 888 STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests); 889 STAILQ_INIT(&sc->sc_ready_requests); 890 891 /* Create one execution chain from all requests on the list */ 892 if (STAILQ_FIRST(&sc->sc_queued_requests) != 893 STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) { 894 prev_cr = NULL; 895 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD | 896 BUS_DMASYNC_POSTWRITE); 897 898 STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) { 899 if (prev_cr) { 900 ctd = STAILQ_FIRST(&cr->cr_tdesc); 901 prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc, 902 cesa_tdma_desc, ctd_stq); 903 904 prev_ctd->ctd_cthd->cthd_next = 905 ctd->ctd_cthd_paddr; 906 } 907 908 prev_cr = cr; 909 } 910 911 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD | 912 BUS_DMASYNC_PREWRITE); 913 } 914 915 /* Start chain execution in hardware */ 916 cr = STAILQ_FIRST(&sc->sc_queued_requests); 917 ctd = STAILQ_FIRST(&cr->cr_tdesc); 918 919 CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr); 920 921 if (sc->sc_soc_id == MV_DEV_88F6828 || 922 sc->sc_soc_id == MV_DEV_88F6820 || 923 sc->sc_soc_id == MV_DEV_88F6810) 924 CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2); 925 else 926 CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE); 927 928 CESA_UNLOCK(sc, requests); 929 } 930 931 static int 932 cesa_setup_sram(struct cesa_softc *sc) 933 { 934 phandle_t sram_node; 935 ihandle_t sram_ihandle; 936 pcell_t sram_handle, sram_reg[2]; 937 void *sram_va; 938 int rv; 939 940 rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle", 941 (void *)&sram_handle, sizeof(sram_handle)); 942 if (rv <= 0) 943 return (rv); 944 945 sram_ihandle = (ihandle_t)sram_handle; 946 sram_node = OF_instance_to_package(sram_ihandle); 947 948 rv = OF_getencprop(sram_node, "reg", (void *)sram_reg, sizeof(sram_reg)); 949 if (rv <= 0) 950 return (rv); 951 952 sc->sc_sram_base_pa = sram_reg[0]; 953 /* Store SRAM size to be able to unmap in detach() */ 954 sc->sc_sram_size = sram_reg[1]; 955 956 if (sc->sc_soc_id != MV_DEV_88F6828 && 957 sc->sc_soc_id != MV_DEV_88F6820 && 958 sc->sc_soc_id != MV_DEV_88F6810) 959 return (0); 960 961 /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ 962 sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); 963 if (sram_va == NULL) 964 return (ENOMEM); 965 sc->sc_sram_base_va = (vm_offset_t)sram_va; 966 967 return (0); 968 } 969 970 /* 971 * Function: device_from_node 972 * This function returns appropriate device_t to phandle_t 973 * Parameters: 974 * root - device where you want to start search 975 * if you provide NULL here, function will take 976 * "root0" device as root. 977 * node - we are checking every device_t to be 978 * appropriate with this. 979 */ 980 static device_t 981 device_from_node(device_t root, phandle_t node) 982 { 983 device_t *children, retval; 984 int nkid, i; 985 986 /* Nothing matches no node */ 987 if (node == -1) 988 return (NULL); 989 990 if (root == NULL) 991 /* Get root of device tree */ 992 if ((root = device_lookup_by_name("root0")) == NULL) 993 return (NULL); 994 995 if (device_get_children(root, &children, &nkid) != 0) 996 return (NULL); 997 998 retval = NULL; 999 for (i = 0; i < nkid; i++) { 1000 /* Check if device and node matches */ 1001 if (OFW_BUS_GET_NODE(root, children[i]) == node) { 1002 retval = children[i]; 1003 break; 1004 } 1005 /* or go deeper */ 1006 if ((retval = device_from_node(children[i], node)) != NULL) 1007 break; 1008 } 1009 free(children, M_TEMP); 1010 1011 return (retval); 1012 } 1013 1014 static int 1015 cesa_setup_sram_armada(struct cesa_softc *sc) 1016 { 1017 phandle_t sram_node; 1018 ihandle_t sram_ihandle; 1019 pcell_t sram_handle[2]; 1020 void *sram_va; 1021 int rv, j; 1022 struct resource_list rl; 1023 struct resource_list_entry *rle; 1024 struct simplebus_softc *ssc; 1025 device_t sdev; 1026 1027 /* Get refs to SRAMS from CESA node */ 1028 rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "marvell,crypto-srams", 1029 (void *)sram_handle, sizeof(sram_handle)); 1030 if (rv <= 0) 1031 return (rv); 1032 1033 if (sc->sc_cesa_engine_id >= 2) 1034 return (ENXIO); 1035 1036 /* Get SRAM node on the basis of sc_cesa_engine_id */ 1037 sram_ihandle = (ihandle_t)sram_handle[sc->sc_cesa_engine_id]; 1038 sram_node = OF_instance_to_package(sram_ihandle); 1039 1040 /* Get device_t of simplebus (sram_node parent) */ 1041 sdev = device_from_node(NULL, OF_parent(sram_node)); 1042 if (!sdev) 1043 return (ENXIO); 1044 1045 ssc = device_get_softc(sdev); 1046 1047 resource_list_init(&rl); 1048 /* Parse reg property to resource list */ 1049 ofw_bus_reg_to_rl(sdev, sram_node, ssc->acells, 1050 ssc->scells, &rl); 1051 1052 /* We expect only one resource */ 1053 rle = resource_list_find(&rl, SYS_RES_MEMORY, 0); 1054 if (rle == NULL) 1055 return (ENXIO); 1056 1057 /* Remap through ranges property */ 1058 for (j = 0; j < ssc->nranges; j++) { 1059 if (rle->start >= ssc->ranges[j].bus && 1060 rle->end < ssc->ranges[j].bus + ssc->ranges[j].size) { 1061 rle->start -= ssc->ranges[j].bus; 1062 rle->start += ssc->ranges[j].host; 1063 rle->end -= ssc->ranges[j].bus; 1064 rle->end += ssc->ranges[j].host; 1065 } 1066 } 1067 1068 sc->sc_sram_base_pa = rle->start; 1069 sc->sc_sram_size = rle->count; 1070 1071 /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ 1072 sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); 1073 if (sram_va == NULL) 1074 return (ENOMEM); 1075 sc->sc_sram_base_va = (vm_offset_t)sram_va; 1076 1077 return (0); 1078 } 1079 1080 struct ofw_compat_data cesa_devices[] = { 1081 { "mrvl,cesa", (uintptr_t)true }, 1082 { "marvell,armada-38x-crypto", (uintptr_t)true }, 1083 { NULL, 0 } 1084 }; 1085 1086 static int 1087 cesa_probe(device_t dev) 1088 { 1089 1090 if (!ofw_bus_status_okay(dev)) 1091 return (ENXIO); 1092 1093 if (!ofw_bus_search_compatible(dev, cesa_devices)->ocd_data) 1094 return (ENXIO); 1095 1096 device_set_desc(dev, "Marvell Cryptographic Engine and Security " 1097 "Accelerator"); 1098 1099 return (BUS_PROBE_DEFAULT); 1100 } 1101 1102 static int 1103 cesa_attach(device_t dev) 1104 { 1105 static int engine_idx = 0; 1106 struct simplebus_devinfo *ndi; 1107 struct resource_list *rl; 1108 struct cesa_softc *sc; 1109 1110 if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) 1111 return (cesa_attach_late(dev)); 1112 1113 /* 1114 * Get simplebus_devinfo which contains 1115 * resource list filled with adresses and 1116 * interrupts read form FDT. 1117 * Let's correct it by splitting resources 1118 * for each engine. 1119 */ 1120 if ((ndi = device_get_ivars(dev)) == NULL) 1121 return (ENXIO); 1122 1123 rl = &ndi->rl; 1124 1125 switch (engine_idx) { 1126 case 0: 1127 /* Update regs values */ 1128 resource_list_add(rl, SYS_RES_MEMORY, 0, CESA0_TDMA_ADDR, 1129 CESA0_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); 1130 resource_list_add(rl, SYS_RES_MEMORY, 1, CESA0_CESA_ADDR, 1131 CESA0_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); 1132 1133 /* Remove unused interrupt */ 1134 resource_list_delete(rl, SYS_RES_IRQ, 1); 1135 break; 1136 1137 case 1: 1138 /* Update regs values */ 1139 resource_list_add(rl, SYS_RES_MEMORY, 0, CESA1_TDMA_ADDR, 1140 CESA1_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); 1141 resource_list_add(rl, SYS_RES_MEMORY, 1, CESA1_CESA_ADDR, 1142 CESA1_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); 1143 1144 /* Remove unused interrupt */ 1145 resource_list_delete(rl, SYS_RES_IRQ, 0); 1146 resource_list_find(rl, SYS_RES_IRQ, 1)->rid = 0; 1147 break; 1148 1149 default: 1150 device_printf(dev, "Bad cesa engine_idx\n"); 1151 return (ENXIO); 1152 } 1153 1154 sc = device_get_softc(dev); 1155 sc->sc_cesa_engine_id = engine_idx; 1156 1157 /* 1158 * Call simplebus_add_device only once. 1159 * It will create second cesa driver instance 1160 * with the same FDT node as first instance. 1161 * When second driver reach this function, 1162 * it will be configured to use second cesa engine 1163 */ 1164 if (engine_idx == 0) 1165 simplebus_add_device(device_get_parent(dev), ofw_bus_get_node(dev), 1166 0, "cesa", 1, NULL); 1167 1168 engine_idx++; 1169 1170 return (cesa_attach_late(dev)); 1171 } 1172 1173 static int 1174 cesa_attach_late(device_t dev) 1175 { 1176 struct cesa_softc *sc; 1177 uint32_t d, r, val; 1178 int error; 1179 int i; 1180 1181 sc = device_get_softc(dev); 1182 sc->sc_blocked = 0; 1183 sc->sc_error = 0; 1184 sc->sc_dev = dev; 1185 1186 soc_id(&d, &r); 1187 1188 switch (d) { 1189 case MV_DEV_88F6281: 1190 case MV_DEV_88F6282: 1191 /* Check if CESA peripheral device has power turned on */ 1192 if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) == 1193 CPU_PM_CTRL_CRYPTO) { 1194 device_printf(dev, "not powered on\n"); 1195 return (ENXIO); 1196 } 1197 sc->sc_tperr = 0; 1198 break; 1199 case MV_DEV_88F6828: 1200 case MV_DEV_88F6820: 1201 case MV_DEV_88F6810: 1202 sc->sc_tperr = 0; 1203 break; 1204 case MV_DEV_MV78100: 1205 case MV_DEV_MV78100_Z0: 1206 /* Check if CESA peripheral device has power turned on */ 1207 if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) != 1208 CPU_PM_CTRL_CRYPTO) { 1209 device_printf(dev, "not powered on\n"); 1210 return (ENXIO); 1211 } 1212 sc->sc_tperr = CESA_ICR_TPERR; 1213 break; 1214 default: 1215 return (ENXIO); 1216 } 1217 1218 sc->sc_soc_id = d; 1219 1220 /* Initialize mutexes */ 1221 mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev), 1222 "CESA Shared Data", MTX_DEF); 1223 mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev), 1224 "CESA TDMA Descriptors Pool", MTX_DEF); 1225 mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev), 1226 "CESA SA Descriptors Pool", MTX_DEF); 1227 mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev), 1228 "CESA Requests Pool", MTX_DEF); 1229 mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev), 1230 "CESA Sessions Pool", MTX_DEF); 1231 1232 /* Allocate I/O and IRQ resources */ 1233 error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res); 1234 if (error) { 1235 device_printf(dev, "could not allocate resources\n"); 1236 goto err0; 1237 } 1238 1239 /* Acquire SRAM base address */ 1240 if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) 1241 error = cesa_setup_sram(sc); 1242 else 1243 error = cesa_setup_sram_armada(sc); 1244 1245 if (error) { 1246 device_printf(dev, "could not setup SRAM\n"); 1247 goto err1; 1248 } 1249 1250 /* Setup interrupt handler */ 1251 error = bus_setup_intr(dev, sc->sc_res[RES_CESA_IRQ], INTR_TYPE_NET | 1252 INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie)); 1253 if (error) { 1254 device_printf(dev, "could not setup engine completion irq\n"); 1255 goto err2; 1256 } 1257 1258 /* Create DMA tag for processed data */ 1259 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1260 1, 0, /* alignment, boundary */ 1261 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1262 BUS_SPACE_MAXADDR, /* highaddr */ 1263 NULL, NULL, /* filtfunc, filtfuncarg */ 1264 CESA_MAX_REQUEST_SIZE, /* maxsize */ 1265 CESA_MAX_FRAGMENTS, /* nsegments */ 1266 CESA_MAX_REQUEST_SIZE, 0, /* maxsegsz, flags */ 1267 NULL, NULL, /* lockfunc, lockfuncarg */ 1268 &sc->sc_data_dtag); /* dmat */ 1269 if (error) 1270 goto err3; 1271 1272 /* Initialize data structures: TDMA Descriptors Pool */ 1273 error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm, 1274 CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc)); 1275 if (error) 1276 goto err4; 1277 1278 STAILQ_INIT(&sc->sc_free_tdesc); 1279 for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) { 1280 sc->sc_tdesc[i].ctd_cthd = 1281 (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i; 1282 sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr + 1283 (i * sizeof(struct cesa_tdma_hdesc)); 1284 STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i], 1285 ctd_stq); 1286 } 1287 1288 /* Initialize data structures: SA Descriptors Pool */ 1289 error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm, 1290 CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc)); 1291 if (error) 1292 goto err5; 1293 1294 STAILQ_INIT(&sc->sc_free_sdesc); 1295 for (i = 0; i < CESA_SA_DESCRIPTORS; i++) { 1296 sc->sc_sdesc[i].csd_cshd = 1297 (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i; 1298 sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr + 1299 (i * sizeof(struct cesa_sa_hdesc)); 1300 STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i], 1301 csd_stq); 1302 } 1303 1304 /* Initialize data structures: Requests Pool */ 1305 error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm, 1306 CESA_REQUESTS * sizeof(struct cesa_sa_data)); 1307 if (error) 1308 goto err6; 1309 1310 STAILQ_INIT(&sc->sc_free_requests); 1311 STAILQ_INIT(&sc->sc_ready_requests); 1312 STAILQ_INIT(&sc->sc_queued_requests); 1313 for (i = 0; i < CESA_REQUESTS; i++) { 1314 sc->sc_requests[i].cr_csd = 1315 (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i; 1316 sc->sc_requests[i].cr_csd_paddr = 1317 sc->sc_requests_cdm.cdm_paddr + 1318 (i * sizeof(struct cesa_sa_data)); 1319 1320 /* Preallocate DMA maps */ 1321 error = bus_dmamap_create(sc->sc_data_dtag, 0, 1322 &sc->sc_requests[i].cr_dmap); 1323 if (error && i > 0) { 1324 i--; 1325 do { 1326 bus_dmamap_destroy(sc->sc_data_dtag, 1327 sc->sc_requests[i].cr_dmap); 1328 } while (i--); 1329 1330 goto err7; 1331 } 1332 1333 STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i], 1334 cr_stq); 1335 } 1336 1337 /* 1338 * Initialize TDMA: 1339 * - Burst limit: 128 bytes, 1340 * - Outstanding reads enabled, 1341 * - No byte-swap. 1342 */ 1343 val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 | 1344 CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE; 1345 1346 if (sc->sc_soc_id == MV_DEV_88F6828 || 1347 sc->sc_soc_id == MV_DEV_88F6820 || 1348 sc->sc_soc_id == MV_DEV_88F6810) 1349 val |= CESA_TDMA_NUM_OUTSTAND; 1350 1351 CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val); 1352 1353 /* 1354 * Initialize SA: 1355 * - SA descriptor is present at beginning of CESA SRAM, 1356 * - Multi-packet chain mode, 1357 * - Cooperation with TDMA enabled. 1358 */ 1359 CESA_REG_WRITE(sc, CESA_SA_DPR, 0); 1360 CESA_REG_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA | 1361 CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE); 1362 1363 /* Unmask interrupts */ 1364 CESA_REG_WRITE(sc, CESA_ICR, 0); 1365 CESA_REG_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr); 1366 CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); 1367 CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS | 1368 CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT | 1369 CESA_TDMA_EMR_DATA_ERROR); 1370 1371 /* Register in OCF */ 1372 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct cesa_session), 1373 CRYPTOCAP_F_HARDWARE); 1374 if (sc->sc_cid < 0) { 1375 device_printf(dev, "could not get crypto driver id\n"); 1376 goto err8; 1377 } 1378 1379 return (0); 1380 err8: 1381 for (i = 0; i < CESA_REQUESTS; i++) 1382 bus_dmamap_destroy(sc->sc_data_dtag, 1383 sc->sc_requests[i].cr_dmap); 1384 err7: 1385 cesa_free_dma_mem(&sc->sc_requests_cdm); 1386 err6: 1387 cesa_free_dma_mem(&sc->sc_sdesc_cdm); 1388 err5: 1389 cesa_free_dma_mem(&sc->sc_tdesc_cdm); 1390 err4: 1391 bus_dma_tag_destroy(sc->sc_data_dtag); 1392 err3: 1393 bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); 1394 err2: 1395 if (sc->sc_soc_id == MV_DEV_88F6828 || 1396 sc->sc_soc_id == MV_DEV_88F6820 || 1397 sc->sc_soc_id == MV_DEV_88F6810) 1398 pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); 1399 err1: 1400 bus_release_resources(dev, cesa_res_spec, sc->sc_res); 1401 err0: 1402 mtx_destroy(&sc->sc_sessions_lock); 1403 mtx_destroy(&sc->sc_requests_lock); 1404 mtx_destroy(&sc->sc_sdesc_lock); 1405 mtx_destroy(&sc->sc_tdesc_lock); 1406 mtx_destroy(&sc->sc_sc_lock); 1407 return (ENXIO); 1408 } 1409 1410 static int 1411 cesa_detach(device_t dev) 1412 { 1413 struct cesa_softc *sc; 1414 int i; 1415 1416 sc = device_get_softc(dev); 1417 1418 /* TODO: Wait for queued requests completion before shutdown. */ 1419 1420 /* Mask interrupts */ 1421 CESA_REG_WRITE(sc, CESA_ICM, 0); 1422 CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, 0); 1423 1424 /* Unregister from OCF */ 1425 crypto_unregister_all(sc->sc_cid); 1426 1427 /* Free DMA Maps */ 1428 for (i = 0; i < CESA_REQUESTS; i++) 1429 bus_dmamap_destroy(sc->sc_data_dtag, 1430 sc->sc_requests[i].cr_dmap); 1431 1432 /* Free DMA Memory */ 1433 cesa_free_dma_mem(&sc->sc_requests_cdm); 1434 cesa_free_dma_mem(&sc->sc_sdesc_cdm); 1435 cesa_free_dma_mem(&sc->sc_tdesc_cdm); 1436 1437 /* Free DMA Tag */ 1438 bus_dma_tag_destroy(sc->sc_data_dtag); 1439 1440 /* Stop interrupt */ 1441 bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); 1442 1443 /* Relase I/O and IRQ resources */ 1444 bus_release_resources(dev, cesa_res_spec, sc->sc_res); 1445 1446 /* Unmap SRAM memory */ 1447 if (sc->sc_soc_id == MV_DEV_88F6828 || 1448 sc->sc_soc_id == MV_DEV_88F6820 || 1449 sc->sc_soc_id == MV_DEV_88F6810) 1450 pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); 1451 1452 /* Destroy mutexes */ 1453 mtx_destroy(&sc->sc_sessions_lock); 1454 mtx_destroy(&sc->sc_requests_lock); 1455 mtx_destroy(&sc->sc_sdesc_lock); 1456 mtx_destroy(&sc->sc_tdesc_lock); 1457 mtx_destroy(&sc->sc_sc_lock); 1458 1459 return (0); 1460 } 1461 1462 static void 1463 cesa_intr(void *arg) 1464 { 1465 STAILQ_HEAD(, cesa_request) requests; 1466 struct cesa_request *cr, *tmp; 1467 struct cesa_softc *sc; 1468 uint32_t ecr, icr; 1469 uint8_t hash[HASH_MAX_LEN]; 1470 int blocked; 1471 1472 sc = arg; 1473 1474 /* Ack interrupt */ 1475 ecr = CESA_TDMA_READ(sc, CESA_TDMA_ECR); 1476 CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); 1477 icr = CESA_REG_READ(sc, CESA_ICR); 1478 CESA_REG_WRITE(sc, CESA_ICR, 0); 1479 1480 /* Check for TDMA errors */ 1481 if (ecr & CESA_TDMA_ECR_MISS) { 1482 device_printf(sc->sc_dev, "TDMA Miss error detected!\n"); 1483 sc->sc_error = EIO; 1484 } 1485 1486 if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) { 1487 device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n"); 1488 sc->sc_error = EIO; 1489 } 1490 1491 if (ecr & CESA_TDMA_ECR_BOTH_HIT) { 1492 device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n"); 1493 sc->sc_error = EIO; 1494 } 1495 1496 if (ecr & CESA_TDMA_ECR_DATA_ERROR) { 1497 device_printf(sc->sc_dev, "TDMA Data error detected!\n"); 1498 sc->sc_error = EIO; 1499 } 1500 1501 /* Check for CESA errors */ 1502 if (icr & sc->sc_tperr) { 1503 device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n"); 1504 sc->sc_error = EIO; 1505 } 1506 1507 /* If there is nothing more to do, return */ 1508 if ((icr & CESA_ICR_ACCTDMA) == 0) 1509 return; 1510 1511 /* Get all finished requests */ 1512 CESA_LOCK(sc, requests); 1513 STAILQ_INIT(&requests); 1514 STAILQ_CONCAT(&requests, &sc->sc_queued_requests); 1515 STAILQ_INIT(&sc->sc_queued_requests); 1516 CESA_UNLOCK(sc, requests); 1517 1518 /* Execute all ready requests */ 1519 cesa_execute(sc); 1520 1521 /* Process completed requests */ 1522 cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD | 1523 BUS_DMASYNC_POSTWRITE); 1524 1525 STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) { 1526 bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, 1527 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1528 1529 cr->cr_crp->crp_etype = sc->sc_error; 1530 if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0) { 1531 if (cr->cr_crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { 1532 crypto_copydata(cr->cr_crp, 1533 cr->cr_crp->crp_digest_start, 1534 cr->cr_cs->cs_hlen, hash); 1535 if (timingsafe_bcmp(hash, cr->cr_csd->csd_hash, 1536 cr->cr_cs->cs_hlen) != 0) 1537 cr->cr_crp->crp_etype = EBADMSG; 1538 } else 1539 crypto_copyback(cr->cr_crp, 1540 cr->cr_crp->crp_digest_start, 1541 cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash); 1542 } 1543 crypto_done(cr->cr_crp); 1544 cesa_free_request(sc, cr); 1545 } 1546 1547 cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD | 1548 BUS_DMASYNC_PREWRITE); 1549 1550 sc->sc_error = 0; 1551 1552 /* Unblock driver if it ran out of resources */ 1553 CESA_LOCK(sc, sc); 1554 blocked = sc->sc_blocked; 1555 sc->sc_blocked = 0; 1556 CESA_UNLOCK(sc, sc); 1557 1558 if (blocked) 1559 crypto_unblock(sc->sc_cid, blocked); 1560 } 1561 1562 static bool 1563 cesa_cipher_supported(const struct crypto_session_params *csp) 1564 { 1565 1566 switch (csp->csp_cipher_alg) { 1567 case CRYPTO_AES_CBC: 1568 if (csp->csp_ivlen != AES_BLOCK_LEN) 1569 return (false); 1570 break; 1571 default: 1572 return (false); 1573 } 1574 1575 if (csp->csp_cipher_klen > CESA_MAX_KEY_LEN) 1576 return (false); 1577 1578 return (true); 1579 } 1580 1581 static bool 1582 cesa_auth_supported(struct cesa_softc *sc, 1583 const struct crypto_session_params *csp) 1584 { 1585 1586 switch (csp->csp_auth_alg) { 1587 case CRYPTO_SHA2_256_HMAC: 1588 if (!(sc->sc_soc_id == MV_DEV_88F6828 || 1589 sc->sc_soc_id == MV_DEV_88F6820 || 1590 sc->sc_soc_id == MV_DEV_88F6810)) 1591 return (false); 1592 /* FALLTHROUGH */ 1593 case CRYPTO_SHA1: 1594 case CRYPTO_SHA1_HMAC: 1595 break; 1596 default: 1597 return (false); 1598 } 1599 1600 if (csp->csp_auth_klen > CESA_MAX_MKEY_LEN) 1601 return (false); 1602 1603 return (true); 1604 } 1605 1606 static int 1607 cesa_probesession(device_t dev, const struct crypto_session_params *csp) 1608 { 1609 struct cesa_softc *sc; 1610 1611 sc = device_get_softc(dev); 1612 if (csp->csp_flags != 0) 1613 return (EINVAL); 1614 switch (csp->csp_mode) { 1615 case CSP_MODE_DIGEST: 1616 if (!cesa_auth_supported(sc, csp)) 1617 return (EINVAL); 1618 break; 1619 case CSP_MODE_CIPHER: 1620 if (!cesa_cipher_supported(csp)) 1621 return (EINVAL); 1622 break; 1623 case CSP_MODE_ETA: 1624 if (!cesa_auth_supported(sc, csp) || 1625 !cesa_cipher_supported(csp)) 1626 return (EINVAL); 1627 break; 1628 default: 1629 return (EINVAL); 1630 } 1631 return (CRYPTODEV_PROBE_HARDWARE); 1632 } 1633 1634 static int 1635 cesa_newsession(device_t dev, crypto_session_t cses, 1636 const struct crypto_session_params *csp) 1637 { 1638 struct cesa_session *cs; 1639 int error; 1640 1641 error = 0; 1642 1643 /* Allocate session */ 1644 cs = crypto_get_driver_session(cses); 1645 1646 /* Prepare CESA configuration */ 1647 cs->cs_config = 0; 1648 cs->cs_ivlen = 1; 1649 cs->cs_mblen = 1; 1650 1651 switch (csp->csp_cipher_alg) { 1652 case CRYPTO_AES_CBC: 1653 cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC; 1654 cs->cs_ivlen = AES_BLOCK_LEN; 1655 break; 1656 } 1657 1658 switch (csp->csp_auth_alg) { 1659 case CRYPTO_SHA1: 1660 cs->cs_mblen = 1; 1661 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN : 1662 csp->csp_auth_mlen; 1663 cs->cs_config |= CESA_CSHD_SHA1; 1664 break; 1665 case CRYPTO_SHA1_HMAC: 1666 cs->cs_mblen = SHA1_BLOCK_LEN; 1667 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN : 1668 csp->csp_auth_mlen; 1669 cs->cs_config |= CESA_CSHD_SHA1_HMAC; 1670 if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN) 1671 cs->cs_config |= CESA_CSHD_96_BIT_HMAC; 1672 break; 1673 case CRYPTO_SHA2_256_HMAC: 1674 cs->cs_mblen = SHA2_256_BLOCK_LEN; 1675 cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA2_256_HASH_LEN : 1676 csp->csp_auth_mlen; 1677 cs->cs_config |= CESA_CSHD_SHA2_256_HMAC; 1678 break; 1679 } 1680 1681 /* Save cipher key */ 1682 if (csp->csp_cipher_key != NULL) { 1683 memcpy(cs->cs_key, csp->csp_cipher_key, 1684 csp->csp_cipher_klen); 1685 if (csp->csp_cipher_alg == CRYPTO_AES_CBC) 1686 error = cesa_prep_aes_key(cs, csp); 1687 } 1688 1689 /* Save digest key */ 1690 if (csp->csp_auth_key != NULL) 1691 cesa_set_mkey(cs, csp->csp_auth_alg, csp->csp_auth_key, 1692 csp->csp_auth_klen); 1693 1694 return (error); 1695 } 1696 1697 static int 1698 cesa_process(device_t dev, struct cryptop *crp, int hint) 1699 { 1700 const struct crypto_session_params *csp; 1701 struct cesa_request *cr; 1702 struct cesa_session *cs; 1703 struct cesa_softc *sc; 1704 int error; 1705 1706 sc = device_get_softc(dev); 1707 error = 0; 1708 1709 cs = crypto_get_driver_session(crp->crp_session); 1710 csp = crypto_get_params(crp->crp_session); 1711 1712 /* Check and parse input */ 1713 if (crypto_buffer_len(&crp->crp_buf) > CESA_MAX_REQUEST_SIZE) { 1714 crp->crp_etype = E2BIG; 1715 crypto_done(crp); 1716 return (0); 1717 } 1718 1719 /* 1720 * For requests with AAD, only requests where the AAD is 1721 * immediately adjacent to the payload are supported. 1722 */ 1723 if (crp->crp_aad_length != 0 && 1724 (crp->crp_aad_start + crp->crp_aad_length) != 1725 crp->crp_payload_start) { 1726 crp->crp_etype = EINVAL; 1727 crypto_done(crp); 1728 return (0); 1729 } 1730 1731 /* 1732 * Get request descriptor. Block driver if there is no free 1733 * descriptors in pool. 1734 */ 1735 cr = cesa_alloc_request(sc); 1736 if (!cr) { 1737 CESA_LOCK(sc, sc); 1738 sc->sc_blocked = CRYPTO_SYMQ; 1739 CESA_UNLOCK(sc, sc); 1740 return (ERESTART); 1741 } 1742 1743 /* Prepare request */ 1744 cr->cr_crp = crp; 1745 cr->cr_cs = cs; 1746 1747 CESA_LOCK(sc, sessions); 1748 cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1749 1750 if (csp->csp_cipher_alg != 0) 1751 crypto_read_iv(crp, cr->cr_csd->csd_iv); 1752 1753 if (crp->crp_cipher_key != NULL) { 1754 memcpy(cs->cs_key, crp->crp_cipher_key, 1755 csp->csp_cipher_klen); 1756 if (csp->csp_cipher_alg == CRYPTO_AES_CBC) 1757 error = cesa_prep_aes_key(cs, csp); 1758 } 1759 1760 if (!error && crp->crp_auth_key != NULL) 1761 cesa_set_mkey(cs, csp->csp_auth_alg, crp->crp_auth_key, 1762 csp->csp_auth_klen); 1763 1764 /* Convert request to chain of TDMA and SA descriptors */ 1765 if (!error) 1766 error = cesa_create_chain(sc, csp, cr); 1767 1768 cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1769 CESA_UNLOCK(sc, sessions); 1770 1771 if (error) { 1772 cesa_free_request(sc, cr); 1773 crp->crp_etype = error; 1774 crypto_done(crp); 1775 return (0); 1776 } 1777 1778 bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD | 1779 BUS_DMASYNC_PREWRITE); 1780 1781 /* Enqueue request to execution */ 1782 cesa_enqueue_request(sc, cr); 1783 1784 /* Start execution, if we have no more requests in queue */ 1785 if ((hint & CRYPTO_HINT_MORE) == 0) 1786 cesa_execute(sc); 1787 1788 return (0); 1789 } 1790