1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2020 Rubicon Communications, LLC (Netgate) 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/bus.h> 32 #include <sys/endian.h> 33 #include <sys/kernel.h> 34 #include <sys/lock.h> 35 #include <sys/malloc.h> 36 #include <sys/module.h> 37 #include <sys/mutex.h> 38 #include <sys/rman.h> 39 #include <sys/sglist.h> 40 #include <sys/sysctl.h> 41 42 #include <machine/atomic.h> 43 #include <machine/bus.h> 44 45 #include <crypto/rijndael/rijndael.h> 46 #include <opencrypto/cryptodev.h> 47 #include <opencrypto/xform.h> 48 49 #include <dev/ofw/ofw_bus.h> 50 #include <dev/ofw/ofw_bus_subr.h> 51 52 #include "cryptodev_if.h" 53 54 #include "safexcel_reg.h" 55 #include "safexcel_var.h" 56 57 static MALLOC_DEFINE(M_SAFEXCEL, "safexcel_req", "safexcel request buffers"); 58 59 /* 60 * We only support the EIP97 for now. 61 */ 62 static struct ofw_compat_data safexcel_compat[] = { 63 { "inside-secure,safexcel-eip97ies", (uintptr_t)97 }, 64 { "inside-secure,safexcel-eip97", (uintptr_t)97 }, 65 { NULL, 0 } 66 }; 67 68 const struct safexcel_reg_offsets eip97_regs_offset = { 69 .hia_aic = SAFEXCEL_EIP97_HIA_AIC_BASE, 70 .hia_aic_g = SAFEXCEL_EIP97_HIA_AIC_G_BASE, 71 .hia_aic_r = SAFEXCEL_EIP97_HIA_AIC_R_BASE, 72 .hia_aic_xdr = SAFEXCEL_EIP97_HIA_AIC_xDR_BASE, 73 .hia_dfe = SAFEXCEL_EIP97_HIA_DFE_BASE, 74 .hia_dfe_thr = SAFEXCEL_EIP97_HIA_DFE_THR_BASE, 75 .hia_dse = SAFEXCEL_EIP97_HIA_DSE_BASE, 76 .hia_dse_thr = SAFEXCEL_EIP97_HIA_DSE_THR_BASE, 77 .hia_gen_cfg = SAFEXCEL_EIP97_HIA_GEN_CFG_BASE, 78 .pe = SAFEXCEL_EIP97_PE_BASE, 79 }; 80 81 const struct safexcel_reg_offsets eip197_regs_offset = { 82 .hia_aic = SAFEXCEL_EIP197_HIA_AIC_BASE, 83 .hia_aic_g = SAFEXCEL_EIP197_HIA_AIC_G_BASE, 84 .hia_aic_r = SAFEXCEL_EIP197_HIA_AIC_R_BASE, 85 .hia_aic_xdr = SAFEXCEL_EIP197_HIA_AIC_xDR_BASE, 86 .hia_dfe = SAFEXCEL_EIP197_HIA_DFE_BASE, 87 .hia_dfe_thr = SAFEXCEL_EIP197_HIA_DFE_THR_BASE, 88 .hia_dse = SAFEXCEL_EIP197_HIA_DSE_BASE, 89 .hia_dse_thr = SAFEXCEL_EIP197_HIA_DSE_THR_BASE, 90 .hia_gen_cfg = SAFEXCEL_EIP197_HIA_GEN_CFG_BASE, 91 .pe = SAFEXCEL_EIP197_PE_BASE, 92 }; 93 94 static struct safexcel_cmd_descr * 95 safexcel_cmd_descr_next(struct safexcel_cmd_descr_ring *ring) 96 { 97 struct safexcel_cmd_descr *cdesc; 98 99 if (ring->write == ring->read) 100 return (NULL); 101 cdesc = &ring->desc[ring->read]; 102 ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE; 103 return (cdesc); 104 } 105 106 static struct safexcel_res_descr * 107 safexcel_res_descr_next(struct safexcel_res_descr_ring *ring) 108 { 109 struct safexcel_res_descr *rdesc; 110 111 if (ring->write == ring->read) 112 return (NULL); 113 rdesc = &ring->desc[ring->read]; 114 ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE; 115 return (rdesc); 116 } 117 118 static struct safexcel_request * 119 safexcel_alloc_request(struct safexcel_softc *sc, struct safexcel_ring *ring) 120 { 121 struct safexcel_request *req; 122 123 mtx_assert(&ring->mtx, MA_OWNED); 124 125 if ((req = STAILQ_FIRST(&ring->free_requests)) != NULL) 126 STAILQ_REMOVE_HEAD(&ring->free_requests, link); 127 return (req); 128 } 129 130 static void 131 safexcel_free_request(struct safexcel_ring *ring, struct safexcel_request *req) 132 { 133 struct safexcel_context_record *ctx; 134 135 mtx_assert(&ring->mtx, MA_OWNED); 136 137 if (req->dmap_loaded) { 138 bus_dmamap_unload(ring->data_dtag, req->dmap); 139 req->dmap_loaded = false; 140 } 141 ctx = (struct safexcel_context_record *)req->ctx.vaddr; 142 explicit_bzero(ctx->data, sizeof(ctx->data)); 143 explicit_bzero(req->iv, sizeof(req->iv)); 144 STAILQ_INSERT_TAIL(&ring->free_requests, req, link); 145 } 146 147 static void 148 safexcel_enqueue_request(struct safexcel_softc *sc, struct safexcel_ring *ring, 149 struct safexcel_request *req) 150 { 151 mtx_assert(&ring->mtx, MA_OWNED); 152 153 STAILQ_INSERT_TAIL(&ring->ready_requests, req, link); 154 } 155 156 static void 157 safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx) 158 { 159 struct safexcel_cmd_descr *cdesc; 160 struct safexcel_res_descr *rdesc; 161 struct safexcel_request *req; 162 struct safexcel_ring *ring; 163 uint32_t blocked, error, i, ncdescs, nrdescs, nreqs; 164 165 blocked = 0; 166 ring = &sc->sc_ring[ringidx]; 167 168 mtx_lock(&ring->mtx); 169 nreqs = SAFEXCEL_READ(sc, 170 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT); 171 nreqs >>= SAFEXCEL_xDR_PROC_xD_PKT_OFFSET; 172 nreqs &= SAFEXCEL_xDR_PROC_xD_PKT_MASK; 173 if (nreqs == 0) { 174 SAFEXCEL_DPRINTF(sc, 1, 175 "zero pending requests on ring %d\n", ringidx); 176 goto out; 177 } 178 179 ring = &sc->sc_ring[ringidx]; 180 bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map, 181 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 182 bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map, 183 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 184 bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map, 185 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 186 187 ncdescs = nrdescs = 0; 188 for (i = 0; i < nreqs; i++) { 189 req = STAILQ_FIRST(&ring->queued_requests); 190 KASSERT(req != NULL, ("%s: expected %d pending requests", 191 __func__, nreqs)); 192 STAILQ_REMOVE_HEAD(&ring->queued_requests, link); 193 mtx_unlock(&ring->mtx); 194 195 bus_dmamap_sync(req->ctx.tag, req->ctx.map, 196 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 197 bus_dmamap_sync(ring->data_dtag, req->dmap, 198 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 199 200 ncdescs += req->cdescs; 201 while (req->cdescs-- > 0) { 202 cdesc = safexcel_cmd_descr_next(&ring->cdr); 203 KASSERT(cdesc != NULL, 204 ("%s: missing control descriptor", __func__)); 205 if (req->cdescs == 0) 206 KASSERT(cdesc->last_seg, 207 ("%s: chain is not terminated", __func__)); 208 } 209 nrdescs += req->rdescs; 210 while (req->rdescs-- > 0) { 211 rdesc = safexcel_res_descr_next(&ring->rdr); 212 error = rdesc->result_data.error_code; 213 if (error != 0) { 214 if (error == SAFEXCEL_RESULT_ERR_AUTH_FAILED && 215 req->crp->crp_etype == 0) { 216 req->crp->crp_etype = EBADMSG; 217 } else { 218 SAFEXCEL_DPRINTF(sc, 1, 219 "error code %#x\n", error); 220 req->crp->crp_etype = EIO; 221 } 222 } 223 } 224 225 crypto_done(req->crp); 226 mtx_lock(&ring->mtx); 227 safexcel_free_request(ring, req); 228 } 229 230 if (nreqs != 0) { 231 SAFEXCEL_WRITE(sc, 232 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT, 233 SAFEXCEL_xDR_PROC_xD_PKT(nreqs) | 234 (sc->sc_config.rd_offset * nrdescs * sizeof(uint32_t))); 235 blocked = ring->blocked; 236 ring->blocked = 0; 237 } 238 out: 239 if (!STAILQ_EMPTY(&ring->queued_requests)) { 240 SAFEXCEL_WRITE(sc, 241 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH, 242 SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | 1); 243 } 244 mtx_unlock(&ring->mtx); 245 246 if (blocked) 247 crypto_unblock(sc->sc_cid, blocked); 248 } 249 250 static void 251 safexcel_ring_intr(void *arg) 252 { 253 struct safexcel_softc *sc; 254 struct safexcel_intr_handle *ih; 255 uint32_t status, stat; 256 int ring; 257 bool rdrpending; 258 259 ih = arg; 260 sc = ih->sc; 261 ring = ih->ring; 262 263 status = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) + 264 SAFEXCEL_HIA_AIC_R_ENABLED_STAT(ring)); 265 /* CDR interrupts */ 266 if (status & SAFEXCEL_CDR_IRQ(ring)) { 267 stat = SAFEXCEL_READ(sc, 268 SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT); 269 SAFEXCEL_WRITE(sc, 270 SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT, 271 stat & SAFEXCEL_CDR_INTR_MASK); 272 } 273 /* RDR interrupts */ 274 rdrpending = false; 275 if (status & SAFEXCEL_RDR_IRQ(ring)) { 276 stat = SAFEXCEL_READ(sc, 277 SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT); 278 if ((stat & SAFEXCEL_xDR_ERR) == 0) 279 rdrpending = true; 280 SAFEXCEL_WRITE(sc, 281 SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT, 282 stat & SAFEXCEL_RDR_INTR_MASK); 283 } 284 SAFEXCEL_WRITE(sc, 285 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ACK(ring), 286 status); 287 288 if (rdrpending) 289 safexcel_rdr_intr(sc, ring); 290 } 291 292 static int 293 safexcel_configure(struct safexcel_softc *sc) 294 { 295 uint32_t i, mask, pemask, reg; 296 device_t dev; 297 298 if (sc->sc_type == 197) { 299 sc->sc_offsets = eip197_regs_offset; 300 pemask = SAFEXCEL_N_PES_MASK; 301 } else { 302 sc->sc_offsets = eip97_regs_offset; 303 pemask = EIP97_N_PES_MASK; 304 } 305 306 dev = sc->sc_dev; 307 308 /* Scan for valid ring interrupt controllers. */ 309 for (i = 0; i < SAFEXCEL_MAX_RING_AIC; i++) { 310 reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) + 311 SAFEXCEL_HIA_AIC_R_VERSION(i)); 312 if (SAFEXCEL_REG_LO16(reg) != EIP201_VERSION_LE) 313 break; 314 } 315 sc->sc_config.aic_rings = i; 316 if (sc->sc_config.aic_rings == 0) 317 return (-1); 318 319 reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_OPTIONS); 320 /* Check for 64bit addressing. */ 321 if ((reg & SAFEXCEL_OPT_ADDR_64) == 0) 322 return (-1); 323 /* Check alignment constraints (which we do not support). */ 324 if (((reg & SAFEXCEL_OPT_TGT_ALIGN_MASK) >> 325 SAFEXCEL_OPT_TGT_ALIGN_OFFSET) != 0) 326 return (-1); 327 328 sc->sc_config.hdw = 329 (reg & SAFEXCEL_xDR_HDW_MASK) >> SAFEXCEL_xDR_HDW_OFFSET; 330 mask = (1 << sc->sc_config.hdw) - 1; 331 332 sc->sc_config.rings = reg & SAFEXCEL_N_RINGS_MASK; 333 /* Limit the number of rings to the number of the AIC Rings. */ 334 sc->sc_config.rings = MIN(sc->sc_config.rings, sc->sc_config.aic_rings); 335 336 sc->sc_config.pes = (reg & pemask) >> SAFEXCEL_N_PES_OFFSET; 337 338 sc->sc_config.cd_size = 339 sizeof(struct safexcel_cmd_descr) / sizeof(uint32_t); 340 sc->sc_config.cd_offset = (sc->sc_config.cd_size + mask) & ~mask; 341 342 sc->sc_config.rd_size = 343 sizeof(struct safexcel_res_descr) / sizeof(uint32_t); 344 sc->sc_config.rd_offset = (sc->sc_config.rd_size + mask) & ~mask; 345 346 sc->sc_config.atok_offset = 347 (SAFEXCEL_MAX_ATOKENS * sizeof(struct safexcel_instr) + mask) & 348 ~mask; 349 350 return (0); 351 } 352 353 static void 354 safexcel_init_hia_bus_access(struct safexcel_softc *sc) 355 { 356 uint32_t version, val; 357 358 /* Determine endianness and configure byte swap. */ 359 version = SAFEXCEL_READ(sc, 360 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_VERSION); 361 val = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL); 362 if (SAFEXCEL_REG_HI16(version) == SAFEXCEL_HIA_VERSION_BE) { 363 val = SAFEXCEL_READ(sc, 364 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL); 365 val = val ^ (SAFEXCEL_MST_CTRL_NO_BYTE_SWAP >> 24); 366 SAFEXCEL_WRITE(sc, 367 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL, 368 val); 369 } 370 371 /* Configure wr/rd cache values. */ 372 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_GEN_CFG(sc) + SAFEXCEL_HIA_MST_CTRL, 373 SAFEXCEL_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) | 374 SAFEXCEL_MST_CTRL_WD_CACHE(WR_CACHE_4BITS)); 375 } 376 377 static void 378 safexcel_disable_global_interrupts(struct safexcel_softc *sc) 379 { 380 /* Disable and clear pending interrupts. */ 381 SAFEXCEL_WRITE(sc, 382 SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ENABLE_CTRL, 0); 383 SAFEXCEL_WRITE(sc, 384 SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK, 385 SAFEXCEL_AIC_G_ACK_ALL_MASK); 386 } 387 388 /* 389 * Configure the data fetch engine. This component parses command descriptors 390 * and sets up DMA transfers from host memory to the corresponding processing 391 * engine. 392 */ 393 static void 394 safexcel_configure_dfe_engine(struct safexcel_softc *sc, int pe) 395 { 396 /* Reset all DFE threads. */ 397 SAFEXCEL_WRITE(sc, 398 SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 399 SAFEXCEL_DxE_THR_CTRL_RESET_PE); 400 401 /* Deassert the DFE reset. */ 402 SAFEXCEL_WRITE(sc, 403 SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 0); 404 405 /* DMA transfer size to use. */ 406 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE(sc) + SAFEXCEL_HIA_DFE_CFG(pe), 407 SAFEXCEL_HIA_DFE_CFG_DIS_DEBUG | 408 SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(6) | 409 SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(9) | 410 SAFEXCEL_HIA_DxE_CFG_MIN_CTRL_SIZE(6) | 411 SAFEXCEL_HIA_DxE_CFG_MAX_CTRL_SIZE(7) | 412 SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS) | 413 SAFEXCEL_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS)); 414 415 /* Configure the PE DMA transfer thresholds. */ 416 SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_DBUF_THRES(pe), 417 SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) | 418 SAFEXCEL_PE_IN_xBUF_THRES_MAX(9)); 419 SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_TBUF_THRES(pe), 420 SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) | 421 SAFEXCEL_PE_IN_xBUF_THRES_MAX(7)); 422 } 423 424 /* 425 * Configure the data store engine. This component parses result descriptors 426 * and sets up DMA transfers from the processing engine to host memory. 427 */ 428 static int 429 safexcel_configure_dse(struct safexcel_softc *sc, int pe) 430 { 431 uint32_t val; 432 int count; 433 434 /* Disable and reset all DSE threads. */ 435 SAFEXCEL_WRITE(sc, 436 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 437 SAFEXCEL_DxE_THR_CTRL_RESET_PE); 438 439 /* Wait for a second for threads to go idle. */ 440 for (count = 0;;) { 441 val = SAFEXCEL_READ(sc, 442 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_STAT(pe)); 443 if ((val & SAFEXCEL_DSE_THR_RDR_ID_MASK) == 444 SAFEXCEL_DSE_THR_RDR_ID_MASK) 445 break; 446 if (count++ > 10000) { 447 device_printf(sc->sc_dev, "DSE reset timeout\n"); 448 return (-1); 449 } 450 DELAY(100); 451 } 452 453 /* Exit the reset state. */ 454 SAFEXCEL_WRITE(sc, 455 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 0); 456 457 /* DMA transfer size to use */ 458 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE(sc) + SAFEXCEL_HIA_DSE_CFG(pe), 459 SAFEXCEL_HIA_DSE_CFG_DIS_DEBUG | 460 SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(7) | 461 SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(8) | 462 SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS) | 463 SAFEXCEL_HIA_DSE_CFG_ALLWAYS_BUFFERABLE); 464 465 /* Configure the procesing engine thresholds */ 466 SAFEXCEL_WRITE(sc, 467 SAFEXCEL_PE(sc) + SAFEXCEL_PE_OUT_DBUF_THRES(pe), 468 SAFEXCEL_PE_OUT_DBUF_THRES_MIN(7) | 469 SAFEXCEL_PE_OUT_DBUF_THRES_MAX(8)); 470 471 return (0); 472 } 473 474 static void 475 safexcel_hw_prepare_rings(struct safexcel_softc *sc) 476 { 477 int i; 478 479 for (i = 0; i < sc->sc_config.rings; i++) { 480 /* 481 * Command descriptors. 482 */ 483 484 /* Clear interrupts for this ring. */ 485 SAFEXCEL_WRITE(sc, 486 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i), 487 SAFEXCEL_HIA_AIC_R_ENABLE_CLR_ALL_MASK); 488 489 /* Disable external triggering. */ 490 SAFEXCEL_WRITE(sc, 491 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0); 492 493 /* Clear the pending prepared counter. */ 494 SAFEXCEL_WRITE(sc, 495 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT, 496 SAFEXCEL_xDR_PREP_CLR_COUNT); 497 498 /* Clear the pending processed counter. */ 499 SAFEXCEL_WRITE(sc, 500 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT, 501 SAFEXCEL_xDR_PROC_CLR_COUNT); 502 503 SAFEXCEL_WRITE(sc, 504 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0); 505 SAFEXCEL_WRITE(sc, 506 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0); 507 508 SAFEXCEL_WRITE(sc, 509 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 510 SAFEXCEL_RING_SIZE * sc->sc_config.cd_offset * 511 sizeof(uint32_t)); 512 513 /* 514 * Result descriptors. 515 */ 516 517 /* Disable external triggering. */ 518 SAFEXCEL_WRITE(sc, 519 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0); 520 521 /* Clear the pending prepared counter. */ 522 SAFEXCEL_WRITE(sc, 523 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT, 524 SAFEXCEL_xDR_PREP_CLR_COUNT); 525 526 /* Clear the pending processed counter. */ 527 SAFEXCEL_WRITE(sc, 528 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT, 529 SAFEXCEL_xDR_PROC_CLR_COUNT); 530 531 SAFEXCEL_WRITE(sc, 532 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0); 533 SAFEXCEL_WRITE(sc, 534 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0); 535 536 /* Ring size. */ 537 SAFEXCEL_WRITE(sc, 538 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 539 SAFEXCEL_RING_SIZE * sc->sc_config.rd_offset * 540 sizeof(uint32_t)); 541 } 542 } 543 544 static void 545 safexcel_hw_setup_rings(struct safexcel_softc *sc) 546 { 547 struct safexcel_ring *ring; 548 uint32_t cd_size_rnd, mask, rd_size_rnd, val; 549 int i; 550 551 mask = (1 << sc->sc_config.hdw) - 1; 552 cd_size_rnd = (sc->sc_config.cd_size + mask) >> sc->sc_config.hdw; 553 val = (sizeof(struct safexcel_res_descr) - 554 sizeof(struct safexcel_res_data)) / sizeof(uint32_t); 555 rd_size_rnd = (val + mask) >> sc->sc_config.hdw; 556 557 for (i = 0; i < sc->sc_config.rings; i++) { 558 ring = &sc->sc_ring[i]; 559 560 /* 561 * Command descriptors. 562 */ 563 564 /* Ring base address. */ 565 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) + 566 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 567 SAFEXCEL_ADDR_LO(ring->cdr.dma.paddr)); 568 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) + 569 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 570 SAFEXCEL_ADDR_HI(ring->cdr.dma.paddr)); 571 572 SAFEXCEL_WRITE(sc, 573 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE, 574 SAFEXCEL_xDR_DESC_MODE_64BIT | SAFEXCEL_CDR_DESC_MODE_ADCP | 575 (sc->sc_config.cd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) | 576 sc->sc_config.cd_size); 577 578 SAFEXCEL_WRITE(sc, 579 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 580 ((SAFEXCEL_FETCH_COUNT * (cd_size_rnd << sc->sc_config.hdw)) << 581 SAFEXCEL_xDR_xD_FETCH_THRESH) | 582 (SAFEXCEL_FETCH_COUNT * sc->sc_config.cd_offset)); 583 584 /* Configure DMA tx control. */ 585 SAFEXCEL_WRITE(sc, 586 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG, 587 SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) | 588 SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS)); 589 590 /* Clear any pending interrupt. */ 591 SAFEXCEL_WRITE(sc, 592 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT, 593 SAFEXCEL_CDR_INTR_MASK); 594 595 /* 596 * Result descriptors. 597 */ 598 599 /* Ring base address. */ 600 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) + 601 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 602 SAFEXCEL_ADDR_LO(ring->rdr.dma.paddr)); 603 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) + 604 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 605 SAFEXCEL_ADDR_HI(ring->rdr.dma.paddr)); 606 607 SAFEXCEL_WRITE(sc, 608 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE, 609 SAFEXCEL_xDR_DESC_MODE_64BIT | 610 (sc->sc_config.rd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) | 611 sc->sc_config.rd_size); 612 613 SAFEXCEL_WRITE(sc, 614 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 615 ((SAFEXCEL_FETCH_COUNT * (rd_size_rnd << sc->sc_config.hdw)) << 616 SAFEXCEL_xDR_xD_FETCH_THRESH) | 617 (SAFEXCEL_FETCH_COUNT * sc->sc_config.rd_offset)); 618 619 /* Configure DMA tx control. */ 620 SAFEXCEL_WRITE(sc, 621 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG, 622 SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) | 623 SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS) | 624 SAFEXCEL_HIA_xDR_WR_RES_BUF | SAFEXCEL_HIA_xDR_WR_CTRL_BUF); 625 626 /* Clear any pending interrupt. */ 627 SAFEXCEL_WRITE(sc, 628 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT, 629 SAFEXCEL_RDR_INTR_MASK); 630 631 /* Enable ring interrupt. */ 632 SAFEXCEL_WRITE(sc, 633 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CTRL(i), 634 SAFEXCEL_RDR_IRQ(i)); 635 } 636 } 637 638 /* Reset the command and result descriptor rings. */ 639 static void 640 safexcel_hw_reset_rings(struct safexcel_softc *sc) 641 { 642 int i; 643 644 for (i = 0; i < sc->sc_config.rings; i++) { 645 /* 646 * Result descriptor ring operations. 647 */ 648 649 /* Reset ring base address. */ 650 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) + 651 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0); 652 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) + 653 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0); 654 655 /* Clear the pending prepared counter. */ 656 SAFEXCEL_WRITE(sc, 657 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT, 658 SAFEXCEL_xDR_PREP_CLR_COUNT); 659 660 /* Clear the pending processed counter. */ 661 SAFEXCEL_WRITE(sc, 662 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT, 663 SAFEXCEL_xDR_PROC_CLR_COUNT); 664 665 SAFEXCEL_WRITE(sc, 666 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0); 667 SAFEXCEL_WRITE(sc, 668 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0); 669 670 SAFEXCEL_WRITE(sc, 671 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0); 672 673 /* Clear any pending interrupt. */ 674 SAFEXCEL_WRITE(sc, 675 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT, 676 SAFEXCEL_RDR_INTR_MASK); 677 678 /* Disable ring interrupt. */ 679 SAFEXCEL_WRITE(sc, 680 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i), 681 SAFEXCEL_RDR_IRQ(i)); 682 683 /* 684 * Command descriptor ring operations. 685 */ 686 687 /* Reset ring base address. */ 688 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) + 689 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0); 690 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) + 691 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0); 692 693 /* Clear the pending prepared counter. */ 694 SAFEXCEL_WRITE(sc, 695 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT, 696 SAFEXCEL_xDR_PREP_CLR_COUNT); 697 698 /* Clear the pending processed counter. */ 699 SAFEXCEL_WRITE(sc, 700 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT, 701 SAFEXCEL_xDR_PROC_CLR_COUNT); 702 703 SAFEXCEL_WRITE(sc, 704 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0); 705 SAFEXCEL_WRITE(sc, 706 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0); 707 708 SAFEXCEL_WRITE(sc, 709 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0); 710 711 /* Clear any pending interrupt. */ 712 SAFEXCEL_WRITE(sc, 713 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT, 714 SAFEXCEL_CDR_INTR_MASK); 715 } 716 } 717 718 static void 719 safexcel_enable_pe_engine(struct safexcel_softc *sc, int pe) 720 { 721 int i, ring_mask; 722 723 for (ring_mask = 0, i = 0; i < sc->sc_config.rings; i++) { 724 ring_mask <<= 1; 725 ring_mask |= 1; 726 } 727 728 /* Enable command descriptor rings. */ 729 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 730 SAFEXCEL_DxE_THR_CTRL_EN | ring_mask); 731 732 /* Enable result descriptor rings. */ 733 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 734 SAFEXCEL_DxE_THR_CTRL_EN | ring_mask); 735 736 /* Clear any HIA interrupt. */ 737 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK, 738 SAFEXCEL_AIC_G_ACK_HIA_MASK); 739 } 740 741 static void 742 safexcel_execute(struct safexcel_softc *sc, struct safexcel_ring *ring, 743 struct safexcel_request *req) 744 { 745 uint32_t ncdescs, nrdescs, nreqs; 746 int ringidx; 747 bool busy; 748 749 mtx_assert(&ring->mtx, MA_OWNED); 750 751 ringidx = req->sess->ringidx; 752 if (STAILQ_EMPTY(&ring->ready_requests)) 753 return; 754 busy = !STAILQ_EMPTY(&ring->queued_requests); 755 ncdescs = nrdescs = nreqs = 0; 756 while ((req = STAILQ_FIRST(&ring->ready_requests)) != NULL && 757 req->cdescs + ncdescs <= SAFEXCEL_MAX_BATCH_SIZE && 758 req->rdescs + nrdescs <= SAFEXCEL_MAX_BATCH_SIZE) { 759 STAILQ_REMOVE_HEAD(&ring->ready_requests, link); 760 STAILQ_INSERT_TAIL(&ring->queued_requests, req, link); 761 ncdescs += req->cdescs; 762 nrdescs += req->rdescs; 763 nreqs++; 764 } 765 766 if (!busy) { 767 SAFEXCEL_WRITE(sc, 768 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH, 769 SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | nreqs); 770 } 771 SAFEXCEL_WRITE(sc, 772 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT, 773 nrdescs * sc->sc_config.rd_offset * sizeof(uint32_t)); 774 SAFEXCEL_WRITE(sc, 775 SAFEXCEL_HIA_CDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT, 776 ncdescs * sc->sc_config.cd_offset * sizeof(uint32_t)); 777 } 778 779 static void 780 safexcel_init_rings(struct safexcel_softc *sc) 781 { 782 struct safexcel_cmd_descr *cdesc; 783 struct safexcel_ring *ring; 784 uint64_t atok; 785 int i, j; 786 787 for (i = 0; i < sc->sc_config.rings; i++) { 788 ring = &sc->sc_ring[i]; 789 790 snprintf(ring->lockname, sizeof(ring->lockname), 791 "safexcel_ring%d", i); 792 mtx_init(&ring->mtx, ring->lockname, NULL, MTX_DEF); 793 STAILQ_INIT(&ring->free_requests); 794 STAILQ_INIT(&ring->ready_requests); 795 STAILQ_INIT(&ring->queued_requests); 796 797 ring->cdr.read = ring->cdr.write = 0; 798 ring->rdr.read = ring->rdr.write = 0; 799 for (j = 0; j < SAFEXCEL_RING_SIZE; j++) { 800 cdesc = &ring->cdr.desc[j]; 801 atok = ring->dma_atok.paddr + 802 sc->sc_config.atok_offset * j; 803 cdesc->atok_lo = SAFEXCEL_ADDR_LO(atok); 804 cdesc->atok_hi = SAFEXCEL_ADDR_HI(atok); 805 } 806 } 807 } 808 809 static void 810 safexcel_dma_alloc_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, 811 int error) 812 { 813 struct safexcel_dma_mem *sdm; 814 815 if (error != 0) 816 return; 817 818 KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg)); 819 sdm = arg; 820 sdm->paddr = segs->ds_addr; 821 } 822 823 static int 824 safexcel_dma_alloc_mem(struct safexcel_softc *sc, struct safexcel_dma_mem *sdm, 825 bus_size_t size) 826 { 827 int error; 828 829 KASSERT(sdm->vaddr == NULL, 830 ("%s: DMA memory descriptor in use.", __func__)); 831 832 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 833 PAGE_SIZE, 0, /* alignment, boundary */ 834 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 835 BUS_SPACE_MAXADDR, /* highaddr */ 836 NULL, NULL, /* filtfunc, filtfuncarg */ 837 size, 1, /* maxsize, nsegments */ 838 size, BUS_DMA_COHERENT, /* maxsegsz, flags */ 839 NULL, NULL, /* lockfunc, lockfuncarg */ 840 &sdm->tag); /* dmat */ 841 if (error != 0) { 842 device_printf(sc->sc_dev, 843 "failed to allocate busdma tag, error %d\n", error); 844 goto err1; 845 } 846 847 error = bus_dmamem_alloc(sdm->tag, (void **)&sdm->vaddr, 848 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sdm->map); 849 if (error != 0) { 850 device_printf(sc->sc_dev, 851 "failed to allocate DMA safe memory, error %d\n", error); 852 goto err2; 853 } 854 855 error = bus_dmamap_load(sdm->tag, sdm->map, sdm->vaddr, size, 856 safexcel_dma_alloc_mem_cb, sdm, BUS_DMA_NOWAIT); 857 if (error != 0) { 858 device_printf(sc->sc_dev, 859 "cannot get address of the DMA memory, error %d\n", error); 860 goto err3; 861 } 862 863 return (0); 864 err3: 865 bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map); 866 err2: 867 bus_dma_tag_destroy(sdm->tag); 868 err1: 869 sdm->vaddr = NULL; 870 871 return (error); 872 } 873 874 static void 875 safexcel_dma_free_mem(struct safexcel_dma_mem *sdm) 876 { 877 bus_dmamap_unload(sdm->tag, sdm->map); 878 bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map); 879 bus_dma_tag_destroy(sdm->tag); 880 } 881 882 static void 883 safexcel_dma_free_rings(struct safexcel_softc *sc) 884 { 885 struct safexcel_ring *ring; 886 int i; 887 888 for (i = 0; i < sc->sc_config.rings; i++) { 889 ring = &sc->sc_ring[i]; 890 safexcel_dma_free_mem(&ring->cdr.dma); 891 safexcel_dma_free_mem(&ring->dma_atok); 892 safexcel_dma_free_mem(&ring->rdr.dma); 893 bus_dma_tag_destroy(ring->data_dtag); 894 mtx_destroy(&ring->mtx); 895 } 896 } 897 898 static int 899 safexcel_dma_init(struct safexcel_softc *sc) 900 { 901 struct safexcel_ring *ring; 902 bus_size_t size; 903 int error, i; 904 905 for (i = 0; i < sc->sc_config.rings; i++) { 906 ring = &sc->sc_ring[i]; 907 908 error = bus_dma_tag_create( 909 bus_get_dma_tag(sc->sc_dev),/* parent */ 910 1, 0, /* alignment, boundary */ 911 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 912 BUS_SPACE_MAXADDR, /* highaddr */ 913 NULL, NULL, /* filtfunc, filtfuncarg */ 914 SAFEXCEL_MAX_REQUEST_SIZE, /* maxsize */ 915 SAFEXCEL_MAX_FRAGMENTS, /* nsegments */ 916 SAFEXCEL_MAX_REQUEST_SIZE, /* maxsegsz */ 917 BUS_DMA_COHERENT, /* flags */ 918 NULL, NULL, /* lockfunc, lockfuncarg */ 919 &ring->data_dtag); /* dmat */ 920 if (error != 0) { 921 device_printf(sc->sc_dev, 922 "bus_dma_tag_create main failed; error %d\n", error); 923 return (error); 924 } 925 926 size = sizeof(uint32_t) * sc->sc_config.cd_offset * 927 SAFEXCEL_RING_SIZE; 928 error = safexcel_dma_alloc_mem(sc, &ring->cdr.dma, size); 929 if (error != 0) { 930 device_printf(sc->sc_dev, 931 "failed to allocate CDR DMA memory, error %d\n", 932 error); 933 goto err; 934 } 935 ring->cdr.desc = 936 (struct safexcel_cmd_descr *)ring->cdr.dma.vaddr; 937 938 /* Allocate additional CDR token memory. */ 939 size = (bus_size_t)sc->sc_config.atok_offset * 940 SAFEXCEL_RING_SIZE; 941 error = safexcel_dma_alloc_mem(sc, &ring->dma_atok, size); 942 if (error != 0) { 943 device_printf(sc->sc_dev, 944 "failed to allocate atoken DMA memory, error %d\n", 945 error); 946 goto err; 947 } 948 949 size = sizeof(uint32_t) * sc->sc_config.rd_offset * 950 SAFEXCEL_RING_SIZE; 951 error = safexcel_dma_alloc_mem(sc, &ring->rdr.dma, size); 952 if (error) { 953 device_printf(sc->sc_dev, 954 "failed to allocate RDR DMA memory, error %d\n", 955 error); 956 goto err; 957 } 958 ring->rdr.desc = 959 (struct safexcel_res_descr *)ring->rdr.dma.vaddr; 960 } 961 962 return (0); 963 err: 964 safexcel_dma_free_rings(sc); 965 return (error); 966 } 967 968 static void 969 safexcel_deinit_hw(struct safexcel_softc *sc) 970 { 971 safexcel_hw_reset_rings(sc); 972 safexcel_dma_free_rings(sc); 973 } 974 975 static int 976 safexcel_init_hw(struct safexcel_softc *sc) 977 { 978 int pe; 979 980 /* 23.3.7 Initialization */ 981 if (safexcel_configure(sc) != 0) 982 return (EINVAL); 983 984 if (safexcel_dma_init(sc) != 0) 985 return (ENOMEM); 986 987 safexcel_init_rings(sc); 988 989 safexcel_init_hia_bus_access(sc); 990 991 /* 23.3.7.2 Disable EIP-97 global Interrupts */ 992 safexcel_disable_global_interrupts(sc); 993 994 for (pe = 0; pe < sc->sc_config.pes; pe++) { 995 /* 23.3.7.3 Configure Data Fetch Engine */ 996 safexcel_configure_dfe_engine(sc, pe); 997 998 /* 23.3.7.4 Configure Data Store Engine */ 999 if (safexcel_configure_dse(sc, pe)) { 1000 safexcel_deinit_hw(sc); 1001 return (-1); 1002 } 1003 1004 /* 23.3.7.5 1. Protocol enables */ 1005 SAFEXCEL_WRITE(sc, 1006 SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION_EN(pe), 1007 0xffffffff); 1008 SAFEXCEL_WRITE(sc, 1009 SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION2_EN(pe), 1010 0xffffffff); 1011 } 1012 1013 safexcel_hw_prepare_rings(sc); 1014 1015 /* 23.3.7.5 Configure the Processing Engine(s). */ 1016 for (pe = 0; pe < sc->sc_config.pes; pe++) 1017 safexcel_enable_pe_engine(sc, pe); 1018 1019 safexcel_hw_setup_rings(sc); 1020 1021 return (0); 1022 } 1023 1024 static int 1025 safexcel_setup_dev_interrupts(struct safexcel_softc *sc) 1026 { 1027 int i, j; 1028 1029 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) { 1030 sc->sc_ih[i].sc = sc; 1031 sc->sc_ih[i].ring = i; 1032 1033 if (bus_setup_intr(sc->sc_dev, sc->sc_intr[i], 1034 INTR_TYPE_NET | INTR_MPSAFE, NULL, safexcel_ring_intr, 1035 &sc->sc_ih[i], &sc->sc_ih[i].handle)) { 1036 device_printf(sc->sc_dev, 1037 "couldn't setup interrupt %d\n", i); 1038 goto err; 1039 } 1040 } 1041 1042 return (0); 1043 1044 err: 1045 for (j = 0; j < i; j++) 1046 bus_teardown_intr(sc->sc_dev, sc->sc_intr[j], 1047 sc->sc_ih[j].handle); 1048 1049 return (ENXIO); 1050 } 1051 1052 static void 1053 safexcel_teardown_dev_interrupts(struct safexcel_softc *sc) 1054 { 1055 int i; 1056 1057 for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) 1058 bus_teardown_intr(sc->sc_dev, sc->sc_intr[i], 1059 sc->sc_ih[i].handle); 1060 } 1061 1062 static int 1063 safexcel_alloc_dev_resources(struct safexcel_softc *sc) 1064 { 1065 char name[16]; 1066 device_t dev; 1067 phandle_t node; 1068 int error, i, rid; 1069 1070 dev = sc->sc_dev; 1071 node = ofw_bus_get_node(dev); 1072 1073 rid = 0; 1074 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1075 RF_ACTIVE); 1076 if (sc->sc_res == NULL) { 1077 device_printf(dev, "couldn't allocate memory resources\n"); 1078 return (ENXIO); 1079 } 1080 1081 for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) { 1082 (void)snprintf(name, sizeof(name), "ring%d", i); 1083 error = ofw_bus_find_string_index(node, "interrupt-names", name, 1084 &rid); 1085 if (error != 0) 1086 break; 1087 1088 sc->sc_intr[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1089 RF_ACTIVE | RF_SHAREABLE); 1090 if (sc->sc_intr[i] == NULL) { 1091 error = ENXIO; 1092 goto out; 1093 } 1094 } 1095 if (i == 0) { 1096 device_printf(dev, "couldn't allocate interrupt resources\n"); 1097 error = ENXIO; 1098 goto out; 1099 } 1100 1101 return (0); 1102 1103 out: 1104 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) 1105 bus_release_resource(dev, SYS_RES_IRQ, 1106 rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]); 1107 bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_res), 1108 sc->sc_res); 1109 return (error); 1110 } 1111 1112 static void 1113 safexcel_free_dev_resources(struct safexcel_softc *sc) 1114 { 1115 int i; 1116 1117 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) 1118 bus_release_resource(sc->sc_dev, SYS_RES_IRQ, 1119 rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]); 1120 if (sc->sc_res != NULL) 1121 bus_release_resource(sc->sc_dev, SYS_RES_MEMORY, 1122 rman_get_rid(sc->sc_res), sc->sc_res); 1123 } 1124 1125 static int 1126 safexcel_probe(device_t dev) 1127 { 1128 struct safexcel_softc *sc; 1129 1130 if (!ofw_bus_status_okay(dev)) 1131 return (ENXIO); 1132 1133 sc = device_get_softc(dev); 1134 sc->sc_type = ofw_bus_search_compatible(dev, safexcel_compat)->ocd_data; 1135 if (sc->sc_type == 0) 1136 return (ENXIO); 1137 1138 device_set_desc(dev, "SafeXcel EIP-97 crypto accelerator"); 1139 1140 return (BUS_PROBE_DEFAULT); 1141 } 1142 1143 static int 1144 safexcel_attach(device_t dev) 1145 { 1146 struct sysctl_ctx_list *sctx; 1147 struct safexcel_softc *sc; 1148 struct safexcel_request *req; 1149 struct safexcel_ring *ring; 1150 int i, j, ringidx; 1151 1152 sc = device_get_softc(dev); 1153 sc->sc_dev = dev; 1154 sc->sc_cid = -1; 1155 1156 if (safexcel_alloc_dev_resources(sc)) 1157 goto err; 1158 1159 if (safexcel_setup_dev_interrupts(sc)) 1160 goto err1; 1161 1162 if (safexcel_init_hw(sc)) 1163 goto err2; 1164 1165 for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) { 1166 ring = &sc->sc_ring[ringidx]; 1167 1168 ring->cmd_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK); 1169 ring->res_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK); 1170 1171 ring->requests = mallocarray(SAFEXCEL_REQUESTS_PER_RING, 1172 sizeof(struct safexcel_request), M_SAFEXCEL, 1173 M_WAITOK | M_ZERO); 1174 1175 for (i = 0; i < SAFEXCEL_REQUESTS_PER_RING; i++) { 1176 req = &ring->requests[i]; 1177 req->sc = sc; 1178 if (bus_dmamap_create(ring->data_dtag, 1179 BUS_DMA_COHERENT, &req->dmap) != 0) { 1180 for (j = 0; j < i; j++) 1181 bus_dmamap_destroy(ring->data_dtag, 1182 ring->requests[j].dmap); 1183 goto err2; 1184 } 1185 if (safexcel_dma_alloc_mem(sc, &req->ctx, 1186 sizeof(struct safexcel_context_record)) != 0) { 1187 for (j = 0; j < i; j++) { 1188 bus_dmamap_destroy(ring->data_dtag, 1189 ring->requests[j].dmap); 1190 safexcel_dma_free_mem( 1191 &ring->requests[j].ctx); 1192 } 1193 goto err2; 1194 } 1195 STAILQ_INSERT_TAIL(&ring->free_requests, req, link); 1196 } 1197 } 1198 1199 sctx = device_get_sysctl_ctx(dev); 1200 SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1201 OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, 0, 1202 "Debug message verbosity"); 1203 1204 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safexcel_session), 1205 CRYPTOCAP_F_HARDWARE); 1206 if (sc->sc_cid < 0) 1207 goto err2; 1208 1209 return (0); 1210 1211 err2: 1212 safexcel_teardown_dev_interrupts(sc); 1213 err1: 1214 safexcel_free_dev_resources(sc); 1215 err: 1216 return (ENXIO); 1217 } 1218 1219 static int 1220 safexcel_detach(device_t dev) 1221 { 1222 struct safexcel_ring *ring; 1223 struct safexcel_softc *sc; 1224 int i, ringidx; 1225 1226 sc = device_get_softc(dev); 1227 1228 if (sc->sc_cid >= 0) 1229 crypto_unregister_all(sc->sc_cid); 1230 for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) { 1231 ring = &sc->sc_ring[ringidx]; 1232 for (i = 0; i < SAFEXCEL_REQUESTS_PER_RING; i++) { 1233 bus_dmamap_destroy(ring->data_dtag, 1234 ring->requests[i].dmap); 1235 safexcel_dma_free_mem(&ring->requests[i].ctx); 1236 } 1237 free(ring->requests, M_SAFEXCEL); 1238 sglist_free(ring->cmd_data); 1239 sglist_free(ring->res_data); 1240 } 1241 safexcel_deinit_hw(sc); 1242 safexcel_teardown_dev_interrupts(sc); 1243 safexcel_free_dev_resources(sc); 1244 1245 return (0); 1246 } 1247 1248 /* 1249 * Populate the request's context record with pre-computed key material. 1250 */ 1251 static int 1252 safexcel_set_context(struct safexcel_request *req) 1253 { 1254 const struct crypto_session_params *csp; 1255 struct cryptop *crp; 1256 struct safexcel_context_record *ctx; 1257 struct safexcel_session *sess; 1258 uint8_t *data; 1259 int off; 1260 1261 crp = req->crp; 1262 csp = crypto_get_params(crp->crp_session); 1263 sess = req->sess; 1264 1265 ctx = (struct safexcel_context_record *)req->ctx.vaddr; 1266 data = (uint8_t *)ctx->data; 1267 if (csp->csp_cipher_alg != 0) { 1268 if (crp->crp_cipher_key != NULL) 1269 memcpy(data, crp->crp_cipher_key, sess->klen); 1270 else 1271 memcpy(data, csp->csp_cipher_key, sess->klen); 1272 off = sess->klen; 1273 } else if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) { 1274 if (crp->crp_auth_key != NULL) 1275 memcpy(data, crp->crp_auth_key, sess->klen); 1276 else 1277 memcpy(data, csp->csp_auth_key, sess->klen); 1278 off = sess->klen; 1279 } else { 1280 off = 0; 1281 } 1282 1283 switch (csp->csp_cipher_alg) { 1284 case CRYPTO_AES_NIST_GCM_16: 1285 memcpy(data + off, sess->ghash_key, GMAC_BLOCK_LEN); 1286 off += GMAC_BLOCK_LEN; 1287 break; 1288 case CRYPTO_AES_CCM_16: 1289 memcpy(data + off, sess->xcbc_key, 1290 AES_BLOCK_LEN * 2 + sess->klen); 1291 off += AES_BLOCK_LEN * 2 + sess->klen; 1292 break; 1293 case CRYPTO_AES_XTS: 1294 memcpy(data + off, sess->tweak_key, sess->klen); 1295 off += sess->klen; 1296 break; 1297 } 1298 1299 switch (csp->csp_auth_alg) { 1300 case CRYPTO_AES_NIST_GMAC: 1301 memcpy(data + off, sess->ghash_key, GMAC_BLOCK_LEN); 1302 off += GMAC_BLOCK_LEN; 1303 break; 1304 case CRYPTO_SHA1_HMAC: 1305 case CRYPTO_SHA2_224_HMAC: 1306 case CRYPTO_SHA2_256_HMAC: 1307 case CRYPTO_SHA2_384_HMAC: 1308 case CRYPTO_SHA2_512_HMAC: 1309 memcpy(data + off, sess->hmac_ipad, sess->statelen); 1310 off += sess->statelen; 1311 memcpy(data + off, sess->hmac_opad, sess->statelen); 1312 off += sess->statelen; 1313 break; 1314 } 1315 1316 return (off); 1317 } 1318 1319 /* 1320 * Populate fields in the first command descriptor of the chain used to encode 1321 * the specified request. These fields indicate the algorithms used, the size 1322 * of the key material stored in the associated context record, the primitive 1323 * operations to be performed on input data, and the location of the IV if any. 1324 */ 1325 static void 1326 safexcel_set_command(struct safexcel_request *req, 1327 struct safexcel_cmd_descr *cdesc) 1328 { 1329 const struct crypto_session_params *csp; 1330 struct cryptop *crp; 1331 struct safexcel_session *sess; 1332 uint32_t ctrl0, ctrl1, ctxr_len; 1333 int alg; 1334 1335 crp = req->crp; 1336 csp = crypto_get_params(crp->crp_session); 1337 sess = req->sess; 1338 1339 ctrl0 = sess->alg | sess->digest | sess->hash; 1340 ctrl1 = sess->mode; 1341 1342 ctxr_len = safexcel_set_context(req) / sizeof(uint32_t); 1343 ctrl0 |= SAFEXCEL_CONTROL0_SIZE(ctxr_len); 1344 1345 alg = csp->csp_cipher_alg; 1346 if (alg == 0) 1347 alg = csp->csp_auth_alg; 1348 1349 switch (alg) { 1350 case CRYPTO_AES_CCM_16: 1351 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1352 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_ENCRYPT_OUT | 1353 SAFEXCEL_CONTROL0_KEY_EN; 1354 } else { 1355 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_DECRYPT_HASH_IN | 1356 SAFEXCEL_CONTROL0_KEY_EN; 1357 } 1358 ctrl1 |= SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 | 1359 SAFEXCEL_CONTROL1_IV2 | SAFEXCEL_CONTROL1_IV3; 1360 break; 1361 case CRYPTO_AES_CBC: 1362 case CRYPTO_AES_ICM: 1363 case CRYPTO_AES_XTS: 1364 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1365 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT | 1366 SAFEXCEL_CONTROL0_KEY_EN; 1367 if (csp->csp_auth_alg != 0) 1368 ctrl0 |= 1369 SAFEXCEL_CONTROL0_TYPE_ENCRYPT_HASH_OUT; 1370 } else { 1371 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN | 1372 SAFEXCEL_CONTROL0_KEY_EN; 1373 if (csp->csp_auth_alg != 0) 1374 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN; 1375 } 1376 break; 1377 case CRYPTO_AES_NIST_GCM_16: 1378 case CRYPTO_AES_NIST_GMAC: 1379 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op) || 1380 csp->csp_auth_alg != 0) { 1381 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT | 1382 SAFEXCEL_CONTROL0_KEY_EN | 1383 SAFEXCEL_CONTROL0_TYPE_HASH_OUT; 1384 } else { 1385 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN | 1386 SAFEXCEL_CONTROL0_KEY_EN | 1387 SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN; 1388 } 1389 if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) { 1390 ctrl1 |= SAFEXCEL_CONTROL1_COUNTER_MODE | 1391 SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 | 1392 SAFEXCEL_CONTROL1_IV2; 1393 } 1394 break; 1395 case CRYPTO_SHA1: 1396 case CRYPTO_SHA2_224: 1397 case CRYPTO_SHA2_256: 1398 case CRYPTO_SHA2_384: 1399 case CRYPTO_SHA2_512: 1400 ctrl0 |= SAFEXCEL_CONTROL0_RESTART_HASH; 1401 /* FALLTHROUGH */ 1402 case CRYPTO_SHA1_HMAC: 1403 case CRYPTO_SHA2_224_HMAC: 1404 case CRYPTO_SHA2_256_HMAC: 1405 case CRYPTO_SHA2_384_HMAC: 1406 case CRYPTO_SHA2_512_HMAC: 1407 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_OUT; 1408 break; 1409 } 1410 1411 cdesc->control_data.control0 = ctrl0; 1412 cdesc->control_data.control1 = ctrl1; 1413 } 1414 1415 /* 1416 * Construct a no-op instruction, used to pad input tokens. 1417 */ 1418 static void 1419 safexcel_instr_nop(struct safexcel_instr **instrp) 1420 { 1421 struct safexcel_instr *instr; 1422 1423 instr = *instrp; 1424 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT; 1425 instr->length = (1 << 2); 1426 instr->status = 0; 1427 instr->instructions = 0; 1428 1429 *instrp = instr + 1; 1430 } 1431 1432 /* 1433 * Insert the digest of the input payload. This is typically the last 1434 * instruction of a sequence. 1435 */ 1436 static void 1437 safexcel_instr_insert_digest(struct safexcel_instr **instrp, int len) 1438 { 1439 struct safexcel_instr *instr; 1440 1441 instr = *instrp; 1442 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT; 1443 instr->length = len; 1444 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH | 1445 SAFEXCEL_INSTR_STATUS_LAST_PACKET; 1446 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT | 1447 SAFEXCEL_INSTR_INSERT_HASH_DIGEST; 1448 1449 *instrp = instr + 1; 1450 } 1451 1452 /* 1453 * Retrieve and verify a digest. 1454 */ 1455 static void 1456 safexcel_instr_retrieve_digest(struct safexcel_instr **instrp, int len) 1457 { 1458 struct safexcel_instr *instr; 1459 1460 instr = *instrp; 1461 instr->opcode = SAFEXCEL_INSTR_OPCODE_RETRIEVE; 1462 instr->length = len; 1463 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH | 1464 SAFEXCEL_INSTR_STATUS_LAST_PACKET; 1465 instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST; 1466 instr++; 1467 1468 instr->opcode = SAFEXCEL_INSTR_OPCODE_VERIFY_FIELDS; 1469 instr->length = len | SAFEXCEL_INSTR_VERIFY_HASH; 1470 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH | 1471 SAFEXCEL_INSTR_STATUS_LAST_PACKET; 1472 instr->instructions = SAFEXCEL_INSTR_VERIFY_PADDING; 1473 1474 *instrp = instr + 1; 1475 } 1476 1477 static void 1478 safexcel_instr_temp_aes_block(struct safexcel_instr **instrp) 1479 { 1480 struct safexcel_instr *instr; 1481 1482 instr = *instrp; 1483 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT_REMOVE_RESULT; 1484 instr->length = 0; 1485 instr->status = 0; 1486 instr->instructions = AES_BLOCK_LEN; 1487 instr++; 1488 1489 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT; 1490 instr->length = AES_BLOCK_LEN; 1491 instr->status = 0; 1492 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT | 1493 SAFEXCEL_INSTR_DEST_CRYPTO; 1494 1495 *instrp = instr + 1; 1496 } 1497 1498 /* 1499 * Handle a request for an unauthenticated block cipher. 1500 */ 1501 static void 1502 safexcel_instr_cipher(struct safexcel_request *req, 1503 struct safexcel_instr *instr, struct safexcel_cmd_descr *cdesc) 1504 { 1505 struct cryptop *crp; 1506 1507 crp = req->crp; 1508 1509 /* Insert the payload. */ 1510 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION; 1511 instr->length = crp->crp_payload_length; 1512 instr->status = SAFEXCEL_INSTR_STATUS_LAST_PACKET | 1513 SAFEXCEL_INSTR_STATUS_LAST_HASH; 1514 instr->instructions = SAFEXCEL_INSTR_INS_LAST | 1515 SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_OUTPUT; 1516 1517 cdesc->additional_cdata_size = 1; 1518 } 1519 1520 static void 1521 safexcel_instr_eta(struct safexcel_request *req, struct safexcel_instr *instr, 1522 struct safexcel_cmd_descr *cdesc) 1523 { 1524 const struct crypto_session_params *csp; 1525 struct cryptop *crp; 1526 struct safexcel_instr *start; 1527 1528 crp = req->crp; 1529 csp = crypto_get_params(crp->crp_session); 1530 start = instr; 1531 1532 /* Insert the AAD. */ 1533 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION; 1534 instr->length = crp->crp_aad_length; 1535 instr->status = crp->crp_payload_length == 0 ? 1536 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0; 1537 instr->instructions = SAFEXCEL_INSTR_INS_LAST | 1538 SAFEXCEL_INSTR_DEST_HASH; 1539 instr++; 1540 1541 /* Encrypt any data left in the request. */ 1542 if (crp->crp_payload_length > 0) { 1543 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION; 1544 instr->length = crp->crp_payload_length; 1545 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH; 1546 instr->instructions = SAFEXCEL_INSTR_INS_LAST | 1547 SAFEXCEL_INSTR_DEST_CRYPTO | 1548 SAFEXCEL_INSTR_DEST_HASH | 1549 SAFEXCEL_INSTR_DEST_OUTPUT; 1550 instr++; 1551 } 1552 1553 /* 1554 * Compute the digest, or extract it and place it in the output stream. 1555 */ 1556 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1557 safexcel_instr_insert_digest(&instr, req->sess->digestlen); 1558 else 1559 safexcel_instr_retrieve_digest(&instr, req->sess->digestlen); 1560 cdesc->additional_cdata_size = instr - start; 1561 } 1562 1563 static void 1564 safexcel_instr_sha_hash(struct safexcel_request *req, 1565 struct safexcel_instr *instr) 1566 { 1567 struct cryptop *crp; 1568 struct safexcel_instr *start; 1569 1570 crp = req->crp; 1571 start = instr; 1572 1573 /* Pass the input data to the hash engine. */ 1574 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION; 1575 instr->length = crp->crp_payload_length; 1576 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH; 1577 instr->instructions = SAFEXCEL_INSTR_DEST_HASH; 1578 instr++; 1579 1580 /* Insert the hash result into the output stream. */ 1581 safexcel_instr_insert_digest(&instr, req->sess->digestlen); 1582 1583 /* Pad the rest of the inline instruction space. */ 1584 while (instr != start + SAFEXCEL_MAX_ITOKENS) 1585 safexcel_instr_nop(&instr); 1586 } 1587 1588 static void 1589 safexcel_instr_ccm(struct safexcel_request *req, struct safexcel_instr *instr, 1590 struct safexcel_cmd_descr *cdesc) 1591 { 1592 struct cryptop *crp; 1593 struct safexcel_instr *start; 1594 uint8_t *a0, *b0, *alenp, L; 1595 int aalign, blen; 1596 1597 crp = req->crp; 1598 start = instr; 1599 1600 /* 1601 * Construct two blocks, A0 and B0, used in encryption and 1602 * authentication, respectively. A0 is embedded in the token 1603 * descriptor, and B0 is inserted directly into the data stream using 1604 * instructions below. 1605 * 1606 * OCF seems to assume a 12-byte IV, fixing L (the payload length size) 1607 * at 3 bytes due to the layout of B0. This is fine since the driver 1608 * has a maximum of 65535 bytes anyway. 1609 */ 1610 blen = AES_BLOCK_LEN; 1611 L = 3; 1612 1613 a0 = (uint8_t *)&cdesc->control_data.token[0]; 1614 memset(a0, 0, blen); 1615 a0[0] = L - 1; 1616 memcpy(&a0[1], req->iv, AES_CCM_IV_LEN); 1617 1618 /* 1619 * Insert B0 and the AAD length into the input stream. 1620 */ 1621 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT; 1622 instr->length = blen + (crp->crp_aad_length > 0 ? 2 : 0); 1623 instr->status = 0; 1624 instr->instructions = SAFEXCEL_INSTR_DEST_HASH | 1625 SAFEXCEL_INSTR_INSERT_IMMEDIATE; 1626 instr++; 1627 1628 b0 = (uint8_t *)instr; 1629 memset(b0, 0, blen); 1630 b0[0] = 1631 (L - 1) | /* payload length size */ 1632 ((CCM_CBC_MAX_DIGEST_LEN - 2) / 2) << 3 /* digest length */ | 1633 (crp->crp_aad_length > 0 ? 1 : 0) << 6 /* AAD present bit */; 1634 memcpy(&b0[1], req->iv, AES_CCM_IV_LEN); 1635 b0[14] = crp->crp_payload_length >> 8; 1636 b0[15] = crp->crp_payload_length & 0xff; 1637 instr += blen / sizeof(*instr); 1638 1639 /* Insert the AAD length and data into the input stream. */ 1640 if (crp->crp_aad_length > 0) { 1641 alenp = (uint8_t *)instr; 1642 alenp[0] = crp->crp_aad_length >> 8; 1643 alenp[1] = crp->crp_aad_length & 0xff; 1644 alenp[2] = 0; 1645 alenp[3] = 0; 1646 instr++; 1647 1648 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION; 1649 instr->length = crp->crp_aad_length; 1650 instr->status = 0; 1651 instr->instructions = SAFEXCEL_INSTR_DEST_HASH; 1652 instr++; 1653 1654 /* Insert zero padding. */ 1655 aalign = (crp->crp_aad_length + 2) & (blen - 1); 1656 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT; 1657 instr->length = aalign == 0 ? 0 : 1658 blen - ((crp->crp_aad_length + 2) & (blen - 1)); 1659 instr->status = crp->crp_payload_length == 0 ? 1660 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0; 1661 instr->instructions = SAFEXCEL_INSTR_DEST_HASH; 1662 instr++; 1663 } 1664 1665 safexcel_instr_temp_aes_block(&instr); 1666 1667 /* Insert the cipher payload into the input stream. */ 1668 if (crp->crp_payload_length > 0) { 1669 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION; 1670 instr->length = crp->crp_payload_length; 1671 instr->status = (crp->crp_payload_length & (blen - 1)) == 0 ? 1672 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0; 1673 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT | 1674 SAFEXCEL_INSTR_DEST_CRYPTO | 1675 SAFEXCEL_INSTR_DEST_HASH | 1676 SAFEXCEL_INSTR_INS_LAST; 1677 instr++; 1678 1679 /* Insert zero padding. */ 1680 if (crp->crp_payload_length & (blen - 1)) { 1681 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT; 1682 instr->length = blen - 1683 (crp->crp_payload_length & (blen - 1)); 1684 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH; 1685 instr->instructions = SAFEXCEL_INSTR_DEST_HASH; 1686 instr++; 1687 } 1688 } 1689 1690 /* 1691 * Compute the digest, or extract it and place it in the output stream. 1692 */ 1693 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1694 safexcel_instr_insert_digest(&instr, req->sess->digestlen); 1695 else 1696 safexcel_instr_retrieve_digest(&instr, req->sess->digestlen); 1697 1698 cdesc->additional_cdata_size = instr - start; 1699 } 1700 1701 static void 1702 safexcel_instr_gcm(struct safexcel_request *req, struct safexcel_instr *instr, 1703 struct safexcel_cmd_descr *cdesc) 1704 { 1705 struct cryptop *crp; 1706 struct safexcel_instr *start; 1707 1708 memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN); 1709 cdesc->control_data.token[3] = htobe32(1); 1710 1711 crp = req->crp; 1712 start = instr; 1713 1714 /* Insert the AAD into the input stream. */ 1715 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION; 1716 instr->length = crp->crp_aad_length; 1717 instr->status = crp->crp_payload_length == 0 ? 1718 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0; 1719 instr->instructions = SAFEXCEL_INSTR_INS_LAST | 1720 SAFEXCEL_INSTR_DEST_HASH; 1721 instr++; 1722 1723 safexcel_instr_temp_aes_block(&instr); 1724 1725 /* Insert the cipher payload into the input stream. */ 1726 if (crp->crp_payload_length > 0) { 1727 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION; 1728 instr->length = crp->crp_payload_length; 1729 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH; 1730 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT | 1731 SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_HASH | 1732 SAFEXCEL_INSTR_INS_LAST; 1733 instr++; 1734 } 1735 1736 /* 1737 * Compute the digest, or extract it and place it in the output stream. 1738 */ 1739 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 1740 safexcel_instr_insert_digest(&instr, req->sess->digestlen); 1741 else 1742 safexcel_instr_retrieve_digest(&instr, req->sess->digestlen); 1743 1744 cdesc->additional_cdata_size = instr - start; 1745 } 1746 1747 static void 1748 safexcel_instr_gmac(struct safexcel_request *req, struct safexcel_instr *instr, 1749 struct safexcel_cmd_descr *cdesc) 1750 { 1751 struct cryptop *crp; 1752 struct safexcel_instr *start; 1753 1754 memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN); 1755 cdesc->control_data.token[3] = htobe32(1); 1756 1757 crp = req->crp; 1758 start = instr; 1759 1760 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION; 1761 instr->length = crp->crp_payload_length; 1762 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH; 1763 instr->instructions = SAFEXCEL_INSTR_INS_LAST | 1764 SAFEXCEL_INSTR_DEST_HASH; 1765 instr++; 1766 1767 safexcel_instr_temp_aes_block(&instr); 1768 1769 safexcel_instr_insert_digest(&instr, req->sess->digestlen); 1770 1771 cdesc->additional_cdata_size = instr - start; 1772 } 1773 1774 static void 1775 safexcel_set_token(struct safexcel_request *req) 1776 { 1777 const struct crypto_session_params *csp; 1778 struct safexcel_cmd_descr *cdesc; 1779 struct safexcel_instr *instr; 1780 struct safexcel_softc *sc; 1781 int ringidx; 1782 1783 csp = crypto_get_params(req->crp->crp_session); 1784 cdesc = req->cdesc; 1785 sc = req->sc; 1786 ringidx = req->sess->ringidx; 1787 1788 safexcel_set_command(req, cdesc); 1789 1790 /* 1791 * For keyless hash operations, the token instructions can be embedded 1792 * in the token itself. Otherwise we use an additional token descriptor 1793 * and the embedded instruction space is used to store the IV. 1794 */ 1795 if (csp->csp_cipher_alg == 0 && 1796 csp->csp_auth_alg != CRYPTO_AES_NIST_GMAC) { 1797 instr = (void *)cdesc->control_data.token; 1798 } else { 1799 instr = (void *)(sc->sc_ring[ringidx].dma_atok.vaddr + 1800 sc->sc_config.atok_offset * 1801 (cdesc - sc->sc_ring[ringidx].cdr.desc)); 1802 cdesc->control_data.options |= SAFEXCEL_OPTION_4_TOKEN_IV_CMD; 1803 } 1804 1805 switch (csp->csp_cipher_alg) { 1806 case CRYPTO_AES_NIST_GCM_16: 1807 safexcel_instr_gcm(req, instr, cdesc); 1808 break; 1809 case CRYPTO_AES_CCM_16: 1810 safexcel_instr_ccm(req, instr, cdesc); 1811 break; 1812 case CRYPTO_AES_XTS: 1813 memcpy(cdesc->control_data.token, req->iv, AES_XTS_IV_LEN); 1814 memset(cdesc->control_data.token + 1815 AES_XTS_IV_LEN / sizeof(uint32_t), 0, AES_XTS_IV_LEN); 1816 1817 safexcel_instr_cipher(req, instr, cdesc); 1818 break; 1819 case CRYPTO_AES_CBC: 1820 case CRYPTO_AES_ICM: 1821 memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_LEN); 1822 if (csp->csp_auth_alg != 0) 1823 safexcel_instr_eta(req, instr, cdesc); 1824 else 1825 safexcel_instr_cipher(req, instr, cdesc); 1826 break; 1827 default: 1828 switch (csp->csp_auth_alg) { 1829 case CRYPTO_SHA1: 1830 case CRYPTO_SHA1_HMAC: 1831 case CRYPTO_SHA2_224: 1832 case CRYPTO_SHA2_224_HMAC: 1833 case CRYPTO_SHA2_256: 1834 case CRYPTO_SHA2_256_HMAC: 1835 case CRYPTO_SHA2_384: 1836 case CRYPTO_SHA2_384_HMAC: 1837 case CRYPTO_SHA2_512: 1838 case CRYPTO_SHA2_512_HMAC: 1839 safexcel_instr_sha_hash(req, instr); 1840 break; 1841 case CRYPTO_AES_NIST_GMAC: 1842 safexcel_instr_gmac(req, instr, cdesc); 1843 break; 1844 default: 1845 panic("unhandled auth request %d", csp->csp_auth_alg); 1846 } 1847 break; 1848 } 1849 } 1850 1851 static struct safexcel_res_descr * 1852 safexcel_res_descr_add(struct safexcel_ring *ring, bool first, bool last, 1853 bus_addr_t data, uint32_t len) 1854 { 1855 struct safexcel_res_descr *rdesc; 1856 struct safexcel_res_descr_ring *rring; 1857 1858 mtx_assert(&ring->mtx, MA_OWNED); 1859 1860 rring = &ring->rdr; 1861 if ((rring->write + 1) % SAFEXCEL_RING_SIZE == rring->read) 1862 return (NULL); 1863 1864 rdesc = &rring->desc[rring->write]; 1865 rring->write = (rring->write + 1) % SAFEXCEL_RING_SIZE; 1866 1867 rdesc->particle_size = len; 1868 rdesc->rsvd0 = 0; 1869 rdesc->descriptor_overflow = 0; 1870 rdesc->buffer_overflow = 0; 1871 rdesc->last_seg = last; 1872 rdesc->first_seg = first; 1873 rdesc->result_size = 1874 sizeof(struct safexcel_res_data) / sizeof(uint32_t); 1875 rdesc->rsvd1 = 0; 1876 rdesc->data_lo = SAFEXCEL_ADDR_LO(data); 1877 rdesc->data_hi = SAFEXCEL_ADDR_HI(data); 1878 1879 if (first) { 1880 rdesc->result_data.packet_length = 0; 1881 rdesc->result_data.error_code = 0; 1882 } 1883 1884 return (rdesc); 1885 } 1886 1887 static struct safexcel_cmd_descr * 1888 safexcel_cmd_descr_add(struct safexcel_ring *ring, bool first, bool last, 1889 bus_addr_t data, uint32_t seglen, uint32_t reqlen, bus_addr_t context) 1890 { 1891 struct safexcel_cmd_descr *cdesc; 1892 struct safexcel_cmd_descr_ring *cring; 1893 1894 KASSERT(reqlen <= SAFEXCEL_MAX_REQUEST_SIZE, 1895 ("%s: request length %u too long", __func__, reqlen)); 1896 mtx_assert(&ring->mtx, MA_OWNED); 1897 1898 cring = &ring->cdr; 1899 if ((cring->write + 1) % SAFEXCEL_RING_SIZE == cring->read) 1900 return (NULL); 1901 1902 cdesc = &cring->desc[cring->write]; 1903 cring->write = (cring->write + 1) % SAFEXCEL_RING_SIZE; 1904 1905 cdesc->particle_size = seglen; 1906 cdesc->rsvd0 = 0; 1907 cdesc->last_seg = last; 1908 cdesc->first_seg = first; 1909 cdesc->additional_cdata_size = 0; 1910 cdesc->rsvd1 = 0; 1911 cdesc->data_lo = SAFEXCEL_ADDR_LO(data); 1912 cdesc->data_hi = SAFEXCEL_ADDR_HI(data); 1913 if (first) { 1914 cdesc->control_data.packet_length = reqlen; 1915 cdesc->control_data.options = SAFEXCEL_OPTION_IP | 1916 SAFEXCEL_OPTION_CP | SAFEXCEL_OPTION_CTX_CTRL_IN_CMD | 1917 SAFEXCEL_OPTION_RC_AUTO; 1918 cdesc->control_data.type = SAFEXCEL_TOKEN_TYPE_BYPASS; 1919 cdesc->control_data.context_lo = SAFEXCEL_ADDR_LO(context) | 1920 SAFEXCEL_CONTEXT_SMALL; 1921 cdesc->control_data.context_hi = SAFEXCEL_ADDR_HI(context); 1922 } 1923 1924 return (cdesc); 1925 } 1926 1927 static void 1928 safexcel_cmd_descr_rollback(struct safexcel_ring *ring, int count) 1929 { 1930 struct safexcel_cmd_descr_ring *cring; 1931 1932 mtx_assert(&ring->mtx, MA_OWNED); 1933 1934 cring = &ring->cdr; 1935 cring->write -= count; 1936 if (cring->write < 0) 1937 cring->write += SAFEXCEL_RING_SIZE; 1938 } 1939 1940 static void 1941 safexcel_res_descr_rollback(struct safexcel_ring *ring, int count) 1942 { 1943 struct safexcel_res_descr_ring *rring; 1944 1945 mtx_assert(&ring->mtx, MA_OWNED); 1946 1947 rring = &ring->rdr; 1948 rring->write -= count; 1949 if (rring->write < 0) 1950 rring->write += SAFEXCEL_RING_SIZE; 1951 } 1952 1953 static void 1954 safexcel_append_segs(bus_dma_segment_t *segs, int nseg, struct sglist *sg, 1955 int start, int len) 1956 { 1957 bus_dma_segment_t *seg; 1958 size_t seglen; 1959 int error, i; 1960 1961 for (i = 0; i < nseg && len > 0; i++) { 1962 seg = &segs[i]; 1963 1964 if (seg->ds_len <= start) { 1965 start -= seg->ds_len; 1966 continue; 1967 } 1968 1969 seglen = MIN(len, seg->ds_len - start); 1970 error = sglist_append_phys(sg, seg->ds_addr + start, seglen); 1971 if (error != 0) 1972 panic("%s: ran out of segments: %d", __func__, error); 1973 len -= seglen; 1974 start = 0; 1975 } 1976 } 1977 1978 static void 1979 safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, 1980 int error) 1981 { 1982 const struct crypto_session_params *csp; 1983 struct cryptop *crp; 1984 struct safexcel_cmd_descr *cdesc; 1985 struct safexcel_request *req; 1986 struct safexcel_ring *ring; 1987 struct safexcel_session *sess; 1988 struct sglist *sg; 1989 size_t inlen; 1990 int i; 1991 bool first, last; 1992 1993 req = arg; 1994 if (error != 0) { 1995 req->error = error; 1996 return; 1997 } 1998 1999 crp = req->crp; 2000 csp = crypto_get_params(crp->crp_session); 2001 sess = req->sess; 2002 ring = &req->sc->sc_ring[sess->ringidx]; 2003 2004 mtx_assert(&ring->mtx, MA_OWNED); 2005 2006 /* 2007 * Set up descriptors for input and output data. 2008 * 2009 * The processing engine programs require that any AAD comes first, 2010 * followed by the cipher plaintext, followed by the digest. Some 2011 * consumers place the digest first in the input buffer, in which case 2012 * we have to create an extra descriptor. 2013 * 2014 * As an optimization, unmodified data is not passed to the output 2015 * stream. 2016 */ 2017 sglist_reset(ring->cmd_data); 2018 sglist_reset(ring->res_data); 2019 if (crp->crp_aad_length != 0) { 2020 safexcel_append_segs(segs, nseg, ring->cmd_data, 2021 crp->crp_aad_start, crp->crp_aad_length); 2022 } 2023 safexcel_append_segs(segs, nseg, ring->cmd_data, 2024 crp->crp_payload_start, crp->crp_payload_length); 2025 if (csp->csp_cipher_alg != 0) { 2026 safexcel_append_segs(segs, nseg, ring->res_data, 2027 crp->crp_payload_start, crp->crp_payload_length); 2028 } 2029 if (sess->digestlen > 0) { 2030 if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) { 2031 safexcel_append_segs(segs, nseg, ring->cmd_data, 2032 crp->crp_digest_start, sess->digestlen); 2033 } else { 2034 safexcel_append_segs(segs, nseg, ring->res_data, 2035 crp->crp_digest_start, sess->digestlen); 2036 } 2037 } 2038 2039 sg = ring->cmd_data; 2040 if (sg->sg_nseg == 0) { 2041 /* 2042 * Fake a segment for the command descriptor if the input has 2043 * length zero. The EIP97 apparently does not handle 2044 * zero-length packets properly since subsequent requests return 2045 * bogus errors, so provide a dummy segment using the context 2046 * descriptor. 2047 */ 2048 (void)sglist_append_phys(sg, req->ctx.paddr, 1); 2049 } 2050 for (i = 0, inlen = 0; i < sg->sg_nseg; i++) 2051 inlen += sg->sg_segs[i].ss_len; 2052 for (i = 0; i < sg->sg_nseg; i++) { 2053 first = i == 0; 2054 last = i == sg->sg_nseg - 1; 2055 2056 cdesc = safexcel_cmd_descr_add(ring, first, last, 2057 sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len, 2058 (uint32_t)inlen, req->ctx.paddr); 2059 if (cdesc == NULL) { 2060 safexcel_cmd_descr_rollback(ring, i); 2061 req->error = EAGAIN; 2062 return; 2063 } 2064 if (i == 0) 2065 req->cdesc = cdesc; 2066 } 2067 req->cdescs = sg->sg_nseg; 2068 2069 sg = ring->res_data; 2070 if (sg->sg_nseg == 0) { 2071 /* 2072 * We need a result descriptor even if the output stream will be 2073 * empty, for example when verifying an AAD digest. 2074 */ 2075 sg->sg_segs[0].ss_paddr = 0; 2076 sg->sg_segs[0].ss_len = 0; 2077 sg->sg_nseg = 1; 2078 } 2079 for (i = 0; i < sg->sg_nseg; i++) { 2080 first = i == 0; 2081 last = i == sg->sg_nseg - 1; 2082 2083 if (safexcel_res_descr_add(ring, first, last, 2084 sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len) == NULL) { 2085 safexcel_cmd_descr_rollback(ring, 2086 ring->cmd_data->sg_nseg); 2087 safexcel_res_descr_rollback(ring, i); 2088 req->error = EAGAIN; 2089 return; 2090 } 2091 } 2092 req->rdescs = sg->sg_nseg; 2093 } 2094 2095 static int 2096 safexcel_create_chain(struct safexcel_ring *ring, struct safexcel_request *req) 2097 { 2098 int error; 2099 2100 req->error = 0; 2101 req->cdescs = req->rdescs = 0; 2102 2103 error = bus_dmamap_load_crp(ring->data_dtag, req->dmap, req->crp, 2104 safexcel_create_chain_cb, req, BUS_DMA_NOWAIT); 2105 if (error == 0) 2106 req->dmap_loaded = true; 2107 2108 if (req->error != 0) 2109 error = req->error; 2110 2111 return (error); 2112 } 2113 2114 static bool 2115 safexcel_probe_cipher(const struct crypto_session_params *csp) 2116 { 2117 switch (csp->csp_cipher_alg) { 2118 case CRYPTO_AES_CBC: 2119 case CRYPTO_AES_ICM: 2120 if (csp->csp_ivlen != AES_BLOCK_LEN) 2121 return (false); 2122 break; 2123 case CRYPTO_AES_XTS: 2124 if (csp->csp_ivlen != AES_XTS_IV_LEN) 2125 return (false); 2126 break; 2127 default: 2128 return (false); 2129 } 2130 2131 return (true); 2132 } 2133 2134 /* 2135 * Determine whether the driver can implement a session with the requested 2136 * parameters. 2137 */ 2138 static int 2139 safexcel_probesession(device_t dev, const struct crypto_session_params *csp) 2140 { 2141 switch (csp->csp_mode) { 2142 case CSP_MODE_CIPHER: 2143 if (!safexcel_probe_cipher(csp)) 2144 return (EINVAL); 2145 break; 2146 case CSP_MODE_DIGEST: 2147 switch (csp->csp_auth_alg) { 2148 case CRYPTO_AES_NIST_GMAC: 2149 if (csp->csp_ivlen != AES_GCM_IV_LEN) 2150 return (EINVAL); 2151 break; 2152 case CRYPTO_SHA1: 2153 case CRYPTO_SHA1_HMAC: 2154 case CRYPTO_SHA2_224: 2155 case CRYPTO_SHA2_224_HMAC: 2156 case CRYPTO_SHA2_256: 2157 case CRYPTO_SHA2_256_HMAC: 2158 case CRYPTO_SHA2_384: 2159 case CRYPTO_SHA2_384_HMAC: 2160 case CRYPTO_SHA2_512: 2161 case CRYPTO_SHA2_512_HMAC: 2162 break; 2163 default: 2164 return (EINVAL); 2165 } 2166 break; 2167 case CSP_MODE_AEAD: 2168 switch (csp->csp_cipher_alg) { 2169 case CRYPTO_AES_NIST_GCM_16: 2170 if (csp->csp_ivlen != AES_GCM_IV_LEN) 2171 return (EINVAL); 2172 break; 2173 case CRYPTO_AES_CCM_16: 2174 if (csp->csp_ivlen != AES_CCM_IV_LEN) 2175 return (EINVAL); 2176 break; 2177 default: 2178 return (EINVAL); 2179 } 2180 break; 2181 case CSP_MODE_ETA: 2182 if (!safexcel_probe_cipher(csp)) 2183 return (EINVAL); 2184 switch (csp->csp_cipher_alg) { 2185 case CRYPTO_AES_CBC: 2186 case CRYPTO_AES_ICM: 2187 /* 2188 * The EIP-97 does not support combining AES-XTS with 2189 * hash operations. 2190 */ 2191 if (csp->csp_auth_alg != CRYPTO_SHA1_HMAC && 2192 csp->csp_auth_alg != CRYPTO_SHA2_224_HMAC && 2193 csp->csp_auth_alg != CRYPTO_SHA2_256_HMAC && 2194 csp->csp_auth_alg != CRYPTO_SHA2_384_HMAC && 2195 csp->csp_auth_alg != CRYPTO_SHA2_512_HMAC) 2196 return (EINVAL); 2197 break; 2198 default: 2199 return (EINVAL); 2200 } 2201 break; 2202 default: 2203 return (EINVAL); 2204 } 2205 2206 return (CRYPTODEV_PROBE_HARDWARE); 2207 } 2208 2209 /* 2210 * Pre-compute the hash key used in GHASH, which is a block of zeroes encrypted 2211 * using the cipher key. 2212 */ 2213 static void 2214 safexcel_setkey_ghash(struct safexcel_session *sess, const uint8_t *key, 2215 int klen) 2216 { 2217 uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)]; 2218 uint8_t zeros[AES_BLOCK_LEN]; 2219 int i, rounds; 2220 2221 memset(zeros, 0, sizeof(zeros)); 2222 2223 rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY); 2224 rijndaelEncrypt(ks, rounds, zeros, (uint8_t *)sess->ghash_key); 2225 for (i = 0; i < GMAC_BLOCK_LEN / sizeof(uint32_t); i++) 2226 sess->ghash_key[i] = htobe32(sess->ghash_key[i]); 2227 2228 explicit_bzero(ks, sizeof(ks)); 2229 } 2230 2231 /* 2232 * Pre-compute the combined CBC-MAC key, which consists of three keys K1, K2, K3 2233 * in the hardware implementation. K1 is the cipher key and comes last in the 2234 * buffer since K2 and K3 have a fixed size of AES_BLOCK_LEN. For now XCBC-MAC 2235 * is not implemented so K2 and K3 are fixed. 2236 */ 2237 static void 2238 safexcel_setkey_xcbcmac(struct safexcel_session *sess, const uint8_t *key, 2239 int klen) 2240 { 2241 int i, off; 2242 2243 memset(sess->xcbc_key, 0, sizeof(sess->xcbc_key)); 2244 off = 2 * AES_BLOCK_LEN / sizeof(uint32_t); 2245 for (i = 0; i < klen / sizeof(uint32_t); i++, key += 4) 2246 sess->xcbc_key[i + off] = htobe32(le32dec(key)); 2247 } 2248 2249 static void 2250 safexcel_setkey_hmac_digest(struct auth_hash *ahash, union authctx *ctx, 2251 char *buf) 2252 { 2253 int hashwords, i; 2254 2255 switch (ahash->type) { 2256 case CRYPTO_SHA1_HMAC: 2257 hashwords = ahash->hashsize / sizeof(uint32_t); 2258 for (i = 0; i < hashwords; i++) 2259 ((uint32_t *)buf)[i] = htobe32(ctx->sha1ctx.h.b32[i]); 2260 break; 2261 case CRYPTO_SHA2_224_HMAC: 2262 hashwords = auth_hash_hmac_sha2_256.hashsize / sizeof(uint32_t); 2263 for (i = 0; i < hashwords; i++) 2264 ((uint32_t *)buf)[i] = htobe32(ctx->sha224ctx.state[i]); 2265 break; 2266 case CRYPTO_SHA2_256_HMAC: 2267 hashwords = ahash->hashsize / sizeof(uint32_t); 2268 for (i = 0; i < hashwords; i++) 2269 ((uint32_t *)buf)[i] = htobe32(ctx->sha256ctx.state[i]); 2270 break; 2271 case CRYPTO_SHA2_384_HMAC: 2272 hashwords = auth_hash_hmac_sha2_512.hashsize / sizeof(uint64_t); 2273 for (i = 0; i < hashwords; i++) 2274 ((uint64_t *)buf)[i] = htobe64(ctx->sha384ctx.state[i]); 2275 break; 2276 case CRYPTO_SHA2_512_HMAC: 2277 hashwords = ahash->hashsize / sizeof(uint64_t); 2278 for (i = 0; i < hashwords; i++) 2279 ((uint64_t *)buf)[i] = htobe64(ctx->sha512ctx.state[i]); 2280 break; 2281 } 2282 } 2283 2284 /* 2285 * Pre-compute the inner and outer digests used in the HMAC algorithm. 2286 */ 2287 static void 2288 safexcel_setkey_hmac(const struct crypto_session_params *csp, 2289 struct safexcel_session *sess, const uint8_t *key, int klen) 2290 { 2291 union authctx ctx; 2292 struct auth_hash *ahash; 2293 2294 ahash = crypto_auth_hash(csp); 2295 hmac_init_ipad(ahash, key, klen, &ctx); 2296 safexcel_setkey_hmac_digest(ahash, &ctx, sess->hmac_ipad); 2297 hmac_init_opad(ahash, key, klen, &ctx); 2298 safexcel_setkey_hmac_digest(ahash, &ctx, sess->hmac_opad); 2299 explicit_bzero(&ctx, ahash->ctxsize); 2300 } 2301 2302 static void 2303 safexcel_setkey_xts(struct safexcel_session *sess, const uint8_t *key, int klen) 2304 { 2305 memcpy(sess->tweak_key, key + klen / 2, klen / 2); 2306 } 2307 2308 static void 2309 safexcel_setkey(struct safexcel_session *sess, 2310 const struct crypto_session_params *csp, struct cryptop *crp) 2311 { 2312 const uint8_t *akey, *ckey; 2313 int aklen, cklen; 2314 2315 aklen = csp->csp_auth_klen; 2316 cklen = csp->csp_cipher_klen; 2317 akey = ckey = NULL; 2318 if (crp != NULL) { 2319 akey = crp->crp_auth_key; 2320 ckey = crp->crp_cipher_key; 2321 } 2322 if (akey == NULL) 2323 akey = csp->csp_auth_key; 2324 if (ckey == NULL) 2325 ckey = csp->csp_cipher_key; 2326 2327 sess->klen = cklen; 2328 switch (csp->csp_cipher_alg) { 2329 case CRYPTO_AES_NIST_GCM_16: 2330 safexcel_setkey_ghash(sess, ckey, cklen); 2331 break; 2332 case CRYPTO_AES_CCM_16: 2333 safexcel_setkey_xcbcmac(sess, ckey, cklen); 2334 break; 2335 case CRYPTO_AES_XTS: 2336 safexcel_setkey_xts(sess, ckey, cklen); 2337 sess->klen /= 2; 2338 break; 2339 } 2340 2341 switch (csp->csp_auth_alg) { 2342 case CRYPTO_SHA1_HMAC: 2343 case CRYPTO_SHA2_224_HMAC: 2344 case CRYPTO_SHA2_256_HMAC: 2345 case CRYPTO_SHA2_384_HMAC: 2346 case CRYPTO_SHA2_512_HMAC: 2347 safexcel_setkey_hmac(csp, sess, akey, aklen); 2348 break; 2349 case CRYPTO_AES_NIST_GMAC: 2350 sess->klen = aklen; 2351 safexcel_setkey_ghash(sess, akey, aklen); 2352 break; 2353 } 2354 } 2355 2356 static uint32_t 2357 safexcel_aes_algid(int keylen) 2358 { 2359 switch (keylen) { 2360 case 16: 2361 return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES128); 2362 case 24: 2363 return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES192); 2364 case 32: 2365 return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES256); 2366 default: 2367 panic("invalid AES key length %d", keylen); 2368 } 2369 } 2370 2371 static uint32_t 2372 safexcel_aes_ccm_hashid(int keylen) 2373 { 2374 switch (keylen) { 2375 case 16: 2376 return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC128); 2377 case 24: 2378 return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC192); 2379 case 32: 2380 return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC256); 2381 default: 2382 panic("invalid AES key length %d", keylen); 2383 } 2384 } 2385 2386 static uint32_t 2387 safexcel_sha_hashid(int alg) 2388 { 2389 switch (alg) { 2390 case CRYPTO_SHA1: 2391 case CRYPTO_SHA1_HMAC: 2392 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA1); 2393 case CRYPTO_SHA2_224: 2394 case CRYPTO_SHA2_224_HMAC: 2395 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA224); 2396 case CRYPTO_SHA2_256: 2397 case CRYPTO_SHA2_256_HMAC: 2398 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA256); 2399 case CRYPTO_SHA2_384: 2400 case CRYPTO_SHA2_384_HMAC: 2401 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA384); 2402 case CRYPTO_SHA2_512: 2403 case CRYPTO_SHA2_512_HMAC: 2404 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA512); 2405 default: 2406 __assert_unreachable(); 2407 } 2408 } 2409 2410 static int 2411 safexcel_sha_hashlen(int alg) 2412 { 2413 switch (alg) { 2414 case CRYPTO_SHA1: 2415 case CRYPTO_SHA1_HMAC: 2416 return (SHA1_HASH_LEN); 2417 case CRYPTO_SHA2_224: 2418 case CRYPTO_SHA2_224_HMAC: 2419 return (SHA2_224_HASH_LEN); 2420 case CRYPTO_SHA2_256: 2421 case CRYPTO_SHA2_256_HMAC: 2422 return (SHA2_256_HASH_LEN); 2423 case CRYPTO_SHA2_384: 2424 case CRYPTO_SHA2_384_HMAC: 2425 return (SHA2_384_HASH_LEN); 2426 case CRYPTO_SHA2_512: 2427 case CRYPTO_SHA2_512_HMAC: 2428 return (SHA2_512_HASH_LEN); 2429 default: 2430 __assert_unreachable(); 2431 } 2432 } 2433 2434 static int 2435 safexcel_sha_statelen(int alg) 2436 { 2437 switch (alg) { 2438 case CRYPTO_SHA1: 2439 case CRYPTO_SHA1_HMAC: 2440 return (SHA1_HASH_LEN); 2441 case CRYPTO_SHA2_224: 2442 case CRYPTO_SHA2_224_HMAC: 2443 case CRYPTO_SHA2_256: 2444 case CRYPTO_SHA2_256_HMAC: 2445 return (SHA2_256_HASH_LEN); 2446 case CRYPTO_SHA2_384: 2447 case CRYPTO_SHA2_384_HMAC: 2448 case CRYPTO_SHA2_512: 2449 case CRYPTO_SHA2_512_HMAC: 2450 return (SHA2_512_HASH_LEN); 2451 default: 2452 __assert_unreachable(); 2453 } 2454 } 2455 2456 static int 2457 safexcel_newsession(device_t dev, crypto_session_t cses, 2458 const struct crypto_session_params *csp) 2459 { 2460 struct safexcel_session *sess; 2461 struct safexcel_softc *sc; 2462 2463 sc = device_get_softc(dev); 2464 sess = crypto_get_driver_session(cses); 2465 2466 switch (csp->csp_auth_alg) { 2467 case CRYPTO_SHA1: 2468 case CRYPTO_SHA2_224: 2469 case CRYPTO_SHA2_256: 2470 case CRYPTO_SHA2_384: 2471 case CRYPTO_SHA2_512: 2472 sess->digest = SAFEXCEL_CONTROL0_DIGEST_PRECOMPUTED; 2473 sess->hash = safexcel_sha_hashid(csp->csp_auth_alg); 2474 sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg); 2475 sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg); 2476 break; 2477 case CRYPTO_SHA1_HMAC: 2478 case CRYPTO_SHA2_224_HMAC: 2479 case CRYPTO_SHA2_256_HMAC: 2480 case CRYPTO_SHA2_384_HMAC: 2481 case CRYPTO_SHA2_512_HMAC: 2482 sess->digest = SAFEXCEL_CONTROL0_DIGEST_HMAC; 2483 sess->hash = safexcel_sha_hashid(csp->csp_auth_alg); 2484 sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg); 2485 sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg); 2486 break; 2487 case CRYPTO_AES_NIST_GMAC: 2488 sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC; 2489 sess->digestlen = GMAC_DIGEST_LEN; 2490 sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH; 2491 sess->alg = safexcel_aes_algid(csp->csp_auth_klen); 2492 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM; 2493 break; 2494 } 2495 2496 switch (csp->csp_cipher_alg) { 2497 case CRYPTO_AES_NIST_GCM_16: 2498 sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC; 2499 sess->digestlen = GMAC_DIGEST_LEN; 2500 sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH; 2501 sess->alg = safexcel_aes_algid(csp->csp_cipher_klen); 2502 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM; 2503 break; 2504 case CRYPTO_AES_CCM_16: 2505 sess->hash = safexcel_aes_ccm_hashid(csp->csp_cipher_klen); 2506 sess->digest = SAFEXCEL_CONTROL0_DIGEST_CCM; 2507 sess->digestlen = CCM_CBC_MAX_DIGEST_LEN; 2508 sess->alg = safexcel_aes_algid(csp->csp_cipher_klen); 2509 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CCM; 2510 break; 2511 case CRYPTO_AES_CBC: 2512 sess->alg = safexcel_aes_algid(csp->csp_cipher_klen); 2513 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CBC; 2514 break; 2515 case CRYPTO_AES_ICM: 2516 sess->alg = safexcel_aes_algid(csp->csp_cipher_klen); 2517 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CTR; 2518 break; 2519 case CRYPTO_AES_XTS: 2520 sess->alg = safexcel_aes_algid(csp->csp_cipher_klen / 2); 2521 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_XTS; 2522 break; 2523 } 2524 2525 if (csp->csp_auth_mlen != 0) 2526 sess->digestlen = csp->csp_auth_mlen; 2527 2528 safexcel_setkey(sess, csp, NULL); 2529 2530 /* Bind each session to a fixed ring to minimize lock contention. */ 2531 sess->ringidx = atomic_fetchadd_int(&sc->sc_ringidx, 1); 2532 sess->ringidx %= sc->sc_config.rings; 2533 2534 return (0); 2535 } 2536 2537 static int 2538 safexcel_process(device_t dev, struct cryptop *crp, int hint) 2539 { 2540 const struct crypto_session_params *csp; 2541 struct safexcel_request *req; 2542 struct safexcel_ring *ring; 2543 struct safexcel_session *sess; 2544 struct safexcel_softc *sc; 2545 int error; 2546 2547 sc = device_get_softc(dev); 2548 sess = crypto_get_driver_session(crp->crp_session); 2549 csp = crypto_get_params(crp->crp_session); 2550 2551 if (__predict_false(crypto_buffer_len(&crp->crp_buf) > 2552 SAFEXCEL_MAX_REQUEST_SIZE)) { 2553 crp->crp_etype = E2BIG; 2554 crypto_done(crp); 2555 return (0); 2556 } 2557 2558 if (crp->crp_cipher_key != NULL || crp->crp_auth_key != NULL) 2559 safexcel_setkey(sess, csp, crp); 2560 2561 ring = &sc->sc_ring[sess->ringidx]; 2562 mtx_lock(&ring->mtx); 2563 req = safexcel_alloc_request(sc, ring); 2564 if (__predict_false(req == NULL)) { 2565 ring->blocked = CRYPTO_SYMQ; 2566 mtx_unlock(&ring->mtx); 2567 return (ERESTART); 2568 } 2569 2570 req->crp = crp; 2571 req->sess = sess; 2572 2573 crypto_read_iv(crp, req->iv); 2574 2575 error = safexcel_create_chain(ring, req); 2576 if (__predict_false(error != 0)) { 2577 safexcel_free_request(ring, req); 2578 mtx_unlock(&ring->mtx); 2579 crp->crp_etype = error; 2580 crypto_done(crp); 2581 return (0); 2582 } 2583 2584 safexcel_set_token(req); 2585 2586 bus_dmamap_sync(ring->data_dtag, req->dmap, 2587 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2588 bus_dmamap_sync(req->ctx.tag, req->ctx.map, 2589 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2590 bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map, 2591 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2592 bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map, 2593 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2594 bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map, 2595 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2596 2597 safexcel_enqueue_request(sc, ring, req); 2598 2599 if ((hint & CRYPTO_HINT_MORE) == 0) 2600 safexcel_execute(sc, ring, req); 2601 mtx_unlock(&ring->mtx); 2602 2603 return (0); 2604 } 2605 2606 static device_method_t safexcel_methods[] = { 2607 /* Device interface */ 2608 DEVMETHOD(device_probe, safexcel_probe), 2609 DEVMETHOD(device_attach, safexcel_attach), 2610 DEVMETHOD(device_detach, safexcel_detach), 2611 2612 /* Cryptodev interface */ 2613 DEVMETHOD(cryptodev_probesession, safexcel_probesession), 2614 DEVMETHOD(cryptodev_newsession, safexcel_newsession), 2615 DEVMETHOD(cryptodev_process, safexcel_process), 2616 2617 DEVMETHOD_END 2618 }; 2619 2620 static devclass_t safexcel_devclass; 2621 2622 static driver_t safexcel_driver = { 2623 .name = "safexcel", 2624 .methods = safexcel_methods, 2625 .size = sizeof(struct safexcel_softc), 2626 }; 2627 2628 DRIVER_MODULE(safexcel, simplebus, safexcel_driver, safexcel_devclass, 0, 0); 2629 MODULE_VERSION(safexcel, 1); 2630 MODULE_DEPEND(safexcel, crypto, 1, 1, 1); 2631