/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020, 2021 Rubicon Communications, LLC (Netgate)
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>

#include <crypto/rijndael/rijndael.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "cryptodev_if.h"

#include "safexcel_reg.h"
#include "safexcel_var.h"

/*
 * We only support the EIP97 for now.
 */
static struct ofw_compat_data safexcel_compat[] = {
	{ "inside-secure,safexcel-eip97ies", (uintptr_t)97 },
	{ "inside-secure,safexcel-eip97", (uintptr_t)97 },
	{ NULL, 0 }
};

const struct safexcel_reg_offsets eip97_regs_offset = {
	.hia_aic = SAFEXCEL_EIP97_HIA_AIC_BASE,
	.hia_aic_g = SAFEXCEL_EIP97_HIA_AIC_G_BASE,
	.hia_aic_r = SAFEXCEL_EIP97_HIA_AIC_R_BASE,
	.hia_aic_xdr = SAFEXCEL_EIP97_HIA_AIC_xDR_BASE,
	.hia_dfe = SAFEXCEL_EIP97_HIA_DFE_BASE,
	.hia_dfe_thr = SAFEXCEL_EIP97_HIA_DFE_THR_BASE,
	.hia_dse = SAFEXCEL_EIP97_HIA_DSE_BASE,
	.hia_dse_thr = SAFEXCEL_EIP97_HIA_DSE_THR_BASE,
	.hia_gen_cfg = SAFEXCEL_EIP97_HIA_GEN_CFG_BASE,
	.pe = SAFEXCEL_EIP97_PE_BASE,
};

const struct safexcel_reg_offsets eip197_regs_offset = {
	.hia_aic = SAFEXCEL_EIP197_HIA_AIC_BASE,
	.hia_aic_g = SAFEXCEL_EIP197_HIA_AIC_G_BASE,
	.hia_aic_r = SAFEXCEL_EIP197_HIA_AIC_R_BASE,
	.hia_aic_xdr = SAFEXCEL_EIP197_HIA_AIC_xDR_BASE,
	.hia_dfe = SAFEXCEL_EIP197_HIA_DFE_BASE,
	.hia_dfe_thr = SAFEXCEL_EIP197_HIA_DFE_THR_BASE,
	.hia_dse = SAFEXCEL_EIP197_HIA_DSE_BASE,
	.hia_dse_thr = SAFEXCEL_EIP197_HIA_DSE_THR_BASE,
	.hia_gen_cfg = SAFEXCEL_EIP197_HIA_GEN_CFG_BASE,
	.pe = SAFEXCEL_EIP197_PE_BASE,
};

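/*
 * Return the oldest submitted request, i.e., the one at the command ring's
 * read index.
 */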
static struct safexcel_request *
safexcel_next_request(struct safexcel_ring *ring)
{
	int i;

	i = ring->cdr.read;
	KASSERT(i >= 0 && i < SAFEXCEL_RING_SIZE,
	    ("%s: out of bounds request index %d", __func__, i));
	return (&ring->requests[i]);
}

static struct safexcel_cmd_descr *
safexcel_cmd_descr_next(struct safexcel_cmd_descr_ring *ring)
{
	struct safexcel_cmd_descr *cdesc;

	if (ring->write == ring->read)
		return (NULL);
	cdesc = &ring->desc[ring->read];
	ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
	return (cdesc);
}

static struct safexcel_res_descr *
safexcel_res_descr_next(struct safexcel_res_descr_ring *ring)
{
	struct safexcel_res_descr *rdesc;

	if (ring->write == ring->read)
		return (NULL);
	rdesc = &ring->desc[ring->read];
	ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
	return (rdesc);
}

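/*
 * Reserve the request slot at the command ring's write index.  One slot is
 * always left unused so that a full ring can be distinguished from an empty
 * one.
 */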
static struct safexcel_request *
safexcel_alloc_request(struct safexcel_softc *sc, struct safexcel_ring *ring)
{
	int i;

	mtx_assert(&ring->mtx, MA_OWNED);

	i = ring->cdr.write;
	if ((i + 1) % SAFEXCEL_RING_SIZE == ring->cdr.read)
		return (NULL);
	return (&ring->requests[i]);
}

static void
safexcel_free_request(struct safexcel_ring *ring, struct safexcel_request *req)
{
	struct safexcel_context_record *ctx;

	mtx_assert(&ring->mtx, MA_OWNED);

	if (req->dmap_loaded) {
		bus_dmamap_unload(ring->data_dtag, req->dmap);
		req->dmap_loaded = false;
	}
	ctx = (struct safexcel_context_record *)req->ctx.vaddr;
	explicit_bzero(ctx->data, sizeof(ctx->data));
	explicit_bzero(req->iv, sizeof(req->iv));
}

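/*
 * Process completed requests on the given ring: consume their command and
 * result descriptors, record any errors, and hand the finished operations
 * back to the crypto framework.
 */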
static void
safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx)
{
	TAILQ_HEAD(, cryptop) cq;
	struct cryptop *crp, *tmp;
	struct safexcel_cmd_descr *cdesc __diagused;
	struct safexcel_res_descr *rdesc;
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	uint32_t blocked, error, i, nrdescs, nreqs;

	blocked = 0;
	ring = &sc->sc_ring[ringidx];

	nreqs = SAFEXCEL_READ(sc,
	    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT);
	nreqs >>= SAFEXCEL_xDR_PROC_xD_PKT_OFFSET;
	nreqs &= SAFEXCEL_xDR_PROC_xD_PKT_MASK;
	if (nreqs == 0) {
		SAFEXCEL_DPRINTF(sc, 1,
		    "zero pending requests on ring %d\n", ringidx);
		mtx_lock(&ring->mtx);
		goto out;
	}

	TAILQ_INIT(&cq);

	bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	nrdescs = 0;
	for (i = 0; i < nreqs; i++) {
		req = safexcel_next_request(ring);

		bus_dmamap_sync(req->ctx.tag, req->ctx.map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(ring->data_dtag, req->dmap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		while (req->cdescs-- > 0) {
			cdesc = safexcel_cmd_descr_next(&ring->cdr);
			KASSERT(cdesc != NULL,
			    ("%s: missing control descriptor", __func__));
			if (req->cdescs == 0)
				KASSERT(cdesc->last_seg,
				    ("%s: chain is not terminated", __func__));
		}
		nrdescs += req->rdescs;
		while (req->rdescs-- > 0) {
			rdesc = safexcel_res_descr_next(&ring->rdr);
			error = rdesc->result_data.error_code;
			if (error != 0) {
				if (error == SAFEXCEL_RESULT_ERR_AUTH_FAILED &&
				    req->crp->crp_etype == 0) {
					req->crp->crp_etype = EBADMSG;
				} else {
					SAFEXCEL_DPRINTF(sc, 1,
					    "error code %#x\n", error);
					req->crp->crp_etype = EIO;
				}
			}
		}

		TAILQ_INSERT_TAIL(&cq, req->crp, crp_next);
	}

	mtx_lock(&ring->mtx);
	if (nreqs != 0) {
		KASSERT(ring->queued >= nreqs,
		    ("%s: request count underflow, %d queued %d completed",
		    __func__, ring->queued, nreqs));
		ring->queued -= nreqs;

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_xD_PKT(nreqs) |
		    (sc->sc_config.rd_offset * nrdescs * sizeof(uint32_t)));
		blocked = ring->blocked;
		ring->blocked = 0;
	}
out:
	if (ring->queued != 0) {
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
		    SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | imin(ring->queued, 16));
	}
	mtx_unlock(&ring->mtx);

	if (blocked)
		crypto_unblock(sc->sc_cid, blocked);

	TAILQ_FOREACH_SAFE(crp, &cq, crp_next, tmp)
		crypto_done(crp);
}

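/*
 * Ring interrupt handler.  Acknowledge pending command and result ring
 * interrupts, then scan the result ring for completed requests.
 */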
static void
safexcel_ring_intr(void *arg)
{
	struct safexcel_softc *sc;
	struct safexcel_intr_handle *ih;
	uint32_t status, stat;
	int ring;
	bool rdrpending;

	ih = arg;
	sc = ih->sc;
	ring = ih->ring;

	status = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
	    SAFEXCEL_HIA_AIC_R_ENABLED_STAT(ring));
	/* CDR interrupts */
	if (status & SAFEXCEL_CDR_IRQ(ring)) {
		stat = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
		    stat & SAFEXCEL_CDR_INTR_MASK);
	}
	/* RDR interrupts */
	rdrpending = false;
	if (status & SAFEXCEL_RDR_IRQ(ring)) {
		stat = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
		if ((stat & SAFEXCEL_xDR_ERR) == 0)
			rdrpending = true;
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
		    stat & SAFEXCEL_RDR_INTR_MASK);
	}
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ACK(ring),
	    status);

	if (rdrpending)
		safexcel_rdr_intr(sc, ring);
}

static int
safexcel_configure(struct safexcel_softc *sc)
{
	uint32_t i, mask, pemask, reg;

	if (sc->sc_type == 197) {
		sc->sc_offsets = eip197_regs_offset;
		pemask = SAFEXCEL_N_PES_MASK;
	} else {
		sc->sc_offsets = eip97_regs_offset;
		pemask = EIP97_N_PES_MASK;
	}

	/* Scan for valid ring interrupt controllers. */
	for (i = 0; i < SAFEXCEL_MAX_RING_AIC; i++) {
		reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
		    SAFEXCEL_HIA_AIC_R_VERSION(i));
		if (SAFEXCEL_REG_LO16(reg) != EIP201_VERSION_LE)
			break;
	}
	sc->sc_config.aic_rings = i;
	if (sc->sc_config.aic_rings == 0)
		return (-1);

	reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_OPTIONS);
	/* Check for 64-bit addressing. */
	if ((reg & SAFEXCEL_OPT_ADDR_64) == 0)
		return (-1);
	/* Check alignment constraints (which we do not support). */
	if (((reg & SAFEXCEL_OPT_TGT_ALIGN_MASK) >>
	    SAFEXCEL_OPT_TGT_ALIGN_OFFSET) != 0)
		return (-1);

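	/*
	 * The descriptor offsets computed below are derived from the host
	 * interface data width, presumably so that each descriptor occupies
	 * a naturally aligned group of words.
	 */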
	sc->sc_config.hdw =
	    (reg & SAFEXCEL_xDR_HDW_MASK) >> SAFEXCEL_xDR_HDW_OFFSET;
	mask = (1 << sc->sc_config.hdw) - 1;

	sc->sc_config.rings = reg & SAFEXCEL_N_RINGS_MASK;
	/* Limit the number of rings to the number of AIC rings. */
	sc->sc_config.rings = MIN(sc->sc_config.rings, sc->sc_config.aic_rings);

	sc->sc_config.pes = (reg & pemask) >> SAFEXCEL_N_PES_OFFSET;

	sc->sc_config.cd_size =
	    sizeof(struct safexcel_cmd_descr) / sizeof(uint32_t);
	sc->sc_config.cd_offset = (sc->sc_config.cd_size + mask) & ~mask;

	sc->sc_config.rd_size =
	    sizeof(struct safexcel_res_descr) / sizeof(uint32_t);
	sc->sc_config.rd_offset = (sc->sc_config.rd_size + mask) & ~mask;

	sc->sc_config.atok_offset =
	    (SAFEXCEL_MAX_ATOKENS * sizeof(struct safexcel_instr) + mask) &
	    ~mask;

	return (0);
}

static void
safexcel_init_hia_bus_access(struct safexcel_softc *sc)
{
	uint32_t version, val;

	/* Determine endianness and configure byte swap. */
	version = SAFEXCEL_READ(sc,
	    SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_VERSION);
	val = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
	if (SAFEXCEL_REG_HI16(version) == SAFEXCEL_HIA_VERSION_BE) {
		val = val ^ (SAFEXCEL_MST_CTRL_NO_BYTE_SWAP >> 24);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL,
		    val);
	}

	/* Configure wr/rd cache values. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_GEN_CFG(sc) + SAFEXCEL_HIA_MST_CTRL,
	    SAFEXCEL_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	    SAFEXCEL_MST_CTRL_WD_CACHE(WR_CACHE_4BITS));
}

static void
safexcel_disable_global_interrupts(struct safexcel_softc *sc)
{
	/* Disable and clear pending interrupts. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ENABLE_CTRL, 0);
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
	    SAFEXCEL_AIC_G_ACK_ALL_MASK);
}

/*
 * Configure the data fetch engine.  This component parses command descriptors
 * and sets up DMA transfers from host memory to the corresponding processing
 * engine.
 */
static void
safexcel_configure_dfe_engine(struct safexcel_softc *sc, int pe)
{
	/* Reset all DFE threads. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_RESET_PE);

	/* Deassert the DFE reset. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 0);

	/* DMA transfer size to use. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE(sc) + SAFEXCEL_HIA_DFE_CFG(pe),
	    SAFEXCEL_HIA_DFE_CFG_DIS_DEBUG |
	    SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(9) |
	    SAFEXCEL_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_CTRL_SIZE(7) |
	    SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS) |
	    SAFEXCEL_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS));

	/* Configure the PE DMA transfer thresholds. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_DBUF_THRES(pe),
	    SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
	    SAFEXCEL_PE_IN_xBUF_THRES_MAX(9));
	SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_TBUF_THRES(pe),
	    SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
	    SAFEXCEL_PE_IN_xBUF_THRES_MAX(7));
}

/*
 * Configure the data store engine.  This component parses result descriptors
 * and sets up DMA transfers from the processing engine to host memory.
 */
static int
safexcel_configure_dse(struct safexcel_softc *sc, int pe)
{
	uint32_t val;
	int count;

	/* Disable and reset all DSE threads. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_RESET_PE);

	/* Wait up to one second for the threads to go idle. */
	for (count = 0;;) {
		val = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_STAT(pe));
		if ((val & SAFEXCEL_DSE_THR_RDR_ID_MASK) ==
		    SAFEXCEL_DSE_THR_RDR_ID_MASK)
			break;
		if (count++ > 10000) {
			device_printf(sc->sc_dev, "DSE reset timeout\n");
			return (-1);
		}
		DELAY(100);
	}

	/* Exit the reset state. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 0);

	/* DMA transfer size to use. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE(sc) + SAFEXCEL_HIA_DSE_CFG(pe),
	    SAFEXCEL_HIA_DSE_CFG_DIS_DEBUG |
	    SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(8) |
	    SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS) |
	    SAFEXCEL_HIA_DSE_CFG_ALLWAYS_BUFFERABLE);

	/* Configure the processing engine thresholds. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_PE(sc) + SAFEXCEL_PE_OUT_DBUF_THRES(pe),
	    SAFEXCEL_PE_OUT_DBUF_THRES_MIN(7) |
	    SAFEXCEL_PE_OUT_DBUF_THRES_MAX(8));

	return (0);
}

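/*
 * Put the command and result descriptor rings into a known clean state,
 * clearing their counters and pointers before the base addresses are
 * programmed by safexcel_hw_setup_rings().
 */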
static void
safexcel_hw_prepare_rings(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		/*
		 * Command descriptors.
		 */

		/* Clear interrupts for this ring. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
		    SAFEXCEL_HIA_AIC_R_ENABLE_CLR_ALL_MASK);

		/* Disable external triggering. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
		    SAFEXCEL_RING_SIZE * sc->sc_config.cd_offset *
		    sizeof(uint32_t));

		/*
		 * Result descriptors.
		 */

		/* Disable external triggering. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		/* Ring size. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
		    SAFEXCEL_RING_SIZE * sc->sc_config.rd_offset *
		    sizeof(uint32_t));
	}
}

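/*
 * Program each ring's base address, descriptor size and fetch parameters,
 * and enable its result ring interrupt.
 */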
static void
safexcel_hw_setup_rings(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	uint32_t cd_size_rnd, mask, rd_size_rnd, val;
	int i;

	mask = (1 << sc->sc_config.hdw) - 1;
	cd_size_rnd = (sc->sc_config.cd_size + mask) >> sc->sc_config.hdw;
	val = (sizeof(struct safexcel_res_descr) -
	    sizeof(struct safexcel_res_data)) / sizeof(uint32_t);
	rd_size_rnd = (val + mask) >> sc->sc_config.hdw;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		/*
		 * Command descriptors.
		 */

		/* Ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
		    SAFEXCEL_ADDR_LO(ring->cdr.dma.paddr));
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
		    SAFEXCEL_ADDR_HI(ring->cdr.dma.paddr));

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
		    SAFEXCEL_xDR_DESC_MODE_64BIT | SAFEXCEL_CDR_DESC_MODE_ADCP |
		    (sc->sc_config.cd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
		    sc->sc_config.cd_size);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
		    ((SAFEXCEL_FETCH_COUNT * (cd_size_rnd << sc->sc_config.hdw)) <<
		    SAFEXCEL_xDR_xD_FETCH_THRESH) |
		    (SAFEXCEL_FETCH_COUNT * sc->sc_config.cd_offset));

		/* Configure DMA tx control. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
		    SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS));

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_CDR_INTR_MASK);

		/*
		 * Result descriptors.
		 */

		/* Ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
		    SAFEXCEL_ADDR_LO(ring->rdr.dma.paddr));
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
		    SAFEXCEL_ADDR_HI(ring->rdr.dma.paddr));

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
		    SAFEXCEL_xDR_DESC_MODE_64BIT |
		    (sc->sc_config.rd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
		    sc->sc_config.rd_size);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
		    ((SAFEXCEL_FETCH_COUNT * (rd_size_rnd << sc->sc_config.hdw)) <<
		    SAFEXCEL_xDR_xD_FETCH_THRESH) |
		    (SAFEXCEL_FETCH_COUNT * sc->sc_config.rd_offset));

		/* Configure DMA tx control. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
		    SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_WR_RES_BUF | SAFEXCEL_HIA_xDR_WR_CTRL_BUF);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_RDR_INTR_MASK);

		/* Enable ring interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CTRL(i),
		    SAFEXCEL_RDR_IRQ(i));
	}
}

/* Reset the command and result descriptor rings. */
static void
safexcel_hw_reset_rings(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		/*
		 * Result descriptor ring operations.
		 */

		/* Reset ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_RDR_INTR_MASK);

		/* Disable ring interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
		    SAFEXCEL_RDR_IRQ(i));

		/*
		 * Command descriptor ring operations.
		 */

		/* Reset ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_CDR_INTR_MASK);
	}
}

static void
safexcel_enable_pe_engine(struct safexcel_softc *sc, int pe)
{
	int i, ring_mask;

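	/* Build a mask with one bit set for each available ring. */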
	for (ring_mask = 0, i = 0; i < sc->sc_config.rings; i++) {
		ring_mask <<= 1;
		ring_mask |= 1;
	}

	/* Enable command descriptor rings. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);

	/* Enable result descriptor rings. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);

	/* Clear any HIA interrupt. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
	    SAFEXCEL_AIC_G_ACK_HIA_MASK);
}

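/*
 * Submit pending requests to the hardware by advancing the rings' prepared
 * descriptor counters.  When the crypto layer hints that more requests are
 * coming (CRYPTO_HINT_MORE), submission is deferred so that several requests
 * can be batched together.
 */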
static void
safexcel_execute(struct safexcel_softc *sc, struct safexcel_ring *ring,
    struct safexcel_request *req, int hint)
{
	int ringidx, ncdesc, nrdesc;
	bool busy;

	mtx_assert(&ring->mtx, MA_OWNED);

	if ((hint & CRYPTO_HINT_MORE) != 0) {
		ring->pending++;
		ring->pending_cdesc += req->cdescs;
		ring->pending_rdesc += req->rdescs;
		return;
	}

	ringidx = req->ringidx;

	busy = ring->queued != 0;
	ncdesc = ring->pending_cdesc + req->cdescs;
	nrdesc = ring->pending_rdesc + req->rdescs;
	ring->queued += ring->pending + 1;

	if (!busy) {
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
		    SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | ring->queued);
	}
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
	    nrdesc * sc->sc_config.rd_offset * sizeof(uint32_t));
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_CDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
	    ncdesc * sc->sc_config.cd_offset * sizeof(uint32_t));

	ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0;
}

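/*
 * Initialize the software state of each ring and point every command
 * descriptor at its slot in the additional-token buffer.
 */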
static void
safexcel_init_rings(struct safexcel_softc *sc)
{
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_ring *ring;
	uint64_t atok;
	int i, j;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		snprintf(ring->lockname, sizeof(ring->lockname),
		    "safexcel_ring%d", i);
		mtx_init(&ring->mtx, ring->lockname, NULL, MTX_DEF);

		ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0;
		ring->queued = 0;
		ring->cdr.read = ring->cdr.write = 0;
		ring->rdr.read = ring->rdr.write = 0;
		for (j = 0; j < SAFEXCEL_RING_SIZE; j++) {
			cdesc = &ring->cdr.desc[j];
			atok = ring->dma_atok.paddr +
			    sc->sc_config.atok_offset * j;
			cdesc->atok_lo = SAFEXCEL_ADDR_LO(atok);
			cdesc->atok_hi = SAFEXCEL_ADDR_HI(atok);
		}
	}
}

static void
safexcel_dma_alloc_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct safexcel_dma_mem *sdm;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
	sdm = arg;
	sdm->paddr = segs->ds_addr;
}

static int
safexcel_dma_alloc_mem(struct safexcel_softc *sc, struct safexcel_dma_mem *sdm,
    bus_size_t size)
{
	int error;

	KASSERT(sdm->vaddr == NULL,
	    ("%s: DMA memory descriptor in use.", __func__));

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size, 1,			/* maxsize, nsegments */
	    size, BUS_DMA_COHERENT,	/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sdm->tag);			/* dmat */
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate busdma tag, error %d\n", error);
		goto err1;
	}

	error = bus_dmamem_alloc(sdm->tag, (void **)&sdm->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sdm->map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA safe memory, error %d\n", error);
		goto err2;
	}

	error = bus_dmamap_load(sdm->tag, sdm->map, sdm->vaddr, size,
	    safexcel_dma_alloc_mem_cb, sdm, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot get address of the DMA memory, error %d\n", error);
		goto err3;
	}

	return (0);
err3:
	bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
err2:
	bus_dma_tag_destroy(sdm->tag);
err1:
	sdm->vaddr = NULL;

	return (error);
}

static void
safexcel_dma_free_mem(struct safexcel_dma_mem *sdm)
{
	bus_dmamap_unload(sdm->tag, sdm->map);
	bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
	bus_dma_tag_destroy(sdm->tag);
}

static void
safexcel_dma_free_rings(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];
		safexcel_dma_free_mem(&ring->cdr.dma);
		safexcel_dma_free_mem(&ring->dma_atok);
		safexcel_dma_free_mem(&ring->rdr.dma);
		bus_dma_tag_destroy(ring->data_dtag);
		mtx_destroy(&ring->mtx);
	}
}

static int
safexcel_dma_init(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	bus_size_t size;
	int error, i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		error = bus_dma_tag_create(
		    bus_get_dma_tag(sc->sc_dev),/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filtfunc, filtfuncarg */
		    SAFEXCEL_MAX_REQUEST_SIZE,	/* maxsize */
		    SAFEXCEL_MAX_FRAGMENTS,	/* nsegments */
		    SAFEXCEL_MAX_REQUEST_SIZE,	/* maxsegsz */
		    BUS_DMA_COHERENT,		/* flags */
		    NULL, NULL,			/* lockfunc, lockfuncarg */
		    &ring->data_dtag);		/* dmat */
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "bus_dma_tag_create main failed; error %d\n", error);
			return (error);
		}

		size = sizeof(uint32_t) * sc->sc_config.cd_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->cdr.dma, size);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to allocate CDR DMA memory, error %d\n",
			    error);
			goto err;
		}
		ring->cdr.desc =
		    (struct safexcel_cmd_descr *)ring->cdr.dma.vaddr;

		/* Allocate additional CDR token memory. */
		size = (bus_size_t)sc->sc_config.atok_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->dma_atok, size);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to allocate atoken DMA memory, error %d\n",
			    error);
			goto err;
		}

		size = sizeof(uint32_t) * sc->sc_config.rd_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->rdr.dma, size);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to allocate RDR DMA memory, error %d\n",
			    error);
			goto err;
		}
		ring->rdr.desc =
		    (struct safexcel_res_descr *)ring->rdr.dma.vaddr;
	}

	return (0);
err:
	safexcel_dma_free_rings(sc);
	return (error);
}

static void
safexcel_deinit_hw(struct safexcel_softc *sc)
{
	safexcel_hw_reset_rings(sc);
	safexcel_dma_free_rings(sc);
}

static int
safexcel_init_hw(struct safexcel_softc *sc)
{
	int pe;

	/* 23.3.7 Initialization */
	if (safexcel_configure(sc) != 0)
		return (EINVAL);

	if (safexcel_dma_init(sc) != 0)
		return (ENOMEM);

	safexcel_init_rings(sc);

	safexcel_init_hia_bus_access(sc);

	/* 23.3.7.2 Disable EIP-97 global Interrupts */
	safexcel_disable_global_interrupts(sc);

	for (pe = 0; pe < sc->sc_config.pes; pe++) {
		/* 23.3.7.3 Configure Data Fetch Engine */
		safexcel_configure_dfe_engine(sc, pe);

		/* 23.3.7.4 Configure Data Store Engine */
		if (safexcel_configure_dse(sc, pe)) {
			safexcel_deinit_hw(sc);
			return (-1);
		}

		/* 23.3.7.5 1. Protocol enables */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION_EN(pe),
		    0xffffffff);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION2_EN(pe),
		    0xffffffff);
	}

	safexcel_hw_prepare_rings(sc);

	/* 23.3.7.5 Configure the Processing Engine(s). */
	for (pe = 0; pe < sc->sc_config.pes; pe++)
		safexcel_enable_pe_engine(sc, pe);

	safexcel_hw_setup_rings(sc);

	return (0);
}

static int
safexcel_setup_dev_interrupts(struct safexcel_softc *sc)
{
	int error, i, j;

	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) {
		sc->sc_ih[i].sc = sc;
		sc->sc_ih[i].ring = i;

		if (bus_setup_intr(sc->sc_dev, sc->sc_intr[i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, safexcel_ring_intr,
		    &sc->sc_ih[i], &sc->sc_ih[i].handle)) {
			device_printf(sc->sc_dev,
			    "couldn't setup interrupt %d\n", i);
			goto err;
		}

		error = bus_bind_intr(sc->sc_dev, sc->sc_intr[i], i % mp_ncpus);
		if (error != 0)
			device_printf(sc->sc_dev,
			    "failed to bind ring %d interrupt, error %d\n",
			    i, error);
	}

	return (0);

err:
	for (j = 0; j < i; j++)
		bus_teardown_intr(sc->sc_dev, sc->sc_intr[j],
		    sc->sc_ih[j].handle);

	return (ENXIO);
}

static void
safexcel_teardown_dev_interrupts(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < SAFEXCEL_MAX_RINGS; i++)
		bus_teardown_intr(sc->sc_dev, sc->sc_intr[i],
		    sc->sc_ih[i].handle);
}

static int
safexcel_alloc_dev_resources(struct safexcel_softc *sc)
{
	char name[16];
	device_t dev;
	phandle_t node;
	int error, i, rid;

	dev = sc->sc_dev;
	node = ofw_bus_get_node(dev);

	rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't allocate memory resources\n");
		return (ENXIO);
	}

	for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) {
		(void)snprintf(name, sizeof(name), "ring%d", i);
		error = ofw_bus_find_string_index(node, "interrupt-names", name,
		    &rid);
		if (error != 0)
			break;

		sc->sc_intr[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_ACTIVE | RF_SHAREABLE);
		if (sc->sc_intr[i] == NULL) {
			error = ENXIO;
			goto out;
		}
	}
	if (i == 0) {
		device_printf(dev, "couldn't allocate interrupt resources\n");
		error = ENXIO;
		goto out;
	}

	return (0);

out:
	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_res),
	    sc->sc_res);
	return (error);
}

static void
safexcel_free_dev_resources(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
		bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
	if (sc->sc_res != NULL)
		bus_release_resource(sc->sc_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_res), sc->sc_res);
}

static int
safexcel_probe(device_t dev)
{
	struct safexcel_softc *sc;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->sc_type = ofw_bus_search_compatible(dev, safexcel_compat)->ocd_data;
	if (sc->sc_type == 0)
		return (ENXIO);

	device_set_desc(dev, "SafeXcel EIP-97 crypto accelerator");

	return (BUS_PROBE_DEFAULT);
}

static int
safexcel_attach(device_t dev)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	struct safexcel_softc *sc;
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	int i, j, ringidx;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_cid = -1;

	if (safexcel_alloc_dev_resources(sc))
		goto err;

	if (safexcel_setup_dev_interrupts(sc))
		goto err1;

	if (safexcel_init_hw(sc))
		goto err2;

	for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
		ring = &sc->sc_ring[ringidx];

		ring->cmd_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
		ring->res_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);

		for (i = 0; i < SAFEXCEL_RING_SIZE; i++) {
			req = &ring->requests[i];
			req->sc = sc;
			req->ringidx = ringidx;
			if (bus_dmamap_create(ring->data_dtag,
			    BUS_DMA_COHERENT, &req->dmap) != 0) {
				for (j = 0; j < i; j++)
					bus_dmamap_destroy(ring->data_dtag,
					    ring->requests[j].dmap);
				goto err2;
			}
			if (safexcel_dma_alloc_mem(sc, &req->ctx,
			    sizeof(struct safexcel_context_record)) != 0) {
				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ring->data_dtag,
					    ring->requests[j].dmap);
					safexcel_dma_free_mem(
					    &ring->requests[j].ctx);
				}
				goto err2;
			}
		}
	}

	ctx = device_get_sysctl_ctx(dev);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, 0,
	    "Debug message verbosity");

	oid = device_get_sysctl_tree(sc->sc_dev);
	children = SYSCTL_CHILDREN(oid);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
	children = SYSCTL_CHILDREN(oid);

	sc->sc_req_alloc_failures = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "req_alloc_failures",
	    CTLFLAG_RD, &sc->sc_req_alloc_failures,
	    "Number of request allocation failures");
	sc->sc_cdesc_alloc_failures = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cdesc_alloc_failures",
	    CTLFLAG_RD, &sc->sc_cdesc_alloc_failures,
	    "Number of command descriptor ring overflows");
	sc->sc_rdesc_alloc_failures = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "rdesc_alloc_failures",
	    CTLFLAG_RD, &sc->sc_rdesc_alloc_failures,
	    "Number of result descriptor ring overflows");

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safexcel_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0)
		goto err2;

	return (0);

err2:
	safexcel_teardown_dev_interrupts(sc);
err1:
	safexcel_free_dev_resources(sc);
err:
	return (ENXIO);
}

static int
safexcel_detach(device_t dev)
{
	struct safexcel_ring *ring;
	struct safexcel_softc *sc;
	int i, ringidx;

	sc = device_get_softc(dev);

	if (sc->sc_cid >= 0)
		crypto_unregister_all(sc->sc_cid);

	counter_u64_free(sc->sc_req_alloc_failures);
	counter_u64_free(sc->sc_cdesc_alloc_failures);
	counter_u64_free(sc->sc_rdesc_alloc_failures);

	for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
		ring = &sc->sc_ring[ringidx];
		for (i = 0; i < SAFEXCEL_RING_SIZE; i++) {
			bus_dmamap_destroy(ring->data_dtag,
			    ring->requests[i].dmap);
			safexcel_dma_free_mem(&ring->requests[i].ctx);
		}
		sglist_free(ring->cmd_data);
		sglist_free(ring->res_data);
	}
	safexcel_deinit_hw(sc);
	safexcel_teardown_dev_interrupts(sc);
	safexcel_free_dev_resources(sc);

	return (0);
}

/*
 * Pre-compute the hash key used in GHASH, which is a block of zeroes encrypted
 * using the cipher key.
 */
static void
safexcel_setkey_ghash(const uint8_t *key, int klen, uint32_t *hashkey)
{
	uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
	uint8_t zeros[AES_BLOCK_LEN];
	int i, rounds;

	memset(zeros, 0, sizeof(zeros));

	rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
	rijndaelEncrypt(ks, rounds, zeros, (uint8_t *)hashkey);
	for (i = 0; i < GMAC_BLOCK_LEN / sizeof(uint32_t); i++)
		hashkey[i] = htobe32(hashkey[i]);

	explicit_bzero(ks, sizeof(ks));
}

/*
 * Pre-compute the combined CBC-MAC key, which consists of three keys K1, K2,
 * K3 in the hardware implementation.  K1 is the cipher key and comes last in
 * the buffer since K2 and K3 have a fixed size of AES_BLOCK_LEN.  For now
 * XCBC-MAC is not implemented so K2 and K3 are fixed.
 */
static void
safexcel_setkey_xcbcmac(const uint8_t *key, int klen, uint32_t *hashkey)
{
	int i, off;

	memset(hashkey, 0, 2 * AES_BLOCK_LEN);
	off = 2 * AES_BLOCK_LEN / sizeof(uint32_t);
	for (i = 0; i < klen / sizeof(uint32_t); i++, key += 4)
		hashkey[i + off] = htobe32(le32dec(key));
}

static void
safexcel_setkey_hmac_digest(const struct auth_hash *ahash, union authctx *ctx,
    char *buf)
{
	int hashwords, i;

	switch (ahash->type) {
	case CRYPTO_SHA1_HMAC:
		hashwords = ahash->hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha1ctx.h.b32[i]);
		break;
	case CRYPTO_SHA2_224_HMAC:
		hashwords = auth_hash_hmac_sha2_256.hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha224ctx.state[i]);
		break;
	case CRYPTO_SHA2_256_HMAC:
		hashwords = ahash->hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha256ctx.state[i]);
		break;
	case CRYPTO_SHA2_384_HMAC:
		hashwords = auth_hash_hmac_sha2_512.hashsize / sizeof(uint64_t);
		for (i = 0; i < hashwords; i++)
			((uint64_t *)buf)[i] = htobe64(ctx->sha384ctx.state[i]);
		break;
	case CRYPTO_SHA2_512_HMAC:
		hashwords = ahash->hashsize / sizeof(uint64_t);
		for (i = 0; i < hashwords; i++)
			((uint64_t *)buf)[i] = htobe64(ctx->sha512ctx.state[i]);
		break;
	}
}

/*
 * Pre-compute the inner and outer digests used in the HMAC algorithm.
 */
static void
safexcel_setkey_hmac(const struct crypto_session_params *csp,
    const uint8_t *key, int klen, uint8_t *ipad, uint8_t *opad)
{
	union authctx ctx;
	const struct auth_hash *ahash;

	ahash = crypto_auth_hash(csp);
	hmac_init_ipad(ahash, key, klen, &ctx);
	safexcel_setkey_hmac_digest(ahash, &ctx, ipad);
	hmac_init_opad(ahash, key, klen, &ctx);
	safexcel_setkey_hmac_digest(ahash, &ctx, opad);
	explicit_bzero(&ctx, ahash->ctxsize);
}

static void
safexcel_setkey_xts(const uint8_t *key, int klen, uint8_t *tweakkey)
{
	memcpy(tweakkey, key + klen, klen);
}

/*
 * Populate a context record with parameters from a session.  Some consumers
 * specify per-request keys, in which case the context must be re-initialized
 * for each request.
 */
static int
safexcel_set_context(struct safexcel_context_record *ctx, int op,
    const uint8_t *ckey, const uint8_t *akey, struct safexcel_session *sess)
{
	const struct crypto_session_params *csp;
	uint8_t *data;
	uint32_t ctrl0, ctrl1;
	int aklen, alg, cklen, off;

	csp = crypto_get_params(sess->cses);
	aklen = csp->csp_auth_klen;
	cklen = csp->csp_cipher_klen;
	if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
		cklen /= 2;

	ctrl0 = sess->alg | sess->digest | sess->hash;
	ctrl1 = sess->mode;

	data = (uint8_t *)ctx->data;
	if (csp->csp_cipher_alg != 0) {
		memcpy(data, ckey, cklen);
		off = cklen;
	} else if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) {
		memcpy(data, akey, aklen);
		off = aklen;
	} else {
		off = 0;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_NIST_GCM_16:
		safexcel_setkey_ghash(ckey, cklen, (uint32_t *)(data + off));
		off += GMAC_BLOCK_LEN;
		break;
	case CRYPTO_AES_CCM_16:
		safexcel_setkey_xcbcmac(ckey, cklen, (uint32_t *)(data + off));
		off += AES_BLOCK_LEN * 2 + cklen;
		break;
	case CRYPTO_AES_XTS:
		safexcel_setkey_xts(ckey, cklen, data + off);
		off += cklen;
		break;
	}
	switch (csp->csp_auth_alg) {
	case CRYPTO_AES_NIST_GMAC:
		safexcel_setkey_ghash(akey, aklen, (uint32_t *)(data + off));
		off += GMAC_BLOCK_LEN;
		break;
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		safexcel_setkey_hmac(csp, akey, aklen,
		    data + off, data + off + sess->statelen);
		off += sess->statelen * 2;
		break;
	}
	ctrl0 |= SAFEXCEL_CONTROL0_SIZE(off / sizeof(uint32_t));

	alg = csp->csp_cipher_alg;
	if (alg == 0)
		alg = csp->csp_auth_alg;

	switch (alg) {
	case CRYPTO_AES_CCM_16:
		if (CRYPTO_OP_IS_ENCRYPT(op)) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_ENCRYPT_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_DECRYPT_HASH_IN |
			    SAFEXCEL_CONTROL0_KEY_EN;
		}
		ctrl1 |= SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
		    SAFEXCEL_CONTROL1_IV2 | SAFEXCEL_CONTROL1_IV3;
		break;
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
	case CRYPTO_AES_XTS:
		if (CRYPTO_OP_IS_ENCRYPT(op)) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN;
			if (csp->csp_auth_alg != 0)
				ctrl0 |=
				    SAFEXCEL_CONTROL0_TYPE_ENCRYPT_HASH_OUT;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
			    SAFEXCEL_CONTROL0_KEY_EN;
			if (csp->csp_auth_alg != 0)
				ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
		}
		break;
	case CRYPTO_AES_NIST_GCM_16:
	case CRYPTO_AES_NIST_GMAC:
		if (CRYPTO_OP_IS_ENCRYPT(op) || csp->csp_auth_alg != 0) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN |
			    SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
			    SAFEXCEL_CONTROL0_KEY_EN |
			    SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
		}
		if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) {
			ctrl1 |= SAFEXCEL_CONTROL1_COUNTER_MODE |
			    SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
			    SAFEXCEL_CONTROL1_IV2;
		}
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		ctrl0 |= SAFEXCEL_CONTROL0_RESTART_HASH;
		/* FALLTHROUGH */
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
		break;
	}

	ctx->control0 = ctrl0;
	ctx->control1 = ctrl1;

	return (off);
}

/*
 * Construct a no-op instruction, used to pad input tokens.
 */
static void
safexcel_instr_nop(struct safexcel_instr **instrp)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = (1 << 2);
	instr->status = 0;
	instr->instructions = 0;

	*instrp = instr + 1;
}

/*
 * Insert the digest of the input payload.  This is typically the last
 * instruction of a sequence.
 */
static void
safexcel_instr_insert_digest(struct safexcel_instr **instrp, int len)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = len;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
	    SAFEXCEL_INSTR_INSERT_HASH_DIGEST;

	*instrp = instr + 1;
}

/*
 * Retrieve and verify a digest.
 */
static void
safexcel_instr_retrieve_digest(struct safexcel_instr **instrp, int len)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_RETRIEVE;
	instr->length = len;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
	instr++;

	instr->opcode = SAFEXCEL_INSTR_OPCODE_VERIFY_FIELDS;
	instr->length = len | SAFEXCEL_INSTR_VERIFY_HASH;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_VERIFY_PADDING;

	*instrp = instr + 1;
}

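/*
 * Emit a pair of instructions that strip an AES block from the result stream
 * and insert one directed at the output and cipher engines; the GCM and CCM
 * handlers emit this between the AAD and the payload.
 */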
static void
safexcel_instr_temp_aes_block(struct safexcel_instr **instrp)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT_REMOVE_RESULT;
	instr->length = 0;
	instr->status = 0;
	instr->instructions = AES_BLOCK_LEN;
	instr++;

	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = AES_BLOCK_LEN;
	instr->status = 0;
	instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
	    SAFEXCEL_INSTR_DEST_CRYPTO;

	*instrp = instr + 1;
}

/*
 * Handle a request for an unauthenticated block cipher.
 */
static void
safexcel_instr_cipher(struct safexcel_request *req,
    struct safexcel_instr *instr, struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;

	crp = req->crp;

	/* Insert the payload. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_payload_length;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_PACKET |
	    SAFEXCEL_INSTR_STATUS_LAST_HASH;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_OUTPUT;

	cdesc->additional_cdata_size = 1;
}

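/*
 * Handle a request for an authenticated cipher operating in
 * encrypt-then-authenticate (ETA) mode.
 */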
static void
safexcel_instr_eta(struct safexcel_request *req, struct safexcel_instr *instr,
    struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	crp = req->crp;
	start = instr;

	/* Insert the AAD. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_aad_length;
	instr->status = crp->crp_payload_length == 0 ?
	    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	/* Encrypt any data left in the request. */
	if (crp->crp_payload_length > 0) {
		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
		instr->length = crp->crp_payload_length;
		instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
		instr->instructions = SAFEXCEL_INSTR_INS_LAST |
		    SAFEXCEL_INSTR_DEST_CRYPTO |
		    SAFEXCEL_INSTR_DEST_HASH |
		    SAFEXCEL_INSTR_DEST_OUTPUT;
		instr++;
	}

	/*
	 * Compute the digest, or extract it and place it in the output stream.
	 */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
	else
		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
	cdesc->additional_cdata_size = instr - start;
}

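/*
 * Handle a plain or HMAC digest request.
 */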
static void
safexcel_instr_sha_hash(struct safexcel_request *req,
    struct safexcel_instr *instr)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	crp = req->crp;
	start = instr;

	/* Pass the input data to the hash engine. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_payload_length;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
	instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	/* Insert the hash result into the output stream. */
	safexcel_instr_insert_digest(&instr, req->sess->digestlen);

	/* Pad the rest of the inline instruction space. */
	while (instr != start + SAFEXCEL_MAX_ITOKENS)
		safexcel_instr_nop(&instr);
}

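/*
 * Handle an AES-CCM request.
 */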
static void
safexcel_instr_ccm(struct safexcel_request *req, struct safexcel_instr *instr,
    struct safexcel_cmd_descr *cdesc)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp;
	struct safexcel_instr *start;
	uint8_t *a0, *b0, *alenp, L;
	int aalign, blen;

	crp = req->crp;
	csp = crypto_get_params(crp->crp_session);
	start = instr;

	/*
	 * Construct two blocks, A0 and B0, used in encryption and
	 * authentication, respectively.  A0 is embedded in the token
	 * descriptor, and B0 is inserted directly into the data stream using
	 * instructions below.
	 *
	 * An explicit check for overflow of the length field is not needed:
	 * the maximum request size supported by the driver, 65535 bytes,
	 * fits in the smallest length field, which is used with a 13-byte
	 * nonce.
	 */
1707 blen = AES_BLOCK_LEN;
1708 L = 15 - csp->csp_ivlen;
1709
1710 a0 = (uint8_t *)&cdesc->control_data.token[0];
1711 memset(a0, 0, blen);
1712 a0[0] = L - 1;
1713 memcpy(&a0[1], req->iv, csp->csp_ivlen);
1714
1715 /*
1716 * Insert B0 and the AAD length into the input stream.
1717 */
1718 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1719 instr->length = blen + (crp->crp_aad_length > 0 ? 2 : 0);
1720 instr->status = 0;
1721 instr->instructions = SAFEXCEL_INSTR_DEST_HASH |
1722 SAFEXCEL_INSTR_INSERT_IMMEDIATE;
1723 instr++;
1724
1725 b0 = (uint8_t *)instr;
1726 memset(b0, 0, blen);
1727 b0[0] =
1728 (L - 1) | /* payload length size */
1729 ((req->sess->digestlen - 2) / 2) << 3 /* digest length */ |
1730 (crp->crp_aad_length > 0 ? 1 : 0) << 6 /* AAD present bit */;
	memcpy(&b0[1], req->iv, csp->csp_ivlen);
	b0[14] = crp->crp_payload_length >> 8;
	b0[15] = crp->crp_payload_length & 0xff;
	instr += blen / sizeof(*instr);

	/* Insert the AAD length and data into the input stream. */
	if (crp->crp_aad_length > 0) {
		alenp = (uint8_t *)instr;
		alenp[0] = crp->crp_aad_length >> 8;
		alenp[1] = crp->crp_aad_length & 0xff;
		alenp[2] = 0;
		alenp[3] = 0;
		instr++;

		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
		instr->length = crp->crp_aad_length;
		instr->status = 0;
		instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
		instr++;

		/* Insert zero padding. */
		aalign = (crp->crp_aad_length + 2) & (blen - 1);
		instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
		instr->length = aalign == 0 ? 0 :
		    blen - ((crp->crp_aad_length + 2) & (blen - 1));
		instr->status = crp->crp_payload_length == 0 ?
		    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
		instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
		instr++;
	}

	safexcel_instr_temp_aes_block(&instr);

	/* Insert the cipher payload into the input stream. */
	if (crp->crp_payload_length > 0) {
		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
		instr->length = crp->crp_payload_length;
		instr->status = (crp->crp_payload_length & (blen - 1)) == 0 ?
		    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
		instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
		    SAFEXCEL_INSTR_DEST_CRYPTO |
		    SAFEXCEL_INSTR_DEST_HASH |
		    SAFEXCEL_INSTR_INS_LAST;
		instr++;

		/* Insert zero padding. */
		if (crp->crp_payload_length & (blen - 1)) {
			instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
			instr->length = blen -
			    (crp->crp_payload_length & (blen - 1));
			instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
			instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
			instr++;
		}
	}

	/*
	 * Compute the digest, or extract it and place it in the output stream.
	 */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
	else
		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);

	cdesc->additional_cdata_size = instr - start;
}

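/*
 * Construct the token instructions for an AES-GCM request. The IV, with a
 * 32-bit block counter of 1 appended, forms the initial counter block J0
 * and is stored in the token descriptor.
 */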
static void
safexcel_instr_gcm(struct safexcel_request *req, struct safexcel_instr *instr,
    struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
	cdesc->control_data.token[3] = htobe32(1);

	crp = req->crp;
	start = instr;

	/* Insert the AAD into the input stream. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_aad_length;
	instr->status = crp->crp_payload_length == 0 ?
	    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	safexcel_instr_temp_aes_block(&instr);

	/* Insert the cipher payload into the input stream. */
	if (crp->crp_payload_length > 0) {
		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
		instr->length = crp->crp_payload_length;
		instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
		instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
		    SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_HASH |
		    SAFEXCEL_INSTR_INS_LAST;
		instr++;
	}

	/*
	 * Compute the digest, or extract it and place it in the output stream.
	 */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
	else
		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);

	cdesc->additional_cdata_size = instr - start;
}

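/*
 * Construct the token instructions for an AES-GMAC request. This is GCM
 * with no cipher payload: the input is fed only to the hash engine, so it
 * is authenticated but not encrypted.
 */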
static void
safexcel_instr_gmac(struct safexcel_request *req, struct safexcel_instr *instr,
    struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
	cdesc->control_data.token[3] = htobe32(1);

	crp = req->crp;
	start = instr;

	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_payload_length;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	safexcel_instr_temp_aes_block(&instr);

	safexcel_instr_insert_digest(&instr, req->sess->digestlen);

	cdesc->additional_cdata_size = instr - start;
}

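/*
 * Fill out the token for a request: select or build the context record and
 * emit the instruction sequence matching the session's cipher and
 * authentication algorithms.
 */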
static void
safexcel_set_token(struct safexcel_request *req)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp;
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_context_record *ctx;
	struct safexcel_context_template *ctxtmp;
	struct safexcel_instr *instr;
	struct safexcel_softc *sc;
	const uint8_t *akey, *ckey;
	int ringidx;

	crp = req->crp;
	csp = crypto_get_params(crp->crp_session);
	cdesc = req->cdesc;
	sc = req->sc;
	ringidx = req->ringidx;

	akey = crp->crp_auth_key;
	ckey = crp->crp_cipher_key;
	if (akey != NULL || ckey != NULL) {
		/*
		 * If we have a per-request key we have to generate the context
		 * record on the fly.
		 */
		if (akey == NULL)
			akey = csp->csp_auth_key;
		if (ckey == NULL)
			ckey = csp->csp_cipher_key;
		ctx = (struct safexcel_context_record *)req->ctx.vaddr;
		(void)safexcel_set_context(ctx, crp->crp_op, ckey, akey,
		    req->sess);
	} else {
		/*
		 * Use the context record template computed at session
		 * initialization time.
		 */
		ctxtmp = CRYPTO_OP_IS_ENCRYPT(crp->crp_op) ?
		    &req->sess->encctx : &req->sess->decctx;
		ctx = &ctxtmp->ctx;
		memcpy(req->ctx.vaddr + 2 * sizeof(uint32_t), ctx->data,
		    ctxtmp->len);
	}
	cdesc->control_data.control0 = ctx->control0;
	cdesc->control_data.control1 = ctx->control1;

	/*
	 * For keyless hash operations, the token instructions can be embedded
	 * in the token itself. Otherwise we use an additional token descriptor
	 * and the embedded instruction space is used to store the IV.
	 */
	if (csp->csp_cipher_alg == 0 &&
	    csp->csp_auth_alg != CRYPTO_AES_NIST_GMAC) {
		instr = (void *)cdesc->control_data.token;
	} else {
		instr = (void *)(sc->sc_ring[ringidx].dma_atok.vaddr +
		    sc->sc_config.atok_offset *
		    (cdesc - sc->sc_ring[ringidx].cdr.desc));
		cdesc->control_data.options |= SAFEXCEL_OPTION_4_TOKEN_IV_CMD;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_NIST_GCM_16:
		safexcel_instr_gcm(req, instr, cdesc);
		break;
	case CRYPTO_AES_CCM_16:
		safexcel_instr_ccm(req, instr, cdesc);
		break;
	case CRYPTO_AES_XTS:
		memcpy(cdesc->control_data.token, req->iv, AES_XTS_IV_LEN);
		memset(cdesc->control_data.token +
		    AES_XTS_IV_LEN / sizeof(uint32_t), 0, AES_XTS_IV_LEN);

		safexcel_instr_cipher(req, instr, cdesc);
		break;
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_LEN);
		if (csp->csp_auth_alg != 0)
			safexcel_instr_eta(req, instr, cdesc);
		else
			safexcel_instr_cipher(req, instr, cdesc);
		break;
	default:
		switch (csp->csp_auth_alg) {
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_224:
		case CRYPTO_SHA2_224_HMAC:
		case CRYPTO_SHA2_256:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512:
		case CRYPTO_SHA2_512_HMAC:
			safexcel_instr_sha_hash(req, instr);
			break;
		case CRYPTO_AES_NIST_GMAC:
			safexcel_instr_gmac(req, instr, cdesc);
			break;
		default:
			panic("unhandled auth request %d", csp->csp_auth_alg);
		}
		break;
	}
}

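/*
 * Claim the next free result descriptor in the ring and fill it with one
 * segment of the output stream. Returns NULL if the ring is full.
 */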
static struct safexcel_res_descr *
safexcel_res_descr_add(struct safexcel_ring *ring, bool first, bool last,
    bus_addr_t data, uint32_t len)
{
	struct safexcel_res_descr *rdesc;
	struct safexcel_res_descr_ring *rring;

	mtx_assert(&ring->mtx, MA_OWNED);

	rring = &ring->rdr;
	if ((rring->write + 1) % SAFEXCEL_RING_SIZE == rring->read)
		return (NULL);

	rdesc = &rring->desc[rring->write];
	rring->write = (rring->write + 1) % SAFEXCEL_RING_SIZE;

	rdesc->particle_size = len;
	rdesc->rsvd0 = 0;
	rdesc->descriptor_overflow = 0;
	rdesc->buffer_overflow = 0;
	rdesc->last_seg = last;
	rdesc->first_seg = first;
	rdesc->result_size =
	    sizeof(struct safexcel_res_data) / sizeof(uint32_t);
	rdesc->rsvd1 = 0;
	rdesc->data_lo = SAFEXCEL_ADDR_LO(data);
	rdesc->data_hi = SAFEXCEL_ADDR_HI(data);

	if (first) {
		rdesc->result_data.packet_length = 0;
		rdesc->result_data.error_code = 0;
	}

	return (rdesc);
}

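/*
 * Claim the next free command descriptor in the ring and fill it with one
 * segment of the input stream. The first descriptor of a request also
 * carries the total packet length and the context record address. Returns
 * NULL if the ring is full.
 */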
static struct safexcel_cmd_descr *
safexcel_cmd_descr_add(struct safexcel_ring *ring, bool first, bool last,
    bus_addr_t data, uint32_t seglen, uint32_t reqlen, bus_addr_t context)
{
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_cmd_descr_ring *cring;

	KASSERT(reqlen <= SAFEXCEL_MAX_REQUEST_SIZE,
	    ("%s: request length %u too long", __func__, reqlen));
	mtx_assert(&ring->mtx, MA_OWNED);

	cring = &ring->cdr;
	if ((cring->write + 1) % SAFEXCEL_RING_SIZE == cring->read)
		return (NULL);

	cdesc = &cring->desc[cring->write];
	cring->write = (cring->write + 1) % SAFEXCEL_RING_SIZE;

	cdesc->particle_size = seglen;
	cdesc->rsvd0 = 0;
	cdesc->last_seg = last;
	cdesc->first_seg = first;
	cdesc->additional_cdata_size = 0;
	cdesc->rsvd1 = 0;
	cdesc->data_lo = SAFEXCEL_ADDR_LO(data);
	cdesc->data_hi = SAFEXCEL_ADDR_HI(data);
	if (first) {
		cdesc->control_data.packet_length = reqlen;
		cdesc->control_data.options = SAFEXCEL_OPTION_IP |
		    SAFEXCEL_OPTION_CP | SAFEXCEL_OPTION_CTX_CTRL_IN_CMD |
		    SAFEXCEL_OPTION_RC_AUTO;
		cdesc->control_data.type = SAFEXCEL_TOKEN_TYPE_BYPASS;
		cdesc->control_data.context_lo = SAFEXCEL_ADDR_LO(context) |
		    SAFEXCEL_CONTEXT_SMALL;
		cdesc->control_data.context_hi = SAFEXCEL_ADDR_HI(context);
	}

	return (cdesc);
}

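/*
 * Release the most recently allocated descriptors, used to unwind a
 * partially constructed chain when one of the rings fills up.
 */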
static void
safexcel_cmd_descr_rollback(struct safexcel_ring *ring, int count)
{
	struct safexcel_cmd_descr_ring *cring;

	mtx_assert(&ring->mtx, MA_OWNED);

	cring = &ring->cdr;
	cring->write -= count;
	if (cring->write < 0)
		cring->write += SAFEXCEL_RING_SIZE;
}

static void
safexcel_res_descr_rollback(struct safexcel_ring *ring, int count)
{
	struct safexcel_res_descr_ring *rring;

	mtx_assert(&ring->mtx, MA_OWNED);

	rring = &ring->rdr;
	rring->write -= count;
	if (rring->write < 0)
		rring->write += SAFEXCEL_RING_SIZE;
}

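/*
 * Append the subrange [start, start + len) of a DMA segment array to the
 * given scatter/gather list.
 */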
static void
safexcel_append_segs(bus_dma_segment_t *segs, int nseg, struct sglist *sg,
    int start, int len)
{
	bus_dma_segment_t *seg;
	size_t seglen;
	int error, i;

	for (i = 0; i < nseg && len > 0; i++) {
		seg = &segs[i];

		if (seg->ds_len <= start) {
			start -= seg->ds_len;
			continue;
		}

		seglen = MIN(len, seg->ds_len - start);
		error = sglist_append_phys(sg, seg->ds_addr + start, seglen);
		if (error != 0)
			panic("%s: ran out of segments: %d", __func__, error);
		len -= seglen;
		start = 0;
	}
}

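/*
 * Callback for bus_dmamap_load_crp(): translate the crypto buffer's DMA
 * segments into chains of command (input) and result (output) descriptors.
 */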
static void
safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp;
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	struct safexcel_session *sess;
	struct sglist *sg;
	size_t inlen;
	int i;
	bool first, last;

	req = arg;
	if (error != 0) {
		req->error = error;
		return;
	}

	crp = req->crp;
	csp = crypto_get_params(crp->crp_session);
	sess = req->sess;
	ring = &req->sc->sc_ring[req->ringidx];

	mtx_assert(&ring->mtx, MA_OWNED);

	/*
	 * Set up descriptors for input and output data.
	 *
	 * The processing engine programs require that any AAD comes first,
	 * followed by the cipher plaintext, followed by the digest. Some
	 * consumers place the digest first in the input buffer, in which case
	 * we have to create an extra descriptor.
	 *
	 * As an optimization, unmodified data is not passed to the output
	 * stream.
	 */
	sglist_reset(ring->cmd_data);
	sglist_reset(ring->res_data);
	if (crp->crp_aad_length != 0) {
		safexcel_append_segs(segs, nseg, ring->cmd_data,
		    crp->crp_aad_start, crp->crp_aad_length);
	}
	safexcel_append_segs(segs, nseg, ring->cmd_data,
	    crp->crp_payload_start, crp->crp_payload_length);
	if (csp->csp_cipher_alg != 0) {
		safexcel_append_segs(segs, nseg, ring->res_data,
		    crp->crp_payload_start, crp->crp_payload_length);
	}
	if (sess->digestlen > 0) {
		if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
			safexcel_append_segs(segs, nseg, ring->cmd_data,
			    crp->crp_digest_start, sess->digestlen);
		} else {
			safexcel_append_segs(segs, nseg, ring->res_data,
			    crp->crp_digest_start, sess->digestlen);
		}
	}

	sg = ring->cmd_data;
	if (sg->sg_nseg == 0) {
		/*
		 * Fake a segment for the command descriptor if the input has
		 * length zero. The EIP97 apparently does not handle
		 * zero-length packets properly since subsequent requests return
		 * bogus errors, so provide a dummy segment using the context
		 * descriptor. Also, we must allocate at least one command ring
		 * entry per request to keep the request shadow ring in sync.
		 */
		(void)sglist_append_phys(sg, req->ctx.paddr, 1);
	}
	for (i = 0, inlen = 0; i < sg->sg_nseg; i++)
		inlen += sg->sg_segs[i].ss_len;
	for (i = 0; i < sg->sg_nseg; i++) {
		first = i == 0;
		last = i == sg->sg_nseg - 1;

		cdesc = safexcel_cmd_descr_add(ring, first, last,
		    sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len,
		    (uint32_t)inlen, req->ctx.paddr);
		if (cdesc == NULL) {
			safexcel_cmd_descr_rollback(ring, i);
			counter_u64_add(req->sc->sc_cdesc_alloc_failures, 1);
			req->error = ERESTART;
			return;
		}
		if (i == 0)
			req->cdesc = cdesc;
	}
	req->cdescs = sg->sg_nseg;

	sg = ring->res_data;
	if (sg->sg_nseg == 0) {
		/*
		 * We need a result descriptor even if the output stream will be
		 * empty, for example when verifying an AAD digest.
		 */
		sg->sg_segs[0].ss_paddr = 0;
		sg->sg_segs[0].ss_len = 0;
		sg->sg_nseg = 1;
	}
	for (i = 0; i < sg->sg_nseg; i++) {
		first = i == 0;
		last = i == sg->sg_nseg - 1;

		if (safexcel_res_descr_add(ring, first, last,
		    sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len) == NULL) {
			safexcel_cmd_descr_rollback(ring,
			    ring->cmd_data->sg_nseg);
			safexcel_res_descr_rollback(ring, i);
			counter_u64_add(req->sc->sc_rdesc_alloc_failures, 1);
			req->error = ERESTART;
			return;
		}
	}
	req->rdescs = sg->sg_nseg;
}

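/*
 * Load a request's buffers for DMA and construct the corresponding
 * descriptor chains.
 */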
static int
safexcel_create_chain(struct safexcel_ring *ring, struct safexcel_request *req)
{
	int error;

	req->error = 0;
	req->cdescs = req->rdescs = 0;

	error = bus_dmamap_load_crp(ring->data_dtag, req->dmap, req->crp,
	    safexcel_create_chain_cb, req, BUS_DMA_NOWAIT);
	if (error == 0)
		req->dmap_loaded = true;

	if (req->error != 0)
		error = req->error;

	return (error);
}

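/*
 * Validate the cipher parameters shared by the CIPHER and ETA session
 * probes.
 */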
static bool
safexcel_probe_cipher(const struct crypto_session_params *csp)
{
	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_XTS:
		if (csp->csp_ivlen != AES_XTS_IV_LEN)
			return (false);
		break;
	default:
		return (false);
	}

	return (true);
}

/*
 * Determine whether the driver can implement a session with the requested
 * parameters.
 */
static int
safexcel_probesession(device_t dev, const struct crypto_session_params *csp)
{
	if (csp->csp_flags != 0)
		return (EINVAL);

	switch (csp->csp_mode) {
	case CSP_MODE_CIPHER:
		if (!safexcel_probe_cipher(csp))
			return (EINVAL);
		break;
	case CSP_MODE_DIGEST:
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return (EINVAL);
			break;
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_224:
		case CRYPTO_SHA2_224_HMAC:
		case CRYPTO_SHA2_256:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512:
		case CRYPTO_SHA2_512_HMAC:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		if (!safexcel_probe_cipher(csp))
			return (EINVAL);
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
			/*
			 * The EIP-97 does not support combining AES-XTS with
			 * hash operations.
			 */
			if (csp->csp_auth_alg != CRYPTO_SHA1_HMAC &&
			    csp->csp_auth_alg != CRYPTO_SHA2_224_HMAC &&
			    csp->csp_auth_alg != CRYPTO_SHA2_256_HMAC &&
			    csp->csp_auth_alg != CRYPTO_SHA2_384_HMAC &&
			    csp->csp_auth_alg != CRYPTO_SHA2_512_HMAC)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

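/*
 * Helpers mapping session parameters to the algorithm identifiers, digest
 * lengths and state sizes used in the context record.
 */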
static uint32_t
safexcel_aes_algid(int keylen)
{
	switch (keylen) {
	case 16:
		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES128);
	case 24:
		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES192);
	case 32:
		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES256);
	default:
		panic("invalid AES key length %d", keylen);
	}
}

static uint32_t
safexcel_aes_ccm_hashid(int keylen)
{
	switch (keylen) {
	case 16:
		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC128);
	case 24:
		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC192);
	case 32:
		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC256);
	default:
		panic("invalid AES key length %d", keylen);
	}
}

static uint32_t
safexcel_sha_hashid(int alg)
{
	switch (alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA1);
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA224);
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA256);
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA384);
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA512);
	default:
		__assert_unreachable();
	}
}

static int
safexcel_sha_hashlen(int alg)
{
	switch (alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		return (SHA1_HASH_LEN);
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
		return (SHA2_224_HASH_LEN);
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		return (SHA2_256_HASH_LEN);
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
		return (SHA2_384_HASH_LEN);
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		return (SHA2_512_HASH_LEN);
	default:
		__assert_unreachable();
	}
}

static int
safexcel_sha_statelen(int alg)
{
	switch (alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		return (SHA1_HASH_LEN);
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		return (SHA2_256_HASH_LEN);
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		return (SHA2_512_HASH_LEN);
	default:
		__assert_unreachable();
	}
}

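/*
 * Initialize driver-private session state and precompute the context record
 * templates used by requests that do not carry their own keys.
 */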
static int
safexcel_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct safexcel_session *sess;

	sess = crypto_get_driver_session(cses);
	sess->cses = cses;

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		sess->digest = SAFEXCEL_CONTROL0_DIGEST_PRECOMPUTED;
		sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
		sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
		sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
		break;
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		sess->digest = SAFEXCEL_CONTROL0_DIGEST_HMAC;
		sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
		sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
		sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
		break;
	case CRYPTO_AES_NIST_GMAC:
		sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
		sess->digestlen = GMAC_DIGEST_LEN;
		sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
		sess->alg = safexcel_aes_algid(csp->csp_auth_klen);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
		break;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_NIST_GCM_16:
		sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
		sess->digestlen = GMAC_DIGEST_LEN;
		sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
		break;
	case CRYPTO_AES_CCM_16:
		sess->hash = safexcel_aes_ccm_hashid(csp->csp_cipher_klen);
		sess->digest = SAFEXCEL_CONTROL0_DIGEST_CCM;
		sess->digestlen = CCM_CBC_MAX_DIGEST_LEN;
		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CCM;
		break;
	case CRYPTO_AES_CBC:
		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CBC;
		break;
	case CRYPTO_AES_ICM:
		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CTR;
		break;
	case CRYPTO_AES_XTS:
		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen / 2);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_XTS;
		break;
	}

	if (csp->csp_auth_mlen != 0)
		sess->digestlen = csp->csp_auth_mlen;

	sess->encctx.len = safexcel_set_context(&sess->encctx.ctx,
	    CRYPTO_OP_ENCRYPT, csp->csp_cipher_key, csp->csp_auth_key,
	    sess);
	sess->decctx.len = safexcel_set_context(&sess->decctx.ctx,
	    CRYPTO_OP_DECRYPT, csp->csp_cipher_key, csp->csp_auth_key,
	    sess);

	return (0);
}

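/*
 * Dispatch a crypto request: pick a ring based on the current CPU, allocate
 * a request structure, build the descriptor chains and token, and kick the
 * hardware.
 */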
static int
safexcel_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	struct safexcel_session *sess;
	struct safexcel_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sess = crypto_get_driver_session(crp->crp_session);

	if (__predict_false(crypto_buffer_len(&crp->crp_buf) >
	    SAFEXCEL_MAX_REQUEST_SIZE)) {
		crp->crp_etype = E2BIG;
		crypto_done(crp);
		return (0);
	}

	ring = &sc->sc_ring[curcpu % sc->sc_config.rings];
	mtx_lock(&ring->mtx);
	req = safexcel_alloc_request(sc, ring);
	if (__predict_false(req == NULL)) {
		ring->blocked = CRYPTO_SYMQ;
		mtx_unlock(&ring->mtx);
		counter_u64_add(sc->sc_req_alloc_failures, 1);
		return (ERESTART);
	}

	req->crp = crp;
	req->sess = sess;

	crypto_read_iv(crp, req->iv);

	error = safexcel_create_chain(ring, req);
	if (__predict_false(error != 0)) {
		safexcel_free_request(ring, req);
		if (error == ERESTART)
			ring->blocked = CRYPTO_SYMQ;
		mtx_unlock(&ring->mtx);
		if (error != ERESTART) {
			crp->crp_etype = error;
			crypto_done(crp);
			return (0);
		} else {
			return (ERESTART);
		}
	}

	safexcel_set_token(req);

	bus_dmamap_sync(ring->data_dtag, req->dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(req->ctx.tag, req->ctx.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	safexcel_execute(sc, ring, req, hint);

	mtx_unlock(&ring->mtx);

	return (0);
}

static device_method_t safexcel_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, safexcel_probe),
	DEVMETHOD(device_attach, safexcel_attach),
	DEVMETHOD(device_detach, safexcel_detach),

	/* Cryptodev interface */
	DEVMETHOD(cryptodev_probesession, safexcel_probesession),
	DEVMETHOD(cryptodev_newsession, safexcel_newsession),
	DEVMETHOD(cryptodev_process, safexcel_process),

	DEVMETHOD_END
};

static driver_t safexcel_driver = {
	.name = "safexcel",
	.methods = safexcel_methods,
	.size = sizeof(struct safexcel_softc),
};

DRIVER_MODULE(safexcel, simplebus, safexcel_driver, 0, 0);
MODULE_VERSION(safexcel, 1);
MODULE_DEPEND(safexcel, crypto, 1, 1, 1);