/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020, 2021 Rubicon Communications, LLC (Netgate)
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>

#include <crypto/rijndael/rijndael.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "cryptodev_if.h"

#include "safexcel_reg.h"
#include "safexcel_var.h"

/*
 * We only support the EIP97 for now.
 */
static struct ofw_compat_data safexcel_compat[] = {
	{ "inside-secure,safexcel-eip97ies",	(uintptr_t)97 },
	{ "inside-secure,safexcel-eip97",	(uintptr_t)97 },
	{ NULL,					0 }
};

const struct safexcel_reg_offsets eip97_regs_offset = {
	.hia_aic	= SAFEXCEL_EIP97_HIA_AIC_BASE,
	.hia_aic_g	= SAFEXCEL_EIP97_HIA_AIC_G_BASE,
	.hia_aic_r	= SAFEXCEL_EIP97_HIA_AIC_R_BASE,
	.hia_aic_xdr	= SAFEXCEL_EIP97_HIA_AIC_xDR_BASE,
	.hia_dfe	= SAFEXCEL_EIP97_HIA_DFE_BASE,
	.hia_dfe_thr	= SAFEXCEL_EIP97_HIA_DFE_THR_BASE,
	.hia_dse	= SAFEXCEL_EIP97_HIA_DSE_BASE,
	.hia_dse_thr	= SAFEXCEL_EIP97_HIA_DSE_THR_BASE,
	.hia_gen_cfg	= SAFEXCEL_EIP97_HIA_GEN_CFG_BASE,
	.pe		= SAFEXCEL_EIP97_PE_BASE,
};

const struct safexcel_reg_offsets eip197_regs_offset = {
	.hia_aic	= SAFEXCEL_EIP197_HIA_AIC_BASE,
	.hia_aic_g	= SAFEXCEL_EIP197_HIA_AIC_G_BASE,
	.hia_aic_r	= SAFEXCEL_EIP197_HIA_AIC_R_BASE,
	.hia_aic_xdr	= SAFEXCEL_EIP197_HIA_AIC_xDR_BASE,
	.hia_dfe	= SAFEXCEL_EIP197_HIA_DFE_BASE,
	.hia_dfe_thr	= SAFEXCEL_EIP197_HIA_DFE_THR_BASE,
	.hia_dse	= SAFEXCEL_EIP197_HIA_DSE_BASE,
	.hia_dse_thr	= SAFEXCEL_EIP197_HIA_DSE_THR_BASE,
	.hia_gen_cfg	= SAFEXCEL_EIP197_HIA_GEN_CFG_BASE,
	.pe		= SAFEXCEL_EIP197_PE_BASE,
};

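/*
 * Fetch the request at the command ring's read index, i.e., the oldest
 * request still awaiting completion processing.
 */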
static struct safexcel_request *
safexcel_next_request(struct safexcel_ring *ring)
{
	int i;

	i = ring->cdr.read;
	KASSERT(i >= 0 && i < SAFEXCEL_RING_SIZE,
	    ("%s: out of bounds request index %d", __func__, i));
	return (&ring->requests[i]);
}

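/*
 * Pop the descriptor at the ring's read index, or return NULL if the ring
 * is empty.
 */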
static struct safexcel_cmd_descr *
safexcel_cmd_descr_next(struct safexcel_cmd_descr_ring *ring)
{
	struct safexcel_cmd_descr *cdesc;

	if (ring->write == ring->read)
		return (NULL);
	cdesc = &ring->desc[ring->read];
	ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
	return (cdesc);
}

static struct safexcel_res_descr *
safexcel_res_descr_next(struct safexcel_res_descr_ring *ring)
{
	struct safexcel_res_descr *rdesc;

	if (ring->write == ring->read)
		return (NULL);
	rdesc = &ring->desc[ring->read];
	ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
	return (rdesc);
}

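/*
 * Reserve the request slot at the command ring's write index.  Returns NULL
 * if the ring is full.
 */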
static struct safexcel_request *
safexcel_alloc_request(struct safexcel_softc *sc, struct safexcel_ring *ring)
{
	int i;

	mtx_assert(&ring->mtx, MA_OWNED);

	i = ring->cdr.write;
	if ((i + 1) % SAFEXCEL_RING_SIZE == ring->cdr.read)
		return (NULL);
	return (&ring->requests[i]);
}

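/*
 * Unload a completed request's data DMA map and scrub the key material
 * stored in its context record and IV buffer.
 */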
static void
safexcel_free_request(struct safexcel_ring *ring, struct safexcel_request *req)
{
	struct safexcel_context_record *ctx;

	mtx_assert(&ring->mtx, MA_OWNED);

	if (req->dmap_loaded) {
		bus_dmamap_unload(ring->data_dtag, req->dmap);
		req->dmap_loaded = false;
	}
	ctx = (struct safexcel_context_record *)req->ctx.vaddr;
	explicit_bzero(ctx->data, sizeof(ctx->data));
	explicit_bzero(req->iv, sizeof(req->iv));
}

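/*
 * Process result descriptor ring completions: reap finished requests,
 * acknowledge them to the hardware, and pass the corresponding crypto
 * operations back to the framework.
 */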
static void
safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx)
{
	TAILQ_HEAD(, cryptop) cq;
	struct cryptop *crp, *tmp;
	struct safexcel_cmd_descr *cdesc __diagused;
	struct safexcel_res_descr *rdesc;
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	uint32_t blocked, error, i, nrdescs, nreqs;

	blocked = 0;
	ring = &sc->sc_ring[ringidx];

	nreqs = SAFEXCEL_READ(sc,
	    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT);
	nreqs >>= SAFEXCEL_xDR_PROC_xD_PKT_OFFSET;
	nreqs &= SAFEXCEL_xDR_PROC_xD_PKT_MASK;
	if (nreqs == 0) {
		SAFEXCEL_DPRINTF(sc, 1,
		    "zero pending requests on ring %d\n", ringidx);
		mtx_lock(&ring->mtx);
		goto out;
	}

	TAILQ_INIT(&cq);

	ring = &sc->sc_ring[ringidx];
	bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	nrdescs = 0;
	for (i = 0; i < nreqs; i++) {
		req = safexcel_next_request(ring);

		bus_dmamap_sync(req->ctx.tag, req->ctx.map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(ring->data_dtag, req->dmap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		while (req->cdescs-- > 0) {
			cdesc = safexcel_cmd_descr_next(&ring->cdr);
			KASSERT(cdesc != NULL,
			    ("%s: missing control descriptor", __func__));
			if (req->cdescs == 0)
				KASSERT(cdesc->last_seg,
				    ("%s: chain is not terminated", __func__));
		}
		nrdescs += req->rdescs;
		while (req->rdescs-- > 0) {
			rdesc = safexcel_res_descr_next(&ring->rdr);
			error = rdesc->result_data.error_code;
			if (error != 0) {
				if (error == SAFEXCEL_RESULT_ERR_AUTH_FAILED &&
				    req->crp->crp_etype == 0) {
					req->crp->crp_etype = EBADMSG;
				} else {
					SAFEXCEL_DPRINTF(sc, 1,
					    "error code %#x\n", error);
					req->crp->crp_etype = EIO;
				}
			}
		}

		TAILQ_INSERT_TAIL(&cq, req->crp, crp_next);
	}

	mtx_lock(&ring->mtx);
	if (nreqs != 0) {
		KASSERT(ring->queued >= nreqs,
		    ("%s: request count underflow, %d queued %d completed",
		    __func__, ring->queued, nreqs));
		ring->queued -= nreqs;

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_xD_PKT(nreqs) |
		    (sc->sc_config.rd_offset * nrdescs * sizeof(uint32_t)));
		blocked = ring->blocked;
		ring->blocked = 0;
	}
out:
	if (ring->queued != 0) {
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
		    SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | imin(ring->queued, 16));
	}
	mtx_unlock(&ring->mtx);

	if (blocked)
		crypto_unblock(sc->sc_cid, blocked);

	TAILQ_FOREACH_SAFE(crp, &cq, crp_next, tmp)
		crypto_done(crp);
}

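/*
 * Ring interrupt handler.  Acknowledges pending CDR and RDR interrupts and
 * processes result ring completions.
 */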
static void
safexcel_ring_intr(void *arg)
{
	struct safexcel_softc *sc;
	struct safexcel_intr_handle *ih;
	uint32_t status, stat;
	int ring;
	bool rdrpending;

	ih = arg;
	sc = ih->sc;
	ring = ih->ring;

	status = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
	    SAFEXCEL_HIA_AIC_R_ENABLED_STAT(ring));
	/* CDR interrupts */
	if (status & SAFEXCEL_CDR_IRQ(ring)) {
		stat = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
		    stat & SAFEXCEL_CDR_INTR_MASK);
	}
	/* RDR interrupts */
	rdrpending = false;
	if (status & SAFEXCEL_RDR_IRQ(ring)) {
		stat = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
		if ((stat & SAFEXCEL_xDR_ERR) == 0)
			rdrpending = true;
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
		    stat & SAFEXCEL_RDR_INTR_MASK);
	}
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ACK(ring),
	    status);

	if (rdrpending)
		safexcel_rdr_intr(sc, ring);
}

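/*
 * Read the hardware capability registers and derive the ring count and the
 * descriptor size, offset and alignment parameters used by the driver.
 */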
static int
safexcel_configure(struct safexcel_softc *sc)
{
	uint32_t i, mask, pemask, reg;

	if (sc->sc_type == 197) {
		sc->sc_offsets = eip197_regs_offset;
		pemask = SAFEXCEL_N_PES_MASK;
	} else {
		sc->sc_offsets = eip97_regs_offset;
		pemask = EIP97_N_PES_MASK;
	}

	/* Scan for valid ring interrupt controllers. */
	for (i = 0; i < SAFEXCEL_MAX_RING_AIC; i++) {
		reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
		    SAFEXCEL_HIA_AIC_R_VERSION(i));
		if (SAFEXCEL_REG_LO16(reg) != EIP201_VERSION_LE)
			break;
	}
	sc->sc_config.aic_rings = i;
	if (sc->sc_config.aic_rings == 0)
		return (-1);

	reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_OPTIONS);
	/* Check for 64bit addressing. */
	if ((reg & SAFEXCEL_OPT_ADDR_64) == 0)
		return (-1);
	/* Check alignment constraints (which we do not support). */
	if (((reg & SAFEXCEL_OPT_TGT_ALIGN_MASK) >>
	    SAFEXCEL_OPT_TGT_ALIGN_OFFSET) != 0)
		return (-1);

	sc->sc_config.hdw =
	    (reg & SAFEXCEL_xDR_HDW_MASK) >> SAFEXCEL_xDR_HDW_OFFSET;
	mask = (1 << sc->sc_config.hdw) - 1;

	sc->sc_config.rings = reg & SAFEXCEL_N_RINGS_MASK;
	/* Limit the number of rings to the number of ring interrupt controllers. */
	sc->sc_config.rings = MIN(sc->sc_config.rings, sc->sc_config.aic_rings);

	sc->sc_config.pes = (reg & pemask) >> SAFEXCEL_N_PES_OFFSET;

	sc->sc_config.cd_size =
	    sizeof(struct safexcel_cmd_descr) / sizeof(uint32_t);
	sc->sc_config.cd_offset = (sc->sc_config.cd_size + mask) & ~mask;

	sc->sc_config.rd_size =
	    sizeof(struct safexcel_res_descr) / sizeof(uint32_t);
	sc->sc_config.rd_offset = (sc->sc_config.rd_size + mask) & ~mask;

	sc->sc_config.atok_offset =
	    (SAFEXCEL_MAX_ATOKENS * sizeof(struct safexcel_instr) + mask) &
	    ~mask;

	return (0);
}

static void
safexcel_init_hia_bus_access(struct safexcel_softc *sc)
{
	uint32_t version, val;

	/* Determine endianness and configure byte swap. */
	version = SAFEXCEL_READ(sc,
	    SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_VERSION);
	val = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
	if (SAFEXCEL_REG_HI16(version) == SAFEXCEL_HIA_VERSION_BE) {
		val = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
		val = val ^ (SAFEXCEL_MST_CTRL_NO_BYTE_SWAP >> 24);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL,
		    val);
	}

	/* Configure wr/rd cache values. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_GEN_CFG(sc) + SAFEXCEL_HIA_MST_CTRL,
	    SAFEXCEL_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	    SAFEXCEL_MST_CTRL_WD_CACHE(WR_CACHE_4BITS));
}

static void
safexcel_disable_global_interrupts(struct safexcel_softc *sc)
{
	/* Disable and clear pending interrupts. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ENABLE_CTRL, 0);
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
	    SAFEXCEL_AIC_G_ACK_ALL_MASK);
}

/*
 * Configure the data fetch engine.  This component parses command descriptors
 * and sets up DMA transfers from host memory to the corresponding processing
 * engine.
 */
static void
safexcel_configure_dfe_engine(struct safexcel_softc *sc, int pe)
{
	/* Reset all DFE threads. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_RESET_PE);

	/* Deassert the DFE reset. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 0);

	/* DMA transfer size to use. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE(sc) + SAFEXCEL_HIA_DFE_CFG(pe),
	    SAFEXCEL_HIA_DFE_CFG_DIS_DEBUG |
	    SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(9) |
	    SAFEXCEL_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_CTRL_SIZE(7) |
	    SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS) |
	    SAFEXCEL_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS));

	/* Configure the PE DMA transfer thresholds. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_DBUF_THRES(pe),
	    SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
	    SAFEXCEL_PE_IN_xBUF_THRES_MAX(9));
	SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_TBUF_THRES(pe),
	    SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
	    SAFEXCEL_PE_IN_xBUF_THRES_MAX(7));
}

/*
 * Configure the data store engine.  This component parses result descriptors
 * and sets up DMA transfers from the processing engine to host memory.
 */
static int
safexcel_configure_dse(struct safexcel_softc *sc, int pe)
{
	uint32_t val;
	int count;

	/* Disable and reset all DSE threads. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_RESET_PE);

	/* Wait up to one second for the threads to go idle. */
	for (count = 0;;) {
		val = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_STAT(pe));
		if ((val & SAFEXCEL_DSE_THR_RDR_ID_MASK) ==
		    SAFEXCEL_DSE_THR_RDR_ID_MASK)
			break;
		if (count++ > 10000) {
			device_printf(sc->sc_dev, "DSE reset timeout\n");
			return (-1);
		}
		DELAY(100);
	}

	/* Exit the reset state. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 0);

	/* DMA transfer size to use */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE(sc) + SAFEXCEL_HIA_DSE_CFG(pe),
	    SAFEXCEL_HIA_DSE_CFG_DIS_DEBUG |
	    SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(8) |
	    SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS) |
	    SAFEXCEL_HIA_DSE_CFG_ALLWAYS_BUFFERABLE);

	/* Configure the processing engine thresholds. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_PE(sc) + SAFEXCEL_PE_OUT_DBUF_THRES(pe),
	    SAFEXCEL_PE_OUT_DBUF_THRES_MIN(7) |
	    SAFEXCEL_PE_OUT_DBUF_THRES_MAX(8));

	return (0);
}

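/*
 * Bring the command and result descriptor rings into a known clean state
 * before they are configured.
 */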
static void
safexcel_hw_prepare_rings(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		/*
		 * Command descriptors.
		 */

		/* Clear interrupts for this ring. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
		    SAFEXCEL_HIA_AIC_R_ENABLE_CLR_ALL_MASK);

		/* Disable external triggering. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
		    SAFEXCEL_RING_SIZE * sc->sc_config.cd_offset *
		    sizeof(uint32_t));

		/*
		 * Result descriptors.
		 */

		/* Disable external triggering. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		/* Ring size. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
		    SAFEXCEL_RING_SIZE * sc->sc_config.rd_offset *
		    sizeof(uint32_t));
	}
}

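/*
 * Program each ring's base address, descriptor size and fetch thresholds,
 * and enable result ring interrupts.
 */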
static void
safexcel_hw_setup_rings(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	uint32_t cd_size_rnd, mask, rd_size_rnd, val;
	int i;

	mask = (1 << sc->sc_config.hdw) - 1;
	cd_size_rnd = (sc->sc_config.cd_size + mask) >> sc->sc_config.hdw;
	val = (sizeof(struct safexcel_res_descr) -
	    sizeof(struct safexcel_res_data)) / sizeof(uint32_t);
	rd_size_rnd = (val + mask) >> sc->sc_config.hdw;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		/*
		 * Command descriptors.
		 */

		/* Ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
		    SAFEXCEL_ADDR_LO(ring->cdr.dma.paddr));
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
		    SAFEXCEL_ADDR_HI(ring->cdr.dma.paddr));

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
		    SAFEXCEL_xDR_DESC_MODE_64BIT | SAFEXCEL_CDR_DESC_MODE_ADCP |
		    (sc->sc_config.cd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
		    sc->sc_config.cd_size);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
		    ((SAFEXCEL_FETCH_COUNT * (cd_size_rnd << sc->sc_config.hdw)) <<
		      SAFEXCEL_xDR_xD_FETCH_THRESH) |
		    (SAFEXCEL_FETCH_COUNT * sc->sc_config.cd_offset));

		/* Configure DMA tx control. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
		    SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS));

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_CDR_INTR_MASK);

		/*
		 * Result descriptors.
		 */

		/* Ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
		    SAFEXCEL_ADDR_LO(ring->rdr.dma.paddr));
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
		    SAFEXCEL_ADDR_HI(ring->rdr.dma.paddr));

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
		    SAFEXCEL_xDR_DESC_MODE_64BIT |
		    (sc->sc_config.rd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
		    sc->sc_config.rd_size);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
		    ((SAFEXCEL_FETCH_COUNT * (rd_size_rnd << sc->sc_config.hdw)) <<
		    SAFEXCEL_xDR_xD_FETCH_THRESH) |
		    (SAFEXCEL_FETCH_COUNT * sc->sc_config.rd_offset));

		/* Configure DMA tx control. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
		    SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_WR_RES_BUF | SAFEXCEL_HIA_xDR_WR_CTRL_BUF);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_RDR_INTR_MASK);

		/* Enable ring interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CTRL(i),
		    SAFEXCEL_RDR_IRQ(i));
	}
}

/* Reset the command and result descriptor rings. */
static void
safexcel_hw_reset_rings(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		/*
		 * Result descriptor ring operations.
		 */

		/* Reset ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_RDR_INTR_MASK);

		/* Disable ring interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
		    SAFEXCEL_RDR_IRQ(i));

		/*
		 * Command descriptor ring operations.
		 */

		/* Reset ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_CDR_INTR_MASK);
	}
}

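/*
 * Enable the command and result descriptor rings of the given processing
 * engine.
 */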
static void
safexcel_enable_pe_engine(struct safexcel_softc *sc, int pe)
{
	int i, ring_mask;

	for (ring_mask = 0, i = 0; i < sc->sc_config.rings; i++) {
		ring_mask <<= 1;
		ring_mask |= 1;
	}

	/* Enable command descriptor rings. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);

	/* Enable result descriptor rings. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);

	/* Clear any HIA interrupt. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
	    SAFEXCEL_AIC_G_ACK_HIA_MASK);
}

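/*
 * Submit a request to the hardware by advancing the prepared descriptor
 * counters.  Requests submitted with CRYPTO_HINT_MORE are merely queued up;
 * they are flushed together with the next request that lacks the hint.
 */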
static void
safexcel_execute(struct safexcel_softc *sc, struct safexcel_ring *ring,
    struct safexcel_request *req, int hint)
{
	int ringidx, ncdesc, nrdesc;
	bool busy;

	mtx_assert(&ring->mtx, MA_OWNED);

	if ((hint & CRYPTO_HINT_MORE) != 0) {
		ring->pending++;
		ring->pending_cdesc += req->cdescs;
		ring->pending_rdesc += req->rdescs;
		return;
	}

	ringidx = req->ringidx;

	busy = ring->queued != 0;
	ncdesc = ring->pending_cdesc + req->cdescs;
	nrdesc = ring->pending_rdesc + req->rdescs;
	ring->queued += ring->pending + 1;

	if (!busy) {
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
		    SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | ring->queued);
	}
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
	    nrdesc * sc->sc_config.rd_offset * sizeof(uint32_t));
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_CDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
	    ncdesc * sc->sc_config.cd_offset * sizeof(uint32_t));

	ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0;
}

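/*
 * Initialize the software state of each ring and link every command
 * descriptor to its slice of the additional token (atoken) memory.
 */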
static void
safexcel_init_rings(struct safexcel_softc *sc)
{
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_ring *ring;
	uint64_t atok;
	int i, j;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		snprintf(ring->lockname, sizeof(ring->lockname),
		    "safexcel_ring%d", i);
		mtx_init(&ring->mtx, ring->lockname, NULL, MTX_DEF);

		ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0;
		ring->queued = 0;
		ring->cdr.read = ring->cdr.write = 0;
		ring->rdr.read = ring->rdr.write = 0;
		for (j = 0; j < SAFEXCEL_RING_SIZE; j++) {
			cdesc = &ring->cdr.desc[j];
			atok = ring->dma_atok.paddr +
			    sc->sc_config.atok_offset * j;
			cdesc->atok_lo = SAFEXCEL_ADDR_LO(atok);
			cdesc->atok_hi = SAFEXCEL_ADDR_HI(atok);
		}
	}
}

static void
safexcel_dma_alloc_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct safexcel_dma_mem *sdm;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
	sdm = arg;
	sdm->paddr = segs->ds_addr;
}

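/*
 * Allocate a DMA-safe buffer of the given size and record its bus address
 * in the descriptor.
 */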
static int
safexcel_dma_alloc_mem(struct safexcel_softc *sc, struct safexcel_dma_mem *sdm,
    bus_size_t size)
{
	int error;

	KASSERT(sdm->vaddr == NULL,
	    ("%s: DMA memory descriptor in use.", __func__));

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size, 1,			/* maxsize, nsegments */
	    size, BUS_DMA_COHERENT,	/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sdm->tag);			/* dmat */
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate busdma tag, error %d\n", error);
		goto err1;
	}

	error = bus_dmamem_alloc(sdm->tag, (void **)&sdm->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sdm->map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA safe memory, error %d\n", error);
		goto err2;
	}

	error = bus_dmamap_load(sdm->tag, sdm->map, sdm->vaddr, size,
	    safexcel_dma_alloc_mem_cb, sdm, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot get address of the DMA memory, error %d\n", error);
		goto err3;
	}

	return (0);
err3:
	bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
err2:
	bus_dma_tag_destroy(sdm->tag);
err1:
	sdm->vaddr = NULL;

	return (error);
}

static void
safexcel_dma_free_mem(struct safexcel_dma_mem *sdm)
{
	bus_dmamap_unload(sdm->tag, sdm->map);
	bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
	bus_dma_tag_destroy(sdm->tag);
}

static void
safexcel_dma_free_rings(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];
		safexcel_dma_free_mem(&ring->cdr.dma);
		safexcel_dma_free_mem(&ring->dma_atok);
		safexcel_dma_free_mem(&ring->rdr.dma);
		bus_dma_tag_destroy(ring->data_dtag);
		mtx_destroy(&ring->mtx);
	}
}

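/*
 * Create the per-ring data DMA tag and allocate the memory backing the
 * command, token and result rings.
 */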
static int
safexcel_dma_init(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	bus_size_t size;
	int error, i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		error = bus_dma_tag_create(
		    bus_get_dma_tag(sc->sc_dev),/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filtfunc, filtfuncarg */
		    SAFEXCEL_MAX_REQUEST_SIZE,	/* maxsize */
		    SAFEXCEL_MAX_FRAGMENTS,	/* nsegments */
		    SAFEXCEL_MAX_REQUEST_SIZE,	/* maxsegsz */
		    BUS_DMA_COHERENT,		/* flags */
		    NULL, NULL,			/* lockfunc, lockfuncarg */
		    &ring->data_dtag);		/* dmat */
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "bus_dma_tag_create main failed; error %d\n", error);
			return (error);
		}

		size = sizeof(uint32_t) * sc->sc_config.cd_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->cdr.dma, size);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to allocate CDR DMA memory, error %d\n",
			    error);
			goto err;
		}
		ring->cdr.desc =
		    (struct safexcel_cmd_descr *)ring->cdr.dma.vaddr;

		/* Allocate additional CDR token memory. */
		size = (bus_size_t)sc->sc_config.atok_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->dma_atok, size);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to allocate atoken DMA memory, error %d\n",
			    error);
			goto err;
		}

		size = sizeof(uint32_t) * sc->sc_config.rd_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->rdr.dma, size);
		if (error) {
			device_printf(sc->sc_dev,
			    "failed to allocate RDR DMA memory, error %d\n",
			    error);
			goto err;
		}
		ring->rdr.desc =
		    (struct safexcel_res_descr *)ring->rdr.dma.vaddr;
	}

	return (0);
err:
	safexcel_dma_free_rings(sc);
	return (error);
}

static void
safexcel_deinit_hw(struct safexcel_softc *sc)
{
	safexcel_hw_reset_rings(sc);
	safexcel_dma_free_rings(sc);
}

static int
safexcel_init_hw(struct safexcel_softc *sc)
{
	int pe;

	/* 23.3.7 Initialization */
	if (safexcel_configure(sc) != 0)
		return (EINVAL);

	if (safexcel_dma_init(sc) != 0)
		return (ENOMEM);

	safexcel_init_rings(sc);

	safexcel_init_hia_bus_access(sc);

	/* 23.3.7.2 Disable EIP-97 global Interrupts */
	safexcel_disable_global_interrupts(sc);

	for (pe = 0; pe < sc->sc_config.pes; pe++) {
		/* 23.3.7.3 Configure Data Fetch Engine */
		safexcel_configure_dfe_engine(sc, pe);

		/* 23.3.7.4 Configure Data Store Engine */
		if (safexcel_configure_dse(sc, pe)) {
			safexcel_deinit_hw(sc);
			return (-1);
		}

		/* 23.3.7.5 1. Protocol enables */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION_EN(pe),
		    0xffffffff);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION2_EN(pe),
		    0xffffffff);
	}

	safexcel_hw_prepare_rings(sc);

	/* 23.3.7.5 Configure the Processing Engine(s). */
	for (pe = 0; pe < sc->sc_config.pes; pe++)
		safexcel_enable_pe_engine(sc, pe);

	safexcel_hw_setup_rings(sc);

	return (0);
}

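/*
 * Install a handler for each ring interrupt and spread the handlers across
 * the available CPUs.
 */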
static int
safexcel_setup_dev_interrupts(struct safexcel_softc *sc)
{
	int error, i, j;

	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) {
		sc->sc_ih[i].sc = sc;
		sc->sc_ih[i].ring = i;

		if (bus_setup_intr(sc->sc_dev, sc->sc_intr[i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, safexcel_ring_intr,
		    &sc->sc_ih[i], &sc->sc_ih[i].handle)) {
			device_printf(sc->sc_dev,
			    "couldn't setup interrupt %d\n", i);
			goto err;
		}

		error = bus_bind_intr(sc->sc_dev, sc->sc_intr[i], i % mp_ncpus);
		if (error != 0)
			device_printf(sc->sc_dev,
			    "failed to bind ring %d interrupt, error %d\n",
			    i, error);
	}

	return (0);

err:
	for (j = 0; j < i; j++)
		bus_teardown_intr(sc->sc_dev, sc->sc_intr[j],
		    sc->sc_ih[j].handle);

	return (ENXIO);
}

static void
safexcel_teardown_dev_interrupts(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < SAFEXCEL_MAX_RINGS; i++)
		bus_teardown_intr(sc->sc_dev, sc->sc_intr[i],
		    sc->sc_ih[i].handle);
}

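/*
 * Allocate the device's register window and the per-ring interrupt
 * resources named "ring0", "ring1", ... in the device tree node.
 */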
static int
safexcel_alloc_dev_resources(struct safexcel_softc *sc)
{
	char name[16];
	device_t dev;
	phandle_t node;
	int error, i, rid;

	dev = sc->sc_dev;
	node = ofw_bus_get_node(dev);

	rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't allocate memory resources\n");
		return (ENXIO);
	}

	for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) {
		(void)snprintf(name, sizeof(name), "ring%d", i);
		error = ofw_bus_find_string_index(node, "interrupt-names", name,
		    &rid);
		if (error != 0)
			break;

		sc->sc_intr[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_ACTIVE | RF_SHAREABLE);
		if (sc->sc_intr[i] == NULL) {
			error = ENXIO;
			goto out;
		}
	}
	if (i == 0) {
		device_printf(dev, "couldn't allocate interrupt resources\n");
		error = ENXIO;
		goto out;
	}

	return (0);

out:
	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_res),
	    sc->sc_res);
	return (error);
}

static void
safexcel_free_dev_resources(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
		bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
	if (sc->sc_res != NULL)
		bus_release_resource(sc->sc_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_res), sc->sc_res);
}

static int
safexcel_probe(device_t dev)
{
	struct safexcel_softc *sc;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->sc_type = ofw_bus_search_compatible(dev, safexcel_compat)->ocd_data;
	if (sc->sc_type == 0)
		return (ENXIO);

	device_set_desc(dev, "SafeXcel EIP-97 crypto accelerator");

	return (BUS_PROBE_DEFAULT);
}

static int
safexcel_attach(device_t dev)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	struct safexcel_softc *sc;
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	int i, j, ringidx;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_cid = -1;

	if (safexcel_alloc_dev_resources(sc))
		goto err;

	if (safexcel_setup_dev_interrupts(sc))
		goto err1;

	if (safexcel_init_hw(sc))
		goto err2;

	for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
		ring = &sc->sc_ring[ringidx];

		ring->cmd_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
		ring->res_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);

		for (i = 0; i < SAFEXCEL_RING_SIZE; i++) {
			req = &ring->requests[i];
			req->sc = sc;
			req->ringidx = ringidx;
			if (bus_dmamap_create(ring->data_dtag,
			    BUS_DMA_COHERENT, &req->dmap) != 0) {
				for (j = 0; j < i; j++)
					bus_dmamap_destroy(ring->data_dtag,
					    ring->requests[j].dmap);
				goto err2;
			}
			if (safexcel_dma_alloc_mem(sc, &req->ctx,
			    sizeof(struct safexcel_context_record)) != 0) {
				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ring->data_dtag,
					    ring->requests[j].dmap);
					safexcel_dma_free_mem(
					    &ring->requests[j].ctx);
				}
				goto err2;
			}
		}
	}

	ctx = device_get_sysctl_ctx(dev);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, 0,
	    "Debug message verbosity");

	oid = device_get_sysctl_tree(sc->sc_dev);
	children = SYSCTL_CHILDREN(oid);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
	children = SYSCTL_CHILDREN(oid);

	sc->sc_req_alloc_failures = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "req_alloc_failures",
	    CTLFLAG_RD, &sc->sc_req_alloc_failures,
	    "Number of request allocation failures");
	sc->sc_cdesc_alloc_failures = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cdesc_alloc_failures",
	    CTLFLAG_RD, &sc->sc_cdesc_alloc_failures,
	    "Number of command descriptor ring overflows");
	sc->sc_rdesc_alloc_failures = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "rdesc_alloc_failures",
	    CTLFLAG_RD, &sc->sc_rdesc_alloc_failures,
	    "Number of result descriptor ring overflows");

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safexcel_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0)
		goto err2;

	return (0);

err2:
	safexcel_teardown_dev_interrupts(sc);
err1:
	safexcel_free_dev_resources(sc);
err:
	return (ENXIO);
}

static int
safexcel_detach(device_t dev)
{
	struct safexcel_ring *ring;
	struct safexcel_softc *sc;
	int i, ringidx;

	sc = device_get_softc(dev);

	if (sc->sc_cid >= 0)
		crypto_unregister_all(sc->sc_cid);

	counter_u64_free(sc->sc_req_alloc_failures);
	counter_u64_free(sc->sc_cdesc_alloc_failures);
	counter_u64_free(sc->sc_rdesc_alloc_failures);

	for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
		ring = &sc->sc_ring[ringidx];
		for (i = 0; i < SAFEXCEL_RING_SIZE; i++) {
			bus_dmamap_destroy(ring->data_dtag,
			    ring->requests[i].dmap);
			safexcel_dma_free_mem(&ring->requests[i].ctx);
		}
		sglist_free(ring->cmd_data);
		sglist_free(ring->res_data);
	}
	safexcel_deinit_hw(sc);
	safexcel_teardown_dev_interrupts(sc);
	safexcel_free_dev_resources(sc);

	return (0);
}

/*
 * Pre-compute the hash key used in GHASH, which is a block of zeroes encrypted
 * using the cipher key.
 */
static void
safexcel_setkey_ghash(const uint8_t *key, int klen, uint32_t *hashkey)
{
	uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
	uint8_t zeros[AES_BLOCK_LEN];
	int i, rounds;

	memset(zeros, 0, sizeof(zeros));

	rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
	rijndaelEncrypt(ks, rounds, zeros, (uint8_t *)hashkey);
	for (i = 0; i < GMAC_BLOCK_LEN / sizeof(uint32_t); i++)
		hashkey[i] = htobe32(hashkey[i]);

	explicit_bzero(ks, sizeof(ks));
}

/*
 * Pre-compute the combined CBC-MAC key, which consists of three keys K1, K2, K3
 * in the hardware implementation.  K1 is the cipher key and comes last in the
 * buffer since K2 and K3 have a fixed size of AES_BLOCK_LEN.  For now XCBC-MAC
 * is not implemented so K2 and K3 are fixed.
 */
static void
safexcel_setkey_xcbcmac(const uint8_t *key, int klen, uint32_t *hashkey)
{
	int i, off;

	memset(hashkey, 0, 2 * AES_BLOCK_LEN);
	off = 2 * AES_BLOCK_LEN / sizeof(uint32_t);
	for (i = 0; i < klen / sizeof(uint32_t); i++, key += 4)
		hashkey[i + off] = htobe32(le32dec(key));
}

static void
safexcel_setkey_hmac_digest(const struct auth_hash *ahash, union authctx *ctx,
    char *buf)
{
	int hashwords, i;

	switch (ahash->type) {
	case CRYPTO_SHA1_HMAC:
		hashwords = ahash->hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha1ctx.h.b32[i]);
		break;
	case CRYPTO_SHA2_224_HMAC:
		hashwords = auth_hash_hmac_sha2_256.hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha224ctx.state[i]);
		break;
	case CRYPTO_SHA2_256_HMAC:
		hashwords = ahash->hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha256ctx.state[i]);
		break;
	case CRYPTO_SHA2_384_HMAC:
		hashwords = auth_hash_hmac_sha2_512.hashsize / sizeof(uint64_t);
		for (i = 0; i < hashwords; i++)
			((uint64_t *)buf)[i] = htobe64(ctx->sha384ctx.state[i]);
		break;
	case CRYPTO_SHA2_512_HMAC:
		hashwords = ahash->hashsize / sizeof(uint64_t);
		for (i = 0; i < hashwords; i++)
			((uint64_t *)buf)[i] = htobe64(ctx->sha512ctx.state[i]);
		break;
	}
}

/*
 * Pre-compute the inner and outer digests used in the HMAC algorithm.
 */
static void
safexcel_setkey_hmac(const struct crypto_session_params *csp,
    const uint8_t *key, int klen, uint8_t *ipad, uint8_t *opad)
{
	union authctx ctx;
	const struct auth_hash *ahash;

	ahash = crypto_auth_hash(csp);
	hmac_init_ipad(ahash, key, klen, &ctx);
	safexcel_setkey_hmac_digest(ahash, &ctx, ipad);
	hmac_init_opad(ahash, key, klen, &ctx);
	safexcel_setkey_hmac_digest(ahash, &ctx, opad);
	explicit_bzero(&ctx, ahash->ctxsize);
}

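/*
 * The XTS tweak key occupies the second half of the supplied key material.
 */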
static void
safexcel_setkey_xts(const uint8_t *key, int klen, uint8_t *tweakkey)
{
	memcpy(tweakkey, key + klen, klen);
}

/*
 * Populate a context record with parameters from a session.  Some consumers
 * specify per-request keys, in which case the context must be re-initialized
 * for each request.
 */
static int
safexcel_set_context(struct safexcel_context_record *ctx, int op,
    const uint8_t *ckey, const uint8_t *akey, struct safexcel_session *sess)
{
	const struct crypto_session_params *csp;
	uint8_t *data;
	uint32_t ctrl0, ctrl1;
	int aklen, alg, cklen, off;

	csp = crypto_get_params(sess->cses);
	aklen = csp->csp_auth_klen;
	cklen = csp->csp_cipher_klen;
	if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
		cklen /= 2;

	ctrl0 = sess->alg | sess->digest | sess->hash;
	ctrl1 = sess->mode;

	data = (uint8_t *)ctx->data;
	if (csp->csp_cipher_alg != 0) {
		memcpy(data, ckey, cklen);
		off = cklen;
	} else if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) {
		memcpy(data, akey, aklen);
		off = aklen;
	} else {
		off = 0;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_NIST_GCM_16:
		safexcel_setkey_ghash(ckey, cklen, (uint32_t *)(data + off));
		off += GMAC_BLOCK_LEN;
		break;
	case CRYPTO_AES_CCM_16:
		safexcel_setkey_xcbcmac(ckey, cklen, (uint32_t *)(data + off));
		off += AES_BLOCK_LEN * 2 + cklen;
		break;
	case CRYPTO_AES_XTS:
		safexcel_setkey_xts(ckey, cklen, data + off);
		off += cklen;
		break;
	}
	switch (csp->csp_auth_alg) {
	case CRYPTO_AES_NIST_GMAC:
		safexcel_setkey_ghash(akey, aklen, (uint32_t *)(data + off));
		off += GMAC_BLOCK_LEN;
		break;
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		safexcel_setkey_hmac(csp, akey, aklen,
		    data + off, data + off + sess->statelen);
		off += sess->statelen * 2;
		break;
	}
	ctrl0 |= SAFEXCEL_CONTROL0_SIZE(off / sizeof(uint32_t));

	alg = csp->csp_cipher_alg;
	if (alg == 0)
		alg = csp->csp_auth_alg;

	switch (alg) {
	case CRYPTO_AES_CCM_16:
		if (CRYPTO_OP_IS_ENCRYPT(op)) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_ENCRYPT_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_DECRYPT_HASH_IN |
			    SAFEXCEL_CONTROL0_KEY_EN;
		}
		ctrl1 |= SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
		    SAFEXCEL_CONTROL1_IV2 | SAFEXCEL_CONTROL1_IV3;
		break;
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
	case CRYPTO_AES_XTS:
		if (CRYPTO_OP_IS_ENCRYPT(op)) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN;
			if (csp->csp_auth_alg != 0)
				ctrl0 |=
				    SAFEXCEL_CONTROL0_TYPE_ENCRYPT_HASH_OUT;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
			    SAFEXCEL_CONTROL0_KEY_EN;
			if (csp->csp_auth_alg != 0)
				ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
		}
		break;
	case CRYPTO_AES_NIST_GCM_16:
	case CRYPTO_AES_NIST_GMAC:
		if (CRYPTO_OP_IS_ENCRYPT(op) || csp->csp_auth_alg != 0) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN |
			    SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
			    SAFEXCEL_CONTROL0_KEY_EN |
			    SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
		}
		if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) {
			ctrl1 |= SAFEXCEL_CONTROL1_COUNTER_MODE |
			    SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
			    SAFEXCEL_CONTROL1_IV2;
		}
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		ctrl0 |= SAFEXCEL_CONTROL0_RESTART_HASH;
		/* FALLTHROUGH */
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
		break;
	}

	ctx->control0 = ctrl0;
	ctx->control1 = ctrl1;

	return (off);
}

/*
 * Construct a no-op instruction, used to pad input tokens.
 */
static void
safexcel_instr_nop(struct safexcel_instr **instrp)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = (1 << 2);
	instr->status = 0;
	instr->instructions = 0;

	*instrp = instr + 1;
}

/*
 * Insert the digest of the input payload.  This is typically the last
 * instruction of a sequence.
 */
static void
safexcel_instr_insert_digest(struct safexcel_instr **instrp, int len)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = len;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
	    SAFEXCEL_INSTR_INSERT_HASH_DIGEST;

	*instrp = instr + 1;
}

/*
 * Retrieve and verify a digest.
 */
static void
safexcel_instr_retrieve_digest(struct safexcel_instr **instrp, int len)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_RETRIEVE;
	instr->length = len;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
	instr++;

	instr->opcode = SAFEXCEL_INSTR_OPCODE_VERIFY_FIELDS;
	instr->length = len | SAFEXCEL_INSTR_VERIFY_HASH;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_VERIFY_PADDING;

	*instrp = instr + 1;
}

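/*
 * Insert a temporary AES block into the data stream and strip it from the
 * result stream.  The GCM and CCM token sequences use this between the AAD
 * and payload segments.
 */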
static void
safexcel_instr_temp_aes_block(struct safexcel_instr **instrp)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT_REMOVE_RESULT;
	instr->length = 0;
	instr->status = 0;
	instr->instructions = AES_BLOCK_LEN;
	instr++;

	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = AES_BLOCK_LEN;
	instr->status = 0;
	instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
	    SAFEXCEL_INSTR_DEST_CRYPTO;

	*instrp = instr + 1;
}

/*
 * Handle a request for an unauthenticated block cipher.
 */
static void
safexcel_instr_cipher(struct safexcel_request *req,
    struct safexcel_instr *instr, struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;

	crp = req->crp;

	/* Insert the payload. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_payload_length;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_PACKET |
	    SAFEXCEL_INSTR_STATUS_LAST_HASH;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_OUTPUT;

	cdesc->additional_cdata_size = 1;
}

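/*
 * Handle an encrypt-then-authenticate request: hash the AAD, then encrypt
 * and hash the payload, and finally insert or verify the digest.
 */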
static void
safexcel_instr_eta(struct safexcel_request *req, struct safexcel_instr *instr,
    struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	crp = req->crp;
	start = instr;

	/* Insert the AAD. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_aad_length;
	instr->status = crp->crp_payload_length == 0 ?
	    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	/* Encrypt any data left in the request. */
	if (crp->crp_payload_length > 0) {
		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
		instr->length = crp->crp_payload_length;
		instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
		instr->instructions = SAFEXCEL_INSTR_INS_LAST |
		    SAFEXCEL_INSTR_DEST_CRYPTO |
		    SAFEXCEL_INSTR_DEST_HASH |
		    SAFEXCEL_INSTR_DEST_OUTPUT;
		instr++;
	}

	/*
	 * Compute the digest, or extract it and place it in the output stream.
	 */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
	else
		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
	cdesc->additional_cdata_size = instr - start;
}

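/*
 * Handle a plain or HMAC digest request.
 */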
static void
safexcel_instr_sha_hash(struct safexcel_request *req,
    struct safexcel_instr *instr)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	crp = req->crp;
	start = instr;

	/* Pass the input data to the hash engine. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_payload_length;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
	instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	/* Insert the hash result into the output stream. */
	safexcel_instr_insert_digest(&instr, req->sess->digestlen);

	/* Pad the rest of the inline instruction space. */
	while (instr != start + SAFEXCEL_MAX_ITOKENS)
		safexcel_instr_nop(&instr);
}

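/*
 * Build the token sequence for an AES-CCM request.
 */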
1684 static void
1685 safexcel_instr_ccm(struct safexcel_request *req, struct safexcel_instr *instr,
1686     struct safexcel_cmd_descr *cdesc)
1687 {
1688 	const struct crypto_session_params *csp;
1689 	struct cryptop *crp;
1690 	struct safexcel_instr *start;
1691 	uint8_t *a0, *b0, *alenp, L;
1692 	int aalign, blen;
1693 
1694 	crp = req->crp;
1695 	csp = crypto_get_params(crp->crp_session);
1696 	start = instr;
1697 
1698 	/*
1699 	 * Construct two blocks, A0 and B0, used in encryption and
1700 	 * authentication, respectively.  A0 is embedded in the token
1701 	 * descriptor, and B0 is inserted directly into the data stream using
1702 	 * instructions below.
1703 	 *
1704 	 * An explicit check for overflow of the length field is not
1705 	 * needed since the maximum driver size of 65535 bytes fits in
1706 	 * the smallest length field used for a 13-byte nonce.
1707 	 */
1708 	blen = AES_BLOCK_LEN;
1709 	L = 15 - csp->csp_ivlen;
1710 
1711 	a0 = (uint8_t *)&cdesc->control_data.token[0];
1712 	memset(a0, 0, blen);
1713 	a0[0] = L - 1;
1714 	memcpy(&a0[1], req->iv, csp->csp_ivlen);
1715 
1716 	/*
1717 	 * Insert B0 and the AAD length into the input stream.
1718 	 */
1719 	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1720 	instr->length = blen + (crp->crp_aad_length > 0 ? 2 : 0);
1721 	instr->status = 0;
1722 	instr->instructions = SAFEXCEL_INSTR_DEST_HASH |
1723 	    SAFEXCEL_INSTR_INSERT_IMMEDIATE;
1724 	instr++;
1725 
1726 	b0 = (uint8_t *)instr;
1727 	memset(b0, 0, blen);
1728 	b0[0] =
1729 	    (L - 1) | /* payload length size */
1730 	    ((req->sess->digestlen - 2) / 2) << 3 /* digest length */ |
1731 	    (crp->crp_aad_length > 0 ? 1 : 0) << 6 /* AAD present bit */;
1732 	memcpy(&b0[1], req->iv, csp->csp_ivlen);
1733 	b0[14] = crp->crp_payload_length >> 8;
1734 	b0[15] = crp->crp_payload_length & 0xff;
1735 	instr += blen / sizeof(*instr);
1736 
1737 	/* Insert the AAD length and data into the input stream. */
1738 	if (crp->crp_aad_length > 0) {
1739 		alenp = (uint8_t *)instr;
1740 		alenp[0] = crp->crp_aad_length >> 8;
1741 		alenp[1] = crp->crp_aad_length & 0xff;
1742 		alenp[2] = 0;
1743 		alenp[3] = 0;
1744 		instr++;
1745 
1746 		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1747 		instr->length = crp->crp_aad_length;
1748 		instr->status = 0;
1749 		instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1750 		instr++;
1751 
1752 		/* Insert zero padding. */
1753 		aalign = (crp->crp_aad_length + 2) & (blen - 1);
1754 		instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1755 		instr->length = aalign == 0 ? 0 :
1756 		    blen - ((crp->crp_aad_length + 2) & (blen - 1));
1757 		instr->status = crp->crp_payload_length == 0 ?
1758 		    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1759 		instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1760 		instr++;
1761 	}
1762 
1763 	safexcel_instr_temp_aes_block(&instr);
1764 
1765 	/* Insert the cipher payload into the input stream. */
1766 	if (crp->crp_payload_length > 0) {
1767 		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1768 		instr->length = crp->crp_payload_length;
1769 		instr->status = (crp->crp_payload_length & (blen - 1)) == 0 ?
1770 		    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1771 		instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1772 		    SAFEXCEL_INSTR_DEST_CRYPTO |
1773 		    SAFEXCEL_INSTR_DEST_HASH |
1774 		    SAFEXCEL_INSTR_INS_LAST;
1775 		instr++;
1776 
1777 		/* Insert zero padding. */
1778 		if (crp->crp_payload_length & (blen - 1)) {
1779 			instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1780 			instr->length = blen -
1781 			    (crp->crp_payload_length & (blen - 1));
1782 			instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1783 			instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1784 			instr++;
1785 		}
1786 	}
1787 
1788 	/*
1789 	 * Insert the computed digest, or retrieve it for verification.
1790 	 */
1791 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1792 		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1793 	else
1794 		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
1795 
1796 	cdesc->additional_cdata_size = instr - start;
1797 }
1798 
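/*
 * Build the token instructions for an AES-GCM request.  The token
 * carries the initial counter block J0: the 12-byte IV followed by a
 * 32-bit block counter starting at 1 (NIST SP 800-38D).
 */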
1799 static void
1800 safexcel_instr_gcm(struct safexcel_request *req, struct safexcel_instr *instr,
1801     struct safexcel_cmd_descr *cdesc)
1802 {
1803 	struct cryptop *crp;
1804 	struct safexcel_instr *start;
1805 
1806 	memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
1807 	cdesc->control_data.token[3] = htobe32(1);
1808 
1809 	crp = req->crp;
1810 	start = instr;
1811 
1812 	/* Insert the AAD into the input stream. */
1813 	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1814 	instr->length = crp->crp_aad_length;
1815 	instr->status = crp->crp_payload_length == 0 ?
1816 	    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1817 	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1818 	    SAFEXCEL_INSTR_DEST_HASH;
1819 	instr++;
1820 
1821 	safexcel_instr_temp_aes_block(&instr);
1822 
1823 	/* Insert the cipher payload into the input stream. */
1824 	if (crp->crp_payload_length > 0) {
1825 		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1826 		instr->length = crp->crp_payload_length;
1827 		instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1828 		instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1829 		    SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_HASH |
1830 		    SAFEXCEL_INSTR_INS_LAST;
1831 		instr++;
1832 	}
1833 
1834 	/*
1835 	 * Insert the computed digest, or retrieve it for verification.
1836 	 */
1837 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1838 		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1839 	else
1840 		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
1841 
1842 	cdesc->additional_cdata_size = instr - start;
1843 }
1844 
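/*
 * Build the token instructions for an AES-GMAC request, which
 * authenticates the payload without encrypting it.
 */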
1845 static void
1846 safexcel_instr_gmac(struct safexcel_request *req, struct safexcel_instr *instr,
1847     struct safexcel_cmd_descr *cdesc)
1848 {
1849 	struct cryptop *crp;
1850 	struct safexcel_instr *start;
1851 
1852 	memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
1853 	cdesc->control_data.token[3] = htobe32(1);
1854 
1855 	crp = req->crp;
1856 	start = instr;
1857 
1858 	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1859 	instr->length = crp->crp_payload_length;
1860 	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1861 	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1862 	    SAFEXCEL_INSTR_DEST_HASH;
1863 	instr++;
1864 
1865 	safexcel_instr_temp_aes_block(&instr);
1866 
1867 	safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1868 
1869 	cdesc->additional_cdata_size = instr - start;
1870 }
1871 
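/*
 * Populate the request's context record and token.  The context record
 * is either copied from the template computed at session creation or,
 * when a per-request key is supplied, regenerated on the fly.  The
 * token instructions are then built based on the algorithms in use.
 */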
1872 static void
1873 safexcel_set_token(struct safexcel_request *req)
1874 {
1875 	const struct crypto_session_params *csp;
1876 	struct cryptop *crp;
1877 	struct safexcel_cmd_descr *cdesc;
1878 	struct safexcel_context_record *ctx;
1879 	struct safexcel_context_template *ctxtmp;
1880 	struct safexcel_instr *instr;
1881 	struct safexcel_softc *sc;
1882 	const uint8_t *akey, *ckey;
1883 	int ringidx;
1884 
1885 	crp = req->crp;
1886 	csp = crypto_get_params(crp->crp_session);
1887 	cdesc = req->cdesc;
1888 	sc = req->sc;
1889 	ringidx = req->ringidx;
1890 
1891 	akey = crp->crp_auth_key;
1892 	ckey = crp->crp_cipher_key;
1893 	if (akey != NULL || ckey != NULL) {
1894 		/*
1895 		 * If we have a per-request key we have to generate the context
1896 		 * record on the fly.
1897 		 */
1898 		if (akey == NULL)
1899 			akey = csp->csp_auth_key;
1900 		if (ckey == NULL)
1901 			ckey = csp->csp_cipher_key;
1902 		ctx = (struct safexcel_context_record *)req->ctx.vaddr;
1903 		(void)safexcel_set_context(ctx, crp->crp_op, ckey, akey,
1904 		    req->sess);
1905 	} else {
1906 		/*
1907 		 * Use the context record template computed at session
1908 		 * initialization time.
1909 		 */
1910 		ctxtmp = CRYPTO_OP_IS_ENCRYPT(crp->crp_op) ?
1911 		    &req->sess->encctx : &req->sess->decctx;
1912 		ctx = &ctxtmp->ctx;
1913 		memcpy(req->ctx.vaddr + 2 * sizeof(uint32_t), ctx->data,
1914 		    ctxtmp->len);
1915 	}
1916 	cdesc->control_data.control0 = ctx->control0;
1917 	cdesc->control_data.control1 = ctx->control1;
1918 
1919 	/*
1920 	 * For keyless hash operations, the token instructions can be embedded
1921 	 * in the token itself.  Otherwise we use an additional token descriptor
1922 	 * and the embedded instruction space is used to store the IV.
1923 	 */
1924 	if (csp->csp_cipher_alg == 0 &&
1925 	    csp->csp_auth_alg != CRYPTO_AES_NIST_GMAC) {
1926 		instr = (void *)cdesc->control_data.token;
1927 	} else {
1928 		instr = (void *)(sc->sc_ring[ringidx].dma_atok.vaddr +
1929 		    sc->sc_config.atok_offset *
1930 		    (cdesc - sc->sc_ring[ringidx].cdr.desc));
1931 		cdesc->control_data.options |= SAFEXCEL_OPTION_4_TOKEN_IV_CMD;
1932 	}
1933 
1934 	switch (csp->csp_cipher_alg) {
1935 	case CRYPTO_AES_NIST_GCM_16:
1936 		safexcel_instr_gcm(req, instr, cdesc);
1937 		break;
1938 	case CRYPTO_AES_CCM_16:
1939 		safexcel_instr_ccm(req, instr, cdesc);
1940 		break;
1941 	case CRYPTO_AES_XTS:
1942 		memcpy(cdesc->control_data.token, req->iv, AES_XTS_IV_LEN);
1943 		memset(cdesc->control_data.token +
1944 		    AES_XTS_IV_LEN / sizeof(uint32_t), 0, AES_XTS_IV_LEN);
1945 
1946 		safexcel_instr_cipher(req, instr, cdesc);
1947 		break;
1948 	case CRYPTO_AES_CBC:
1949 	case CRYPTO_AES_ICM:
1950 		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_LEN);
1951 		if (csp->csp_auth_alg != 0)
1952 			safexcel_instr_eta(req, instr, cdesc);
1953 		else
1954 			safexcel_instr_cipher(req, instr, cdesc);
1955 		break;
1956 	default:
1957 		switch (csp->csp_auth_alg) {
1958 		case CRYPTO_SHA1:
1959 		case CRYPTO_SHA1_HMAC:
1960 		case CRYPTO_SHA2_224:
1961 		case CRYPTO_SHA2_224_HMAC:
1962 		case CRYPTO_SHA2_256:
1963 		case CRYPTO_SHA2_256_HMAC:
1964 		case CRYPTO_SHA2_384:
1965 		case CRYPTO_SHA2_384_HMAC:
1966 		case CRYPTO_SHA2_512:
1967 		case CRYPTO_SHA2_512_HMAC:
1968 			safexcel_instr_sha_hash(req, instr);
1969 			break;
1970 		case CRYPTO_AES_NIST_GMAC:
1971 			safexcel_instr_gmac(req, instr, cdesc);
1972 			break;
1973 		default:
1974 			panic("unhandled auth request %d", csp->csp_auth_alg);
1975 		}
1976 		break;
1977 	}
1978 }
1979 
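/*
 * Reserve the next result (output) descriptor in the ring, or return
 * NULL if the ring is full.  One slot is always left unused so that a
 * full ring can be distinguished from an empty one.
 */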
1980 static struct safexcel_res_descr *
1981 safexcel_res_descr_add(struct safexcel_ring *ring, bool first, bool last,
1982     bus_addr_t data, uint32_t len)
1983 {
1984 	struct safexcel_res_descr *rdesc;
1985 	struct safexcel_res_descr_ring *rring;
1986 
1987 	mtx_assert(&ring->mtx, MA_OWNED);
1988 
1989 	rring = &ring->rdr;
1990 	if ((rring->write + 1) % SAFEXCEL_RING_SIZE == rring->read)
1991 		return (NULL);
1992 
1993 	rdesc = &rring->desc[rring->write];
1994 	rring->write = (rring->write + 1) % SAFEXCEL_RING_SIZE;
1995 
1996 	rdesc->particle_size = len;
1997 	rdesc->rsvd0 = 0;
1998 	rdesc->descriptor_overflow = 0;
1999 	rdesc->buffer_overflow = 0;
2000 	rdesc->last_seg = last;
2001 	rdesc->first_seg = first;
2002 	rdesc->result_size =
2003 	    sizeof(struct safexcel_res_data) / sizeof(uint32_t);
2004 	rdesc->rsvd1 = 0;
2005 	rdesc->data_lo = SAFEXCEL_ADDR_LO(data);
2006 	rdesc->data_hi = SAFEXCEL_ADDR_HI(data);
2007 
2008 	if (first) {
2009 		rdesc->result_data.packet_length = 0;
2010 		rdesc->result_data.error_code = 0;
2011 	}
2012 
2013 	return (rdesc);
2014 }
2015 
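/*
 * Reserve the next command (input) descriptor in the ring.  The first
 * descriptor of a request also carries the total packet length, the
 * token type and a pointer to the context record.
 */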
2016 static struct safexcel_cmd_descr *
2017 safexcel_cmd_descr_add(struct safexcel_ring *ring, bool first, bool last,
2018     bus_addr_t data, uint32_t seglen, uint32_t reqlen, bus_addr_t context)
2019 {
2020 	struct safexcel_cmd_descr *cdesc;
2021 	struct safexcel_cmd_descr_ring *cring;
2022 
2023 	KASSERT(reqlen <= SAFEXCEL_MAX_REQUEST_SIZE,
2024 	    ("%s: request length %u too long", __func__, reqlen));
2025 	mtx_assert(&ring->mtx, MA_OWNED);
2026 
2027 	cring = &ring->cdr;
2028 	if ((cring->write + 1) % SAFEXCEL_RING_SIZE == cring->read)
2029 		return (NULL);
2030 
2031 	cdesc = &cring->desc[cring->write];
2032 	cring->write = (cring->write + 1) % SAFEXCEL_RING_SIZE;
2033 
2034 	cdesc->particle_size = seglen;
2035 	cdesc->rsvd0 = 0;
2036 	cdesc->last_seg = last;
2037 	cdesc->first_seg = first;
2038 	cdesc->additional_cdata_size = 0;
2039 	cdesc->rsvd1 = 0;
2040 	cdesc->data_lo = SAFEXCEL_ADDR_LO(data);
2041 	cdesc->data_hi = SAFEXCEL_ADDR_HI(data);
2042 	if (first) {
2043 		cdesc->control_data.packet_length = reqlen;
2044 		cdesc->control_data.options = SAFEXCEL_OPTION_IP |
2045 		    SAFEXCEL_OPTION_CP | SAFEXCEL_OPTION_CTX_CTRL_IN_CMD |
2046 		    SAFEXCEL_OPTION_RC_AUTO;
2047 		cdesc->control_data.type = SAFEXCEL_TOKEN_TYPE_BYPASS;
2048 		cdesc->control_data.context_lo = SAFEXCEL_ADDR_LO(context) |
2049 		    SAFEXCEL_CONTEXT_SMALL;
2050 		cdesc->control_data.context_hi = SAFEXCEL_ADDR_HI(context);
2051 	}
2052 
2053 	return (cdesc);
2054 }
2055 
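/*
 * Release the last "count" command descriptors reserved in the ring,
 * unwinding a partially constructed chain.
 */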
2056 static void
2057 safexcel_cmd_descr_rollback(struct safexcel_ring *ring, int count)
2058 {
2059 	struct safexcel_cmd_descr_ring *cring;
2060 
2061 	mtx_assert(&ring->mtx, MA_OWNED);
2062 
2063 	cring = &ring->cdr;
2064 	cring->write -= count;
2065 	if (cring->write < 0)
2066 		cring->write += SAFEXCEL_RING_SIZE;
2067 }
2068 
2069 static void
2070 safexcel_res_descr_rollback(struct safexcel_ring *ring, int count)
2071 {
2072 	struct safexcel_res_descr_ring *rring;
2073 
2074 	mtx_assert(&ring->mtx, MA_OWNED);
2075 
2076 	rring = &ring->rdr;
2077 	rring->write -= count;
2078 	if (rring->write < 0)
2079 		rring->write += SAFEXCEL_RING_SIZE;
2080 }
2081 
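/*
 * Append the subrange [start, start + len) of the busdma segment array
 * "segs" to the scatter/gather list "sg".
 */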
2082 static void
2083 safexcel_append_segs(bus_dma_segment_t *segs, int nseg, struct sglist *sg,
2084     int start, int len)
2085 {
2086 	bus_dma_segment_t *seg;
2087 	size_t seglen;
2088 	int error, i;
2089 
2090 	for (i = 0; i < nseg && len > 0; i++) {
2091 		seg = &segs[i];
2092 
2093 		if (seg->ds_len <= start) {
2094 			start -= seg->ds_len;
2095 			continue;
2096 		}
2097 
2098 		seglen = MIN(len, seg->ds_len - start);
2099 		error = sglist_append_phys(sg, seg->ds_addr + start, seglen);
2100 		if (error != 0)
2101 			panic("%s: ran out of segments: %d", __func__, error);
2102 		len -= seglen;
2103 		start = 0;
2104 	}
2105 }
2106 
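/*
 * busdma load callback: build the command and result descriptor chains
 * for a request from the mapped buffer segments.  On ring exhaustion,
 * reserved descriptors are rolled back and the request fails with
 * ERESTART so that it can be retried later.
 */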
2107 static void
2108 safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg,
2109     int error)
2110 {
2111 	const struct crypto_session_params *csp;
2112 	struct cryptop *crp;
2113 	struct safexcel_cmd_descr *cdesc;
2114 	struct safexcel_request *req;
2115 	struct safexcel_ring *ring;
2116 	struct safexcel_session *sess;
2117 	struct sglist *sg;
2118 	size_t inlen;
2119 	int i;
2120 	bool first, last;
2121 
2122 	req = arg;
2123 	if (error != 0) {
2124 		req->error = error;
2125 		return;
2126 	}
2127 
2128 	crp = req->crp;
2129 	csp = crypto_get_params(crp->crp_session);
2130 	sess = req->sess;
2131 	ring = &req->sc->sc_ring[req->ringidx];
2132 
2133 	mtx_assert(&ring->mtx, MA_OWNED);
2134 
2135 	/*
2136 	 * Set up descriptors for input and output data.
2137 	 *
2138 	 * The processing engine programs require that any AAD comes first,
2139 	 * followed by the cipher plaintext, followed by the digest.  Some
2140 	 * consumers place the digest first in the input buffer, in which case
2141 	 * we have to create an extra descriptor.
2142 	 *
2143 	 * As an optimization, unmodified data is not passed to the output
2144 	 * stream.
2145 	 */
2146 	sglist_reset(ring->cmd_data);
2147 	sglist_reset(ring->res_data);
2148 	if (crp->crp_aad_length != 0) {
2149 		safexcel_append_segs(segs, nseg, ring->cmd_data,
2150 		    crp->crp_aad_start, crp->crp_aad_length);
2151 	}
2152 	safexcel_append_segs(segs, nseg, ring->cmd_data,
2153 	    crp->crp_payload_start, crp->crp_payload_length);
2154 	if (csp->csp_cipher_alg != 0) {
2155 		safexcel_append_segs(segs, nseg, ring->res_data,
2156 		    crp->crp_payload_start, crp->crp_payload_length);
2157 	}
2158 	if (sess->digestlen > 0) {
2159 		if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
2160 			safexcel_append_segs(segs, nseg, ring->cmd_data,
2161 			    crp->crp_digest_start, sess->digestlen);
2162 		} else {
2163 			safexcel_append_segs(segs, nseg, ring->res_data,
2164 			    crp->crp_digest_start, sess->digestlen);
2165 		}
2166 	}
2167 
2168 	sg = ring->cmd_data;
2169 	if (sg->sg_nseg == 0) {
2170 		/*
2171 		 * Fake a segment for the command descriptor if the input has
2172 		 * length zero.  The EIP97 apparently does not handle
2173 		 * zero-length packets properly since subsequent requests return
2174 		 * bogus errors, so provide a dummy segment using the context
2175 		 * descriptor.  Also, we must allocate at least one command ring
2176 		 * entry per request to keep the request shadow ring in sync.
2177 		 */
2178 		(void)sglist_append_phys(sg, req->ctx.paddr, 1);
2179 	}
2180 	for (i = 0, inlen = 0; i < sg->sg_nseg; i++)
2181 		inlen += sg->sg_segs[i].ss_len;
2182 	for (i = 0; i < sg->sg_nseg; i++) {
2183 		first = i == 0;
2184 		last = i == sg->sg_nseg - 1;
2185 
2186 		cdesc = safexcel_cmd_descr_add(ring, first, last,
2187 		    sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len,
2188 		    (uint32_t)inlen, req->ctx.paddr);
2189 		if (cdesc == NULL) {
2190 			safexcel_cmd_descr_rollback(ring, i);
2191 			counter_u64_add(req->sc->sc_cdesc_alloc_failures, 1);
2192 			req->error = ERESTART;
2193 			return;
2194 		}
2195 		if (i == 0)
2196 			req->cdesc = cdesc;
2197 	}
2198 	req->cdescs = sg->sg_nseg;
2199 
2200 	sg = ring->res_data;
2201 	if (sg->sg_nseg == 0) {
2202 		/*
2203 		 * We need a result descriptor even if the output stream will be
2204 		 * empty, for example when verifying an AAD digest.
2205 		 */
2206 		sg->sg_segs[0].ss_paddr = 0;
2207 		sg->sg_segs[0].ss_len = 0;
2208 		sg->sg_nseg = 1;
2209 	}
2210 	for (i = 0; i < sg->sg_nseg; i++) {
2211 		first = i == 0;
2212 		last = i == sg->sg_nseg - 1;
2213 
2214 		if (safexcel_res_descr_add(ring, first, last,
2215 		    sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len) == NULL) {
2216 			safexcel_cmd_descr_rollback(ring,
2217 			    ring->cmd_data->sg_nseg);
2218 			safexcel_res_descr_rollback(ring, i);
2219 			counter_u64_add(req->sc->sc_rdesc_alloc_failures, 1);
2220 			req->error = ERESTART;
2221 			return;
2222 		}
2223 	}
2224 	req->rdescs = sg->sg_nseg;
2225 }
2226 
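/*
 * Map the request's data buffer for DMA and build the command and
 * result descriptor chains.
 */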
2227 static int
2228 safexcel_create_chain(struct safexcel_ring *ring, struct safexcel_request *req)
2229 {
2230 	int error;
2231 
2232 	req->error = 0;
2233 	req->cdescs = req->rdescs = 0;
2234 
2235 	error = bus_dmamap_load_crp(ring->data_dtag, req->dmap, req->crp,
2236 	    safexcel_create_chain_cb, req, BUS_DMA_NOWAIT);
2237 	if (error == 0)
2238 		req->dmap_loaded = true;
2239 
2240 	if (req->error != 0)
2241 		error = req->error;
2242 
2243 	return (error);
2244 }
2245 
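/*
 * Check that the requested cipher algorithm and IV length are
 * supported.
 */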
2246 static bool
2247 safexcel_probe_cipher(const struct crypto_session_params *csp)
2248 {
2249 	switch (csp->csp_cipher_alg) {
2250 	case CRYPTO_AES_CBC:
2251 	case CRYPTO_AES_ICM:
2252 		if (csp->csp_ivlen != AES_BLOCK_LEN)
2253 			return (false);
2254 		break;
2255 	case CRYPTO_AES_XTS:
2256 		if (csp->csp_ivlen != AES_XTS_IV_LEN)
2257 			return (false);
2258 		break;
2259 	default:
2260 		return (false);
2261 	}
2262 
2263 	return (true);
2264 }
2265 
2266 /*
2267  * Determine whether the driver can implement a session with the requested
2268  * parameters.
2269  */
2270 static int
2271 safexcel_probesession(device_t dev, const struct crypto_session_params *csp)
2272 {
2273 	if (csp->csp_flags != 0)
2274 		return (EINVAL);
2275 
2276 	switch (csp->csp_mode) {
2277 	case CSP_MODE_CIPHER:
2278 		if (!safexcel_probe_cipher(csp))
2279 			return (EINVAL);
2280 		break;
2281 	case CSP_MODE_DIGEST:
2282 		switch (csp->csp_auth_alg) {
2283 		case CRYPTO_AES_NIST_GMAC:
2284 			if (csp->csp_ivlen != AES_GCM_IV_LEN)
2285 				return (EINVAL);
2286 			break;
2287 		case CRYPTO_SHA1:
2288 		case CRYPTO_SHA1_HMAC:
2289 		case CRYPTO_SHA2_224:
2290 		case CRYPTO_SHA2_224_HMAC:
2291 		case CRYPTO_SHA2_256:
2292 		case CRYPTO_SHA2_256_HMAC:
2293 		case CRYPTO_SHA2_384:
2294 		case CRYPTO_SHA2_384_HMAC:
2295 		case CRYPTO_SHA2_512:
2296 		case CRYPTO_SHA2_512_HMAC:
2297 			break;
2298 		default:
2299 			return (EINVAL);
2300 		}
2301 		break;
2302 	case CSP_MODE_AEAD:
2303 		switch (csp->csp_cipher_alg) {
2304 		case CRYPTO_AES_NIST_GCM_16:
2305 		case CRYPTO_AES_CCM_16:
2306 			break;
2307 		default:
2308 			return (EINVAL);
2309 		}
2310 		break;
2311 	case CSP_MODE_ETA:
2312 		if (!safexcel_probe_cipher(csp))
2313 			return (EINVAL);
2314 		switch (csp->csp_cipher_alg) {
2315 		case CRYPTO_AES_CBC:
2316 		case CRYPTO_AES_ICM:
2317 			/*
2318 			 * The EIP-97 does not support combining AES-XTS with
2319 			 * hash operations.
2320 			 */
2321 			if (csp->csp_auth_alg != CRYPTO_SHA1_HMAC &&
2322 			    csp->csp_auth_alg != CRYPTO_SHA2_224_HMAC &&
2323 			    csp->csp_auth_alg != CRYPTO_SHA2_256_HMAC &&
2324 			    csp->csp_auth_alg != CRYPTO_SHA2_384_HMAC &&
2325 			    csp->csp_auth_alg != CRYPTO_SHA2_512_HMAC)
2326 				return (EINVAL);
2327 			break;
2328 		default:
2329 			return (EINVAL);
2330 		}
2331 		break;
2332 	default:
2333 		return (EINVAL);
2334 	}
2335 
2336 	return (CRYPTODEV_PROBE_HARDWARE);
2337 }
2338 
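/*
 * For reference, a consumer reaches safexcel_probesession() through
 * crypto_newsession() with parameters along these lines (a minimal
 * AES-GCM sketch of the opencrypto API; the key variable and the
 * choice of values are illustrative only):
 *
 *	struct crypto_session_params csp = {
 *		.csp_mode = CSP_MODE_AEAD,
 *		.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16,
 *		.csp_cipher_klen = 32,
 *		.csp_cipher_key = key,
 *		.csp_ivlen = AES_GCM_IV_LEN,
 *	};
 *	crypto_session_t cses;
 *	int error = crypto_newsession(&cses, &csp, CRYPTOCAP_F_HARDWARE);
 */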
2339 static uint32_t
2340 safexcel_aes_algid(int keylen)
2341 {
2342 	switch (keylen) {
2343 	case 16:
2344 		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES128);
2345 	case 24:
2346 		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES192);
2347 	case 32:
2348 		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES256);
2349 	default:
2350 		panic("invalid AES key length %d", keylen);
2351 	}
2352 }
2353 
2354 static uint32_t
2355 safexcel_aes_ccm_hashid(int keylen)
2356 {
2357 	switch (keylen) {
2358 	case 16:
2359 		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC128);
2360 	case 24:
2361 		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC192);
2362 	case 32:
2363 		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC256);
2364 	default:
2365 		panic("invalid AES key length %d", keylen);
2366 	}
2367 }
2368 
2369 static uint32_t
2370 safexcel_sha_hashid(int alg)
2371 {
2372 	switch (alg) {
2373 	case CRYPTO_SHA1:
2374 	case CRYPTO_SHA1_HMAC:
2375 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA1);
2376 	case CRYPTO_SHA2_224:
2377 	case CRYPTO_SHA2_224_HMAC:
2378 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA224);
2379 	case CRYPTO_SHA2_256:
2380 	case CRYPTO_SHA2_256_HMAC:
2381 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA256);
2382 	case CRYPTO_SHA2_384:
2383 	case CRYPTO_SHA2_384_HMAC:
2384 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA384);
2385 	case CRYPTO_SHA2_512:
2386 	case CRYPTO_SHA2_512_HMAC:
2387 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA512);
2388 	default:
2389 		__assert_unreachable();
2390 	}
2391 }
2392 
2393 static int
2394 safexcel_sha_hashlen(int alg)
2395 {
2396 	switch (alg) {
2397 	case CRYPTO_SHA1:
2398 	case CRYPTO_SHA1_HMAC:
2399 		return (SHA1_HASH_LEN);
2400 	case CRYPTO_SHA2_224:
2401 	case CRYPTO_SHA2_224_HMAC:
2402 		return (SHA2_224_HASH_LEN);
2403 	case CRYPTO_SHA2_256:
2404 	case CRYPTO_SHA2_256_HMAC:
2405 		return (SHA2_256_HASH_LEN);
2406 	case CRYPTO_SHA2_384:
2407 	case CRYPTO_SHA2_384_HMAC:
2408 		return (SHA2_384_HASH_LEN);
2409 	case CRYPTO_SHA2_512:
2410 	case CRYPTO_SHA2_512_HMAC:
2411 		return (SHA2_512_HASH_LEN);
2412 	default:
2413 		__assert_unreachable();
2414 	}
2415 }
2416 
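/*
 * Size of the hash state stored in the context record.  The truncated
 * variants (SHA-224, SHA-384) carry the full internal state of SHA-256
 * and SHA-512, respectively.
 */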
2417 static int
2418 safexcel_sha_statelen(int alg)
2419 {
2420 	switch (alg) {
2421 	case CRYPTO_SHA1:
2422 	case CRYPTO_SHA1_HMAC:
2423 		return (SHA1_HASH_LEN);
2424 	case CRYPTO_SHA2_224:
2425 	case CRYPTO_SHA2_224_HMAC:
2426 	case CRYPTO_SHA2_256:
2427 	case CRYPTO_SHA2_256_HMAC:
2428 		return (SHA2_256_HASH_LEN);
2429 	case CRYPTO_SHA2_384:
2430 	case CRYPTO_SHA2_384_HMAC:
2431 	case CRYPTO_SHA2_512:
2432 	case CRYPTO_SHA2_512_HMAC:
2433 		return (SHA2_512_HASH_LEN);
2434 	default:
2435 		__assert_unreachable();
2436 	}
2437 }
2438 
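/*
 * Initialize driver-specific session state: algorithm identifiers,
 * digest and hash-state lengths, and precomputed context record
 * templates for the encrypt and decrypt directions.
 */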
2439 static int
2440 safexcel_newsession(device_t dev, crypto_session_t cses,
2441     const struct crypto_session_params *csp)
2442 {
2443 	struct safexcel_session *sess;
2444 
2445 	sess = crypto_get_driver_session(cses);
2446 	sess->cses = cses;
2447 
2448 	switch (csp->csp_auth_alg) {
2449 	case CRYPTO_SHA1:
2450 	case CRYPTO_SHA2_224:
2451 	case CRYPTO_SHA2_256:
2452 	case CRYPTO_SHA2_384:
2453 	case CRYPTO_SHA2_512:
2454 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_PRECOMPUTED;
2455 		sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
2456 		sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
2457 		sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
2458 		break;
2459 	case CRYPTO_SHA1_HMAC:
2460 	case CRYPTO_SHA2_224_HMAC:
2461 	case CRYPTO_SHA2_256_HMAC:
2462 	case CRYPTO_SHA2_384_HMAC:
2463 	case CRYPTO_SHA2_512_HMAC:
2464 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_HMAC;
2465 		sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
2466 		sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
2467 		sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
2468 		break;
2469 	case CRYPTO_AES_NIST_GMAC:
2470 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
2471 		sess->digestlen = GMAC_DIGEST_LEN;
2472 		sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
2473 		sess->alg = safexcel_aes_algid(csp->csp_auth_klen);
2474 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
2475 		break;
2476 	}
2477 
2478 	switch (csp->csp_cipher_alg) {
2479 	case CRYPTO_AES_NIST_GCM_16:
2480 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
2481 		sess->digestlen = GMAC_DIGEST_LEN;
2482 		sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
2483 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2484 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
2485 		break;
2486 	case CRYPTO_AES_CCM_16:
2487 		sess->hash = safexcel_aes_ccm_hashid(csp->csp_cipher_klen);
2488 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_CCM;
2489 		sess->digestlen = CCM_CBC_MAX_DIGEST_LEN;
2490 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2491 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CCM;
2492 		break;
2493 	case CRYPTO_AES_CBC:
2494 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2495 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CBC;
2496 		break;
2497 	case CRYPTO_AES_ICM:
2498 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2499 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CTR;
2500 		break;
2501 	case CRYPTO_AES_XTS:
2502 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen / 2);
2503 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_XTS;
2504 		break;
2505 	}
2506 
2507 	if (csp->csp_auth_mlen != 0)
2508 		sess->digestlen = csp->csp_auth_mlen;
2509 
2510 	sess->encctx.len = safexcel_set_context(&sess->encctx.ctx,
2511 	    CRYPTO_OP_ENCRYPT, csp->csp_cipher_key, csp->csp_auth_key,
2512 	    sess);
2513 	sess->decctx.len = safexcel_set_context(&sess->decctx.ctx,
2514 	    CRYPTO_OP_DECRYPT, csp->csp_cipher_key, csp->csp_auth_key,
2515 	    sess);
2516 
2517 	return (0);
2518 }
2519 
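/*
 * Submit a request: pick a ring based on the current CPU, reserve a
 * request structure, build the descriptor chains and token, sync the
 * DMA maps, and hand the request to the engine.
 */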
2520 static int
2521 safexcel_process(device_t dev, struct cryptop *crp, int hint)
2522 {
2523 	struct safexcel_request *req;
2524 	struct safexcel_ring *ring;
2525 	struct safexcel_session *sess;
2526 	struct safexcel_softc *sc;
2527 	int error;
2528 
2529 	sc = device_get_softc(dev);
2530 	sess = crypto_get_driver_session(crp->crp_session);
2531 
2532 	if (__predict_false(crypto_buffer_len(&crp->crp_buf) >
2533 	    SAFEXCEL_MAX_REQUEST_SIZE)) {
2534 		crp->crp_etype = E2BIG;
2535 		crypto_done(crp);
2536 		return (0);
2537 	}
2538 
2539 	ring = &sc->sc_ring[curcpu % sc->sc_config.rings];
2540 	mtx_lock(&ring->mtx);
2541 	req = safexcel_alloc_request(sc, ring);
2542 	if (__predict_false(req == NULL)) {
2543 		ring->blocked = CRYPTO_SYMQ;
2544 		mtx_unlock(&ring->mtx);
2545 		counter_u64_add(sc->sc_req_alloc_failures, 1);
2546 		return (ERESTART);
2547 	}
2548 
2549 	req->crp = crp;
2550 	req->sess = sess;
2551 
2552 	crypto_read_iv(crp, req->iv);
2553 
2554 	error = safexcel_create_chain(ring, req);
2555 	if (__predict_false(error != 0)) {
2556 		safexcel_free_request(ring, req);
2557 		if (error == ERESTART)
2558 			ring->blocked = CRYPTO_SYMQ;
2559 		mtx_unlock(&ring->mtx);
2560 		if (error != ERESTART) {
2561 			crp->crp_etype = error;
2562 			crypto_done(crp);
2563 			return (0);
2564 		} else {
2565 			return (ERESTART);
2566 		}
2567 	}
2568 
2569 	safexcel_set_token(req);
2570 
2571 	bus_dmamap_sync(ring->data_dtag, req->dmap,
2572 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2573 	bus_dmamap_sync(req->ctx.tag, req->ctx.map,
2574 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2575 	bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
2576 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2577 	bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
2578 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2579 	bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
2580 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2581 
2582 	safexcel_execute(sc, ring, req, hint);
2583 
2584 	mtx_unlock(&ring->mtx);
2585 
2586 	return (0);
2587 }
2588 
2589 static device_method_t safexcel_methods[] = {
2590 	/* Device interface */
2591 	DEVMETHOD(device_probe,		safexcel_probe),
2592 	DEVMETHOD(device_attach,	safexcel_attach),
2593 	DEVMETHOD(device_detach,	safexcel_detach),
2594 
2595 	/* Cryptodev interface */
2596 	DEVMETHOD(cryptodev_probesession, safexcel_probesession),
2597 	DEVMETHOD(cryptodev_newsession,	safexcel_newsession),
2598 	DEVMETHOD(cryptodev_process,	safexcel_process),
2599 
2600 	DEVMETHOD_END
2601 };
2602 
2603 static driver_t safexcel_driver = {
2604 	.name 		= "safexcel",
2605 	.methods 	= safexcel_methods,
2606 	.size		= sizeof(struct safexcel_softc),
2607 };
2608 
2609 DRIVER_MODULE(safexcel, simplebus, safexcel_driver, 0, 0);
2610 MODULE_VERSION(safexcel, 1);
2611 MODULE_DEPEND(safexcel, crypto, 1, 1, 1);
2612