/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020, 2021 Rubicon Communications, LLC (Netgate)
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>

#include <crypto/rijndael/rijndael.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "cryptodev_if.h"

#include "safexcel_reg.h"
#include "safexcel_var.h"

/*
 * We only support the EIP97 for now.
 */
static struct ofw_compat_data safexcel_compat[] = {
	{ "inside-secure,safexcel-eip97ies",	(uintptr_t)97 },
	{ "inside-secure,safexcel-eip97",	(uintptr_t)97 },
	{ NULL,					0 }
};

const struct safexcel_reg_offsets eip97_regs_offset = {
	.hia_aic	= SAFEXCEL_EIP97_HIA_AIC_BASE,
	.hia_aic_g	= SAFEXCEL_EIP97_HIA_AIC_G_BASE,
	.hia_aic_r	= SAFEXCEL_EIP97_HIA_AIC_R_BASE,
	.hia_aic_xdr	= SAFEXCEL_EIP97_HIA_AIC_xDR_BASE,
	.hia_dfe	= SAFEXCEL_EIP97_HIA_DFE_BASE,
	.hia_dfe_thr	= SAFEXCEL_EIP97_HIA_DFE_THR_BASE,
	.hia_dse	= SAFEXCEL_EIP97_HIA_DSE_BASE,
	.hia_dse_thr	= SAFEXCEL_EIP97_HIA_DSE_THR_BASE,
	.hia_gen_cfg	= SAFEXCEL_EIP97_HIA_GEN_CFG_BASE,
	.pe		= SAFEXCEL_EIP97_PE_BASE,
};

const struct safexcel_reg_offsets eip197_regs_offset = {
	.hia_aic	= SAFEXCEL_EIP197_HIA_AIC_BASE,
	.hia_aic_g	= SAFEXCEL_EIP197_HIA_AIC_G_BASE,
	.hia_aic_r	= SAFEXCEL_EIP197_HIA_AIC_R_BASE,
	.hia_aic_xdr	= SAFEXCEL_EIP197_HIA_AIC_xDR_BASE,
	.hia_dfe	= SAFEXCEL_EIP197_HIA_DFE_BASE,
	.hia_dfe_thr	= SAFEXCEL_EIP197_HIA_DFE_THR_BASE,
	.hia_dse	= SAFEXCEL_EIP197_HIA_DSE_BASE,
	.hia_dse_thr	= SAFEXCEL_EIP197_HIA_DSE_THR_BASE,
	.hia_gen_cfg	= SAFEXCEL_EIP197_HIA_GEN_CFG_BASE,
	.pe		= SAFEXCEL_EIP197_PE_BASE,
};

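/*
 * Fetch the request at the CDR read index, i.e., the oldest submitted
 * request that the hardware has not yet finished processing.
 */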
static struct safexcel_request *
safexcel_next_request(struct safexcel_ring *ring)
{
	int i;

	i = ring->cdr.read;
	KASSERT(i >= 0 && i < SAFEXCEL_RING_SIZE,
	    ("%s: out of bounds request index %d", __func__, i));
	return (&ring->requests[i]);
}

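/*
 * Consume the descriptor at the ring's read index.  The command and result
 * rings are circular buffers; a ring is empty when the read and write
 * indices are equal.
 */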
static struct safexcel_cmd_descr *
safexcel_cmd_descr_next(struct safexcel_cmd_descr_ring *ring)
{
	struct safexcel_cmd_descr *cdesc;

	if (ring->write == ring->read)
		return (NULL);
	cdesc = &ring->desc[ring->read];
	ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
	return (cdesc);
}

static struct safexcel_res_descr *
safexcel_res_descr_next(struct safexcel_res_descr_ring *ring)
{
	struct safexcel_res_descr *rdesc;

	if (ring->write == ring->read)
		return (NULL);
	rdesc = &ring->desc[ring->read];
	ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
	return (rdesc);
}

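/*
 * Reserve the request slot at the CDR write index.  One slot is always left
 * unused so that a full ring can be distinguished from an empty one.
 */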
static struct safexcel_request *
safexcel_alloc_request(struct safexcel_softc *sc, struct safexcel_ring *ring)
{
	int i;

	mtx_assert(&ring->mtx, MA_OWNED);

	i = ring->cdr.write;
	if ((i + 1) % SAFEXCEL_RING_SIZE == ring->cdr.read)
		return (NULL);
	return (&ring->requests[i]);
}

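/*
 * Release a request, unloading its DMA map and scrubbing any key material
 * left in its context record and IV buffer.
 */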
static void
safexcel_free_request(struct safexcel_ring *ring, struct safexcel_request *req)
{
	struct safexcel_context_record *ctx;

	mtx_assert(&ring->mtx, MA_OWNED);

	if (req->dmap_loaded) {
		bus_dmamap_unload(ring->data_dtag, req->dmap);
		req->dmap_loaded = false;
	}
	ctx = (struct safexcel_context_record *)req->ctx.vaddr;
	explicit_bzero(ctx->data, sizeof(ctx->data));
	explicit_bzero(req->iv, sizeof(req->iv));
}

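/*
 * Process result descriptors for requests completed on the given ring and
 * pass the finished crypto operations back to the framework.
 */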
static void
safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx)
{
	TAILQ_HEAD(, cryptop) cq;
	struct cryptop *crp, *tmp;
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_res_descr *rdesc;
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	uint32_t blocked, error, i, ncdescs, nrdescs, nreqs;

	blocked = 0;
	ring = &sc->sc_ring[ringidx];

	nreqs = SAFEXCEL_READ(sc,
	    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT);
	nreqs >>= SAFEXCEL_xDR_PROC_xD_PKT_OFFSET;
	nreqs &= SAFEXCEL_xDR_PROC_xD_PKT_MASK;
	if (nreqs == 0) {
		SAFEXCEL_DPRINTF(sc, 1,
		    "zero pending requests on ring %d\n", ringidx);
		mtx_lock(&ring->mtx);
		goto out;
	}

	TAILQ_INIT(&cq);

	bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ncdescs = nrdescs = 0;
	for (i = 0; i < nreqs; i++) {
		req = safexcel_next_request(ring);

		bus_dmamap_sync(req->ctx.tag, req->ctx.map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(ring->data_dtag, req->dmap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		ncdescs += req->cdescs;
		while (req->cdescs-- > 0) {
			cdesc = safexcel_cmd_descr_next(&ring->cdr);
			KASSERT(cdesc != NULL,
			    ("%s: missing control descriptor", __func__));
			if (req->cdescs == 0)
				KASSERT(cdesc->last_seg,
				    ("%s: chain is not terminated", __func__));
		}
		nrdescs += req->rdescs;
		while (req->rdescs-- > 0) {
			rdesc = safexcel_res_descr_next(&ring->rdr);
			error = rdesc->result_data.error_code;
			if (error != 0) {
				if (error == SAFEXCEL_RESULT_ERR_AUTH_FAILED &&
				    req->crp->crp_etype == 0) {
					req->crp->crp_etype = EBADMSG;
				} else {
					SAFEXCEL_DPRINTF(sc, 1,
					    "error code %#x\n", error);
					req->crp->crp_etype = EIO;
				}
			}
		}

		TAILQ_INSERT_TAIL(&cq, req->crp, crp_next);
	}

	mtx_lock(&ring->mtx);
	if (nreqs != 0) {
		KASSERT(ring->queued >= nreqs,
		    ("%s: request count underflow, %d queued %d completed",
		    __func__, ring->queued, nreqs));
		ring->queued -= nreqs;

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_xD_PKT(nreqs) |
		    (sc->sc_config.rd_offset * nrdescs * sizeof(uint32_t)));
		blocked = ring->blocked;
		ring->blocked = 0;
	}
out:
	if (ring->queued != 0) {
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
		    SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | imin(ring->queued, 16));
	}
	mtx_unlock(&ring->mtx);

	if (blocked)
		crypto_unblock(sc->sc_cid, blocked);

	TAILQ_FOREACH_SAFE(crp, &cq, crp_next, tmp)
		crypto_done(crp);
}

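/*
 * Ring interrupt handler.  Acknowledge pending CDR and RDR interrupts and
 * process any completed requests.
 */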
static void
safexcel_ring_intr(void *arg)
{
	struct safexcel_softc *sc;
	struct safexcel_intr_handle *ih;
	uint32_t status, stat;
	int ring;
	bool rdrpending;

	ih = arg;
	sc = ih->sc;
	ring = ih->ring;

	status = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
	    SAFEXCEL_HIA_AIC_R_ENABLED_STAT(ring));
	/* CDR interrupts */
	if (status & SAFEXCEL_CDR_IRQ(ring)) {
		stat = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
		    stat & SAFEXCEL_CDR_INTR_MASK);
	}
	/* RDR interrupts */
	rdrpending = false;
	if (status & SAFEXCEL_RDR_IRQ(ring)) {
		stat = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
		if ((stat & SAFEXCEL_xDR_ERR) == 0)
			rdrpending = true;
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
		    stat & SAFEXCEL_RDR_INTR_MASK);
	}
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ACK(ring),
	    status);

	if (rdrpending)
		safexcel_rdr_intr(sc, ring);
}

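/*
 * Probe the hardware configuration: the register layout and the number of
 * ring interrupt controllers, rings and processing engines, as well as the
 * descriptor sizes and offsets derived from the data width.
 */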
static int
safexcel_configure(struct safexcel_softc *sc)
{
	uint32_t i, mask, pemask, reg;
	device_t dev;

	if (sc->sc_type == 197) {
		sc->sc_offsets = eip197_regs_offset;
		pemask = SAFEXCEL_N_PES_MASK;
	} else {
		sc->sc_offsets = eip97_regs_offset;
		pemask = EIP97_N_PES_MASK;
	}

	dev = sc->sc_dev;

	/* Scan for valid ring interrupt controllers. */
	for (i = 0; i < SAFEXCEL_MAX_RING_AIC; i++) {
		reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
		    SAFEXCEL_HIA_AIC_R_VERSION(i));
		if (SAFEXCEL_REG_LO16(reg) != EIP201_VERSION_LE)
			break;
	}
	sc->sc_config.aic_rings = i;
	if (sc->sc_config.aic_rings == 0)
		return (-1);

	reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_OPTIONS);
	/* Check for 64-bit addressing. */
	if ((reg & SAFEXCEL_OPT_ADDR_64) == 0)
		return (-1);
	/* Check alignment constraints (which we do not support). */
	if (((reg & SAFEXCEL_OPT_TGT_ALIGN_MASK) >>
	    SAFEXCEL_OPT_TGT_ALIGN_OFFSET) != 0)
		return (-1);

	sc->sc_config.hdw =
	    (reg & SAFEXCEL_xDR_HDW_MASK) >> SAFEXCEL_xDR_HDW_OFFSET;
	mask = (1 << sc->sc_config.hdw) - 1;

	sc->sc_config.rings = reg & SAFEXCEL_N_RINGS_MASK;
	/* Limit the number of rings to the number of AIC rings. */
	sc->sc_config.rings = MIN(sc->sc_config.rings, sc->sc_config.aic_rings);

	sc->sc_config.pes = (reg & pemask) >> SAFEXCEL_N_PES_OFFSET;

	sc->sc_config.cd_size =
	    sizeof(struct safexcel_cmd_descr) / sizeof(uint32_t);
	sc->sc_config.cd_offset = (sc->sc_config.cd_size + mask) & ~mask;

	sc->sc_config.rd_size =
	    sizeof(struct safexcel_res_descr) / sizeof(uint32_t);
	sc->sc_config.rd_offset = (sc->sc_config.rd_size + mask) & ~mask;

	sc->sc_config.atok_offset =
	    (SAFEXCEL_MAX_ATOKENS * sizeof(struct safexcel_instr) + mask) &
	    ~mask;

	return (0);
}

static void
safexcel_init_hia_bus_access(struct safexcel_softc *sc)
{
	uint32_t version, val;

	/* Determine endianness and configure byte swap. */
	version = SAFEXCEL_READ(sc,
	    SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_VERSION);
	val = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
	if (SAFEXCEL_REG_HI16(version) == SAFEXCEL_HIA_VERSION_BE) {
		val = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
		val = val ^ (SAFEXCEL_MST_CTRL_NO_BYTE_SWAP >> 24);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL,
		    val);
	}

	/* Configure wr/rd cache values. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_GEN_CFG(sc) + SAFEXCEL_HIA_MST_CTRL,
	    SAFEXCEL_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	    SAFEXCEL_MST_CTRL_WD_CACHE(WR_CACHE_4BITS));
}

static void
safexcel_disable_global_interrupts(struct safexcel_softc *sc)
{
	/* Disable and clear pending interrupts. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ENABLE_CTRL, 0);
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
	    SAFEXCEL_AIC_G_ACK_ALL_MASK);
}

/*
 * Configure the data fetch engine.  This component parses command descriptors
 * and sets up DMA transfers from host memory to the corresponding processing
 * engine.
 */
static void
safexcel_configure_dfe_engine(struct safexcel_softc *sc, int pe)
{
	/* Reset all DFE threads. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_RESET_PE);

	/* Deassert the DFE reset. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 0);

	/* DMA transfer size to use. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE(sc) + SAFEXCEL_HIA_DFE_CFG(pe),
	    SAFEXCEL_HIA_DFE_CFG_DIS_DEBUG |
	    SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(9) |
	    SAFEXCEL_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_CTRL_SIZE(7) |
	    SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS) |
	    SAFEXCEL_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS));

	/* Configure the PE DMA transfer thresholds. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_DBUF_THRES(pe),
	    SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
	    SAFEXCEL_PE_IN_xBUF_THRES_MAX(9));
	SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_TBUF_THRES(pe),
	    SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
	    SAFEXCEL_PE_IN_xBUF_THRES_MAX(7));
}

/*
 * Configure the data store engine.  This component parses result descriptors
 * and sets up DMA transfers from the processing engine to host memory.
 */
static int
safexcel_configure_dse(struct safexcel_softc *sc, int pe)
{
	uint32_t val;
	int count;

	/* Disable and reset all DSE threads. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_RESET_PE);

	/* Wait up to one second for the threads to go idle. */
	for (count = 0;;) {
		val = SAFEXCEL_READ(sc,
		    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_STAT(pe));
		if ((val & SAFEXCEL_DSE_THR_RDR_ID_MASK) ==
		    SAFEXCEL_DSE_THR_RDR_ID_MASK)
			break;
		if (count++ > 10000) {
			device_printf(sc->sc_dev, "DSE reset timeout\n");
			return (-1);
		}
		DELAY(100);
	}

	/* Exit the reset state. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 0);

	/* DMA transfer size to use. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE(sc) + SAFEXCEL_HIA_DSE_CFG(pe),
	    SAFEXCEL_HIA_DSE_CFG_DIS_DEBUG |
	    SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
	    SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(8) |
	    SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS) |
	    SAFEXCEL_HIA_DSE_CFG_ALLWAYS_BUFFERABLE);

	/* Configure the processing engine thresholds. */
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_PE(sc) + SAFEXCEL_PE_OUT_DBUF_THRES(pe),
	    SAFEXCEL_PE_OUT_DBUF_THRES_MIN(7) |
	    SAFEXCEL_PE_OUT_DBUF_THRES_MAX(8));

	return (0);
}

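/*
 * Quiesce the command and result descriptor rings: mask ring interrupts,
 * disable external triggering, and clear the prepared and processed
 * counters and pointers before the rings are programmed.
 */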
static void
safexcel_hw_prepare_rings(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		/*
		 * Command descriptors.
		 */

		/* Clear interrupts for this ring. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
		    SAFEXCEL_HIA_AIC_R_ENABLE_CLR_ALL_MASK);

		/* Disable external triggering. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
		    SAFEXCEL_RING_SIZE * sc->sc_config.cd_offset *
		    sizeof(uint32_t));

		/*
		 * Result descriptors.
		 */

		/* Disable external triggering. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		/* Ring size. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
		    SAFEXCEL_RING_SIZE * sc->sc_config.rd_offset *
		    sizeof(uint32_t));
	}
}

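/*
 * Program the ring base addresses, descriptor sizes, fetch thresholds and
 * DMA transfer parameters, and enable the result ring interrupts.
 */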
static void
safexcel_hw_setup_rings(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	uint32_t cd_size_rnd, mask, rd_size_rnd, val;
	int i;

	mask = (1 << sc->sc_config.hdw) - 1;
	cd_size_rnd = (sc->sc_config.cd_size + mask) >> sc->sc_config.hdw;
	val = (sizeof(struct safexcel_res_descr) -
	    sizeof(struct safexcel_res_data)) / sizeof(uint32_t);
	rd_size_rnd = (val + mask) >> sc->sc_config.hdw;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		/*
		 * Command descriptors.
		 */

		/* Ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
		    SAFEXCEL_ADDR_LO(ring->cdr.dma.paddr));
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
		    SAFEXCEL_ADDR_HI(ring->cdr.dma.paddr));

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
		    SAFEXCEL_xDR_DESC_MODE_64BIT | SAFEXCEL_CDR_DESC_MODE_ADCP |
		    (sc->sc_config.cd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
		    sc->sc_config.cd_size);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
		    ((SAFEXCEL_FETCH_COUNT * (cd_size_rnd << sc->sc_config.hdw)) <<
		      SAFEXCEL_xDR_xD_FETCH_THRESH) |
		    (SAFEXCEL_FETCH_COUNT * sc->sc_config.cd_offset));

		/* Configure DMA tx control. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
		    SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS));

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_CDR_INTR_MASK);

		/*
		 * Result descriptors.
		 */

		/* Ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
		    SAFEXCEL_ADDR_LO(ring->rdr.dma.paddr));
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
		    SAFEXCEL_ADDR_HI(ring->rdr.dma.paddr));

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
		    SAFEXCEL_xDR_DESC_MODE_64BIT |
		    (sc->sc_config.rd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
		    sc->sc_config.rd_size);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
		    ((SAFEXCEL_FETCH_COUNT * (rd_size_rnd << sc->sc_config.hdw)) <<
		    SAFEXCEL_xDR_xD_FETCH_THRESH) |
		    (SAFEXCEL_FETCH_COUNT * sc->sc_config.rd_offset));

		/* Configure DMA tx control. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
		    SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS) |
		    SAFEXCEL_HIA_xDR_WR_RES_BUF | SAFEXCEL_HIA_xDR_WR_CTRL_BUF);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_RDR_INTR_MASK);

		/* Enable ring interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CTRL(i),
		    SAFEXCEL_RDR_IRQ(i));
	}
}

/* Reset the command and result descriptor rings. */
static void
safexcel_hw_reset_rings(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		/*
		 * Result descriptor ring operations.
		 */

		/* Reset ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_RDR_INTR_MASK);

		/* Disable ring interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
		    SAFEXCEL_RDR_IRQ(i));

		/*
		 * Command descriptor ring operations.
		 */

		/* Reset ring base address. */
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
		SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
		    SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);

		/* Clear the pending prepared counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
		    SAFEXCEL_xDR_PREP_CLR_COUNT);

		/* Clear the pending processed counter. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
		    SAFEXCEL_xDR_PROC_CLR_COUNT);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);

		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);

		/* Clear any pending interrupt. */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
		    SAFEXCEL_CDR_INTR_MASK);
	}
}

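/*
 * Attach all rings to the processing engine's data fetch and data store
 * threads, and acknowledge any pending HIA interrupts.
 */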
static void
safexcel_enable_pe_engine(struct safexcel_softc *sc, int pe)
{
	int i, ring_mask;

	for (ring_mask = 0, i = 0; i < sc->sc_config.rings; i++) {
		ring_mask <<= 1;
		ring_mask |= 1;
	}

	/* Enable command descriptor rings. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);

	/* Enable result descriptor rings. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
	    SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);

	/* Clear any HIA interrupt. */
	SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
	    SAFEXCEL_AIC_G_ACK_HIA_MASK);
}

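/*
 * Submit a request to the hardware by advancing the prepared descriptor
 * counters.  When the crypto layer signals that more requests will follow
 * (CRYPTO_HINT_MORE), submission is deferred so that a batch of requests
 * can be handed to the hardware at once.
 */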
static void
safexcel_execute(struct safexcel_softc *sc, struct safexcel_ring *ring,
    struct safexcel_request *req, int hint)
{
	int ringidx, ncdesc, nrdesc;
	bool busy;

	mtx_assert(&ring->mtx, MA_OWNED);

	if ((hint & CRYPTO_HINT_MORE) != 0) {
		ring->pending++;
		ring->pending_cdesc += req->cdescs;
		ring->pending_rdesc += req->rdescs;
		return;
	}

	ringidx = req->ringidx;

	busy = ring->queued != 0;
	ncdesc = ring->pending_cdesc + req->cdescs;
	nrdesc = ring->pending_rdesc + req->rdescs;
	ring->queued += ring->pending + 1;

	if (!busy) {
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
		    SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | ring->queued);
	}
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
	    nrdesc * sc->sc_config.rd_offset * sizeof(uint32_t));
	SAFEXCEL_WRITE(sc,
	    SAFEXCEL_HIA_CDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
	    ncdesc * sc->sc_config.cd_offset * sizeof(uint32_t));

	ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0;
}

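/*
 * Initialize per-ring software state and point each command descriptor at
 * its slot in the additional token (atok) buffer.
 */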
static void
safexcel_init_rings(struct safexcel_softc *sc)
{
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_ring *ring;
	uint64_t atok;
	int i, j;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		snprintf(ring->lockname, sizeof(ring->lockname),
		    "safexcel_ring%d", i);
		mtx_init(&ring->mtx, ring->lockname, NULL, MTX_DEF);

		ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0;
		ring->queued = 0;
		ring->cdr.read = ring->cdr.write = 0;
		ring->rdr.read = ring->rdr.write = 0;
		for (j = 0; j < SAFEXCEL_RING_SIZE; j++) {
			cdesc = &ring->cdr.desc[j];
			atok = ring->dma_atok.paddr +
			    sc->sc_config.atok_offset * j;
			cdesc->atok_lo = SAFEXCEL_ADDR_LO(atok);
			cdesc->atok_hi = SAFEXCEL_ADDR_HI(atok);
		}
	}
}

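/* busdma callback: record the physical address of a one-segment mapping. */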
static void
safexcel_dma_alloc_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct safexcel_dma_mem *sdm;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
	sdm = arg;
	sdm->paddr = segs->ds_addr;
}

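/*
 * Allocate a contiguous block of DMA-safe memory and load it into its own
 * busdma map.  Only 32-bit addressable memory is handed to the device.
 */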
static int
safexcel_dma_alloc_mem(struct safexcel_softc *sc, struct safexcel_dma_mem *sdm,
    bus_size_t size)
{
	int error;

	KASSERT(sdm->vaddr == NULL,
	    ("%s: DMA memory descriptor in use.", __func__));

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size, 1,			/* maxsize, nsegments */
	    size, BUS_DMA_COHERENT,	/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sdm->tag);			/* dmat */
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate busdma tag, error %d\n", error);
		goto err1;
	}

	error = bus_dmamem_alloc(sdm->tag, (void **)&sdm->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sdm->map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA safe memory, error %d\n", error);
		goto err2;
	}

	error = bus_dmamap_load(sdm->tag, sdm->map, sdm->vaddr, size,
	    safexcel_dma_alloc_mem_cb, sdm, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot get address of the DMA memory, error %d\n", error);
		goto err3;
	}

	return (0);
err3:
	bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
err2:
	bus_dma_tag_destroy(sdm->tag);
err1:
	sdm->vaddr = NULL;

	return (error);
}

static void
safexcel_dma_free_mem(struct safexcel_dma_mem *sdm)
{
	bus_dmamap_unload(sdm->tag, sdm->map);
	bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
	bus_dma_tag_destroy(sdm->tag);
}

static void
safexcel_dma_free_rings(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	int i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];
		safexcel_dma_free_mem(&ring->cdr.dma);
		safexcel_dma_free_mem(&ring->dma_atok);
		safexcel_dma_free_mem(&ring->rdr.dma);
		bus_dma_tag_destroy(ring->data_dtag);
		mtx_destroy(&ring->mtx);
	}
}

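/*
 * Allocate the per-ring DMA resources: a tag for request data buffers and
 * memory for the command ring, the additional token array and the result
 * ring.
 */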
static int
safexcel_dma_init(struct safexcel_softc *sc)
{
	struct safexcel_ring *ring;
	bus_size_t size;
	int error, i;

	for (i = 0; i < sc->sc_config.rings; i++) {
		ring = &sc->sc_ring[i];

		error = bus_dma_tag_create(
		    bus_get_dma_tag(sc->sc_dev),/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filtfunc, filtfuncarg */
		    SAFEXCEL_MAX_REQUEST_SIZE,	/* maxsize */
		    SAFEXCEL_MAX_FRAGMENTS,	/* nsegments */
		    SAFEXCEL_MAX_REQUEST_SIZE,	/* maxsegsz */
		    BUS_DMA_COHERENT,		/* flags */
		    NULL, NULL,			/* lockfunc, lockfuncarg */
		    &ring->data_dtag);		/* dmat */
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "bus_dma_tag_create main failed; error %d\n", error);
			return (error);
		}

		size = sizeof(uint32_t) * sc->sc_config.cd_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->cdr.dma, size);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to allocate CDR DMA memory, error %d\n",
			    error);
			goto err;
		}
		ring->cdr.desc =
		    (struct safexcel_cmd_descr *)ring->cdr.dma.vaddr;

		/* Allocate additional CDR token memory. */
		size = (bus_size_t)sc->sc_config.atok_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->dma_atok, size);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to allocate atoken DMA memory, error %d\n",
			    error);
			goto err;
		}

		size = sizeof(uint32_t) * sc->sc_config.rd_offset *
		    SAFEXCEL_RING_SIZE;
		error = safexcel_dma_alloc_mem(sc, &ring->rdr.dma, size);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to allocate RDR DMA memory, error %d\n",
			    error);
			goto err;
		}
		ring->rdr.desc =
		    (struct safexcel_res_descr *)ring->rdr.dma.vaddr;
	}

	return (0);
err:
	safexcel_dma_free_rings(sc);
	return (error);
}

static void
safexcel_deinit_hw(struct safexcel_softc *sc)
{
	safexcel_hw_reset_rings(sc);
	safexcel_dma_free_rings(sc);
}

static int
safexcel_init_hw(struct safexcel_softc *sc)
{
	int pe;

	/* 23.3.7 Initialization */
	if (safexcel_configure(sc) != 0)
		return (EINVAL);

	if (safexcel_dma_init(sc) != 0)
		return (ENOMEM);

	safexcel_init_rings(sc);

	safexcel_init_hia_bus_access(sc);

	/* 23.3.7.2 Disable EIP-97 global Interrupts */
	safexcel_disable_global_interrupts(sc);

	for (pe = 0; pe < sc->sc_config.pes; pe++) {
		/* 23.3.7.3 Configure Data Fetch Engine */
		safexcel_configure_dfe_engine(sc, pe);

		/* 23.3.7.4 Configure Data Store Engine */
		if (safexcel_configure_dse(sc, pe)) {
			safexcel_deinit_hw(sc);
			return (-1);
		}

		/* 23.3.7.5 1. Protocol enables */
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION_EN(pe),
		    0xffffffff);
		SAFEXCEL_WRITE(sc,
		    SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION2_EN(pe),
		    0xffffffff);
	}

	safexcel_hw_prepare_rings(sc);

	/* 23.3.7.5 Configure the Processing Engine(s). */
	for (pe = 0; pe < sc->sc_config.pes; pe++)
		safexcel_enable_pe_engine(sc, pe);

	safexcel_hw_setup_rings(sc);

	return (0);
}

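/*
 * Establish an interrupt handler for each ring and spread the handlers
 * across the available CPUs.
 */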
static int
safexcel_setup_dev_interrupts(struct safexcel_softc *sc)
{
	int error, i, j;

	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) {
		sc->sc_ih[i].sc = sc;
		sc->sc_ih[i].ring = i;

		if (bus_setup_intr(sc->sc_dev, sc->sc_intr[i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, safexcel_ring_intr,
		    &sc->sc_ih[i], &sc->sc_ih[i].handle)) {
			device_printf(sc->sc_dev,
			    "couldn't setup interrupt %d\n", i);
			goto err;
		}

		error = bus_bind_intr(sc->sc_dev, sc->sc_intr[i], i % mp_ncpus);
		if (error != 0)
			device_printf(sc->sc_dev,
			    "failed to bind ring %d interrupt, error %d\n",
			    i, error);
	}

	return (0);

err:
	for (j = 0; j < i; j++)
		bus_teardown_intr(sc->sc_dev, sc->sc_intr[j],
		    sc->sc_ih[j].handle);

	return (ENXIO);
}

static void
safexcel_teardown_dev_interrupts(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < SAFEXCEL_MAX_RINGS; i++)
		bus_teardown_intr(sc->sc_dev, sc->sc_intr[i],
		    sc->sc_ih[i].handle);
}

static int
safexcel_alloc_dev_resources(struct safexcel_softc *sc)
{
	char name[16];
	device_t dev;
	phandle_t node;
	int error, i, rid;

	dev = sc->sc_dev;
	node = ofw_bus_get_node(dev);

	rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't allocate memory resources\n");
		return (ENXIO);
	}

	for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) {
		(void)snprintf(name, sizeof(name), "ring%d", i);
		error = ofw_bus_find_string_index(node, "interrupt-names", name,
		    &rid);
		if (error != 0)
			break;

		sc->sc_intr[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_ACTIVE | RF_SHAREABLE);
		if (sc->sc_intr[i] == NULL) {
			error = ENXIO;
			goto out;
		}
	}
	if (i == 0) {
		device_printf(dev, "couldn't allocate interrupt resources\n");
		error = ENXIO;
		goto out;
	}

	return (0);

out:
	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_res),
	    sc->sc_res);
	return (error);
}

static void
safexcel_free_dev_resources(struct safexcel_softc *sc)
{
	int i;

	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
		bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
	if (sc->sc_res != NULL)
		bus_release_resource(sc->sc_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_res), sc->sc_res);
}

static int
safexcel_probe(device_t dev)
{
	struct safexcel_softc *sc;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->sc_type = ofw_bus_search_compatible(dev, safexcel_compat)->ocd_data;
	if (sc->sc_type == 0)
		return (ENXIO);

	device_set_desc(dev, "SafeXcel EIP-97 crypto accelerator");

	return (BUS_PROBE_DEFAULT);
}

static int
safexcel_attach(device_t dev)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	struct safexcel_softc *sc;
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	int i, j, ringidx;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_cid = -1;

	if (safexcel_alloc_dev_resources(sc))
		goto err;

	if (safexcel_setup_dev_interrupts(sc))
		goto err1;

	if (safexcel_init_hw(sc))
		goto err2;

	for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
		ring = &sc->sc_ring[ringidx];

		ring->cmd_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
		ring->res_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);

		for (i = 0; i < SAFEXCEL_RING_SIZE; i++) {
			req = &ring->requests[i];
			req->sc = sc;
			req->ringidx = ringidx;
			if (bus_dmamap_create(ring->data_dtag,
			    BUS_DMA_COHERENT, &req->dmap) != 0) {
				for (j = 0; j < i; j++)
					bus_dmamap_destroy(ring->data_dtag,
					    ring->requests[j].dmap);
				goto err2;
			}
			if (safexcel_dma_alloc_mem(sc, &req->ctx,
			    sizeof(struct safexcel_context_record)) != 0) {
				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ring->data_dtag,
					    ring->requests[j].dmap);
					safexcel_dma_free_mem(
					    &ring->requests[j].ctx);
				}
				goto err2;
			}
		}
	}

	ctx = device_get_sysctl_ctx(dev);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, 0,
	    "Debug message verbosity");

	oid = device_get_sysctl_tree(sc->sc_dev);
	children = SYSCTL_CHILDREN(oid);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
	children = SYSCTL_CHILDREN(oid);

	sc->sc_req_alloc_failures = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "req_alloc_failures",
	    CTLFLAG_RD, &sc->sc_req_alloc_failures,
	    "Number of request allocation failures");
	sc->sc_cdesc_alloc_failures = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cdesc_alloc_failures",
	    CTLFLAG_RD, &sc->sc_cdesc_alloc_failures,
	    "Number of command descriptor ring overflows");
	sc->sc_rdesc_alloc_failures = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "rdesc_alloc_failures",
	    CTLFLAG_RD, &sc->sc_rdesc_alloc_failures,
	    "Number of result descriptor ring overflows");

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safexcel_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0)
		goto err2;

	return (0);

err2:
	safexcel_teardown_dev_interrupts(sc);
err1:
	safexcel_free_dev_resources(sc);
err:
	return (ENXIO);
}

static int
safexcel_detach(device_t dev)
{
	struct safexcel_ring *ring;
	struct safexcel_softc *sc;
	int i, ringidx;

	sc = device_get_softc(dev);

	if (sc->sc_cid >= 0)
		crypto_unregister_all(sc->sc_cid);

	counter_u64_free(sc->sc_req_alloc_failures);
	counter_u64_free(sc->sc_cdesc_alloc_failures);
	counter_u64_free(sc->sc_rdesc_alloc_failures);

	for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
		ring = &sc->sc_ring[ringidx];
		for (i = 0; i < SAFEXCEL_RING_SIZE; i++) {
			bus_dmamap_destroy(ring->data_dtag,
			    ring->requests[i].dmap);
			safexcel_dma_free_mem(&ring->requests[i].ctx);
		}
		sglist_free(ring->cmd_data);
		sglist_free(ring->res_data);
	}
	safexcel_deinit_hw(sc);
	safexcel_teardown_dev_interrupts(sc);
	safexcel_free_dev_resources(sc);

	return (0);
}

/*
 * Pre-compute the hash key used in GHASH, which is a block of zeroes encrypted
 * using the cipher key.
 */
static void
safexcel_setkey_ghash(const uint8_t *key, int klen, uint32_t *hashkey)
{
	uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
	uint8_t zeros[AES_BLOCK_LEN];
	int i, rounds;

	memset(zeros, 0, sizeof(zeros));

	rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
	rijndaelEncrypt(ks, rounds, zeros, (uint8_t *)hashkey);
	for (i = 0; i < GMAC_BLOCK_LEN / sizeof(uint32_t); i++)
		hashkey[i] = htobe32(hashkey[i]);

	explicit_bzero(ks, sizeof(ks));
}

/*
 * Pre-compute the combined CBC-MAC key, which consists of three keys K1, K2, K3
 * in the hardware implementation.  K1 is the cipher key and comes last in the
 * buffer since K2 and K3 have a fixed size of AES_BLOCK_LEN.  For now XCBC-MAC
 * is not implemented so K2 and K3 are fixed.
 */
static void
safexcel_setkey_xcbcmac(const uint8_t *key, int klen, uint32_t *hashkey)
{
	int i, off;

	memset(hashkey, 0, 2 * AES_BLOCK_LEN);
	off = 2 * AES_BLOCK_LEN / sizeof(uint32_t);
	for (i = 0; i < klen / sizeof(uint32_t); i++, key += 4)
		hashkey[i + off] = htobe32(le32dec(key));
}

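/*
 * Copy an intermediate HMAC digest into the buffer, converting the hash
 * state words to the big-endian layout expected by the packet engine.
 */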
static void
safexcel_setkey_hmac_digest(const struct auth_hash *ahash, union authctx *ctx,
    char *buf)
{
	int hashwords, i;

	switch (ahash->type) {
	case CRYPTO_SHA1_HMAC:
		hashwords = ahash->hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha1ctx.h.b32[i]);
		break;
	case CRYPTO_SHA2_224_HMAC:
		hashwords = auth_hash_hmac_sha2_256.hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha224ctx.state[i]);
		break;
	case CRYPTO_SHA2_256_HMAC:
		hashwords = ahash->hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha256ctx.state[i]);
		break;
	case CRYPTO_SHA2_384_HMAC:
		hashwords = auth_hash_hmac_sha2_512.hashsize / sizeof(uint64_t);
		for (i = 0; i < hashwords; i++)
			((uint64_t *)buf)[i] = htobe64(ctx->sha384ctx.state[i]);
		break;
	case CRYPTO_SHA2_512_HMAC:
		hashwords = ahash->hashsize / sizeof(uint64_t);
		for (i = 0; i < hashwords; i++)
			((uint64_t *)buf)[i] = htobe64(ctx->sha512ctx.state[i]);
		break;
	}
}

/*
 * Pre-compute the inner and outer digests used in the HMAC algorithm.
 */
static void
safexcel_setkey_hmac(const struct crypto_session_params *csp,
    const uint8_t *key, int klen, uint8_t *ipad, uint8_t *opad)
{
	union authctx ctx;
	const struct auth_hash *ahash;

	ahash = crypto_auth_hash(csp);
	hmac_init_ipad(ahash, key, klen, &ctx);
	safexcel_setkey_hmac_digest(ahash, &ctx, ipad);
	hmac_init_opad(ahash, key, klen, &ctx);
	safexcel_setkey_hmac_digest(ahash, &ctx, opad);
	explicit_bzero(&ctx, ahash->ctxsize);
}

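/*
 * The AES-XTS tweak key is the second half of the key supplied by the
 * consumer; the caller passes the halved key length.
 */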
static void
safexcel_setkey_xts(const uint8_t *key, int klen, uint8_t *tweakkey)
{
	memcpy(tweakkey, key + klen, klen);
}

/*
 * Populate a context record with parameters from a session.  Some consumers
 * specify per-request keys, in which case the context must be re-initialized
 * for each request.
 */
static int
safexcel_set_context(struct safexcel_context_record *ctx, int op,
    const uint8_t *ckey, const uint8_t *akey, struct safexcel_session *sess)
{
	const struct crypto_session_params *csp;
	uint8_t *data;
	uint32_t ctrl0, ctrl1;
	int aklen, alg, cklen, off;

	csp = crypto_get_params(sess->cses);
	aklen = csp->csp_auth_klen;
	cklen = csp->csp_cipher_klen;
	if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
		cklen /= 2;

	ctrl0 = sess->alg | sess->digest | sess->hash;
	ctrl1 = sess->mode;

	data = (uint8_t *)ctx->data;
	if (csp->csp_cipher_alg != 0) {
		memcpy(data, ckey, cklen);
		off = cklen;
	} else if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) {
		memcpy(data, akey, aklen);
		off = aklen;
	} else {
		off = 0;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_NIST_GCM_16:
		safexcel_setkey_ghash(ckey, cklen, (uint32_t *)(data + off));
		off += GMAC_BLOCK_LEN;
		break;
	case CRYPTO_AES_CCM_16:
		safexcel_setkey_xcbcmac(ckey, cklen, (uint32_t *)(data + off));
		off += AES_BLOCK_LEN * 2 + cklen;
		break;
	case CRYPTO_AES_XTS:
		safexcel_setkey_xts(ckey, cklen, data + off);
		off += cklen;
		break;
	}
	switch (csp->csp_auth_alg) {
	case CRYPTO_AES_NIST_GMAC:
		safexcel_setkey_ghash(akey, aklen, (uint32_t *)(data + off));
		off += GMAC_BLOCK_LEN;
		break;
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		safexcel_setkey_hmac(csp, akey, aklen,
		    data + off, data + off + sess->statelen);
		off += sess->statelen * 2;
		break;
	}
	ctrl0 |= SAFEXCEL_CONTROL0_SIZE(off / sizeof(uint32_t));

	alg = csp->csp_cipher_alg;
	if (alg == 0)
		alg = csp->csp_auth_alg;

	switch (alg) {
	case CRYPTO_AES_CCM_16:
		if (CRYPTO_OP_IS_ENCRYPT(op)) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_ENCRYPT_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_DECRYPT_HASH_IN |
			    SAFEXCEL_CONTROL0_KEY_EN;
		}
		ctrl1 |= SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
		    SAFEXCEL_CONTROL1_IV2 | SAFEXCEL_CONTROL1_IV3;
		break;
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
	case CRYPTO_AES_XTS:
		if (CRYPTO_OP_IS_ENCRYPT(op)) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN;
			if (csp->csp_auth_alg != 0)
				ctrl0 |=
				    SAFEXCEL_CONTROL0_TYPE_ENCRYPT_HASH_OUT;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
			    SAFEXCEL_CONTROL0_KEY_EN;
			if (csp->csp_auth_alg != 0)
				ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
		}
		break;
	case CRYPTO_AES_NIST_GCM_16:
	case CRYPTO_AES_NIST_GMAC:
		if (CRYPTO_OP_IS_ENCRYPT(op) || csp->csp_auth_alg != 0) {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
			    SAFEXCEL_CONTROL0_KEY_EN |
			    SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
		} else {
			ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
			    SAFEXCEL_CONTROL0_KEY_EN |
			    SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
		}
		if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) {
			ctrl1 |= SAFEXCEL_CONTROL1_COUNTER_MODE |
			    SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
			    SAFEXCEL_CONTROL1_IV2;
		}
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		ctrl0 |= SAFEXCEL_CONTROL0_RESTART_HASH;
		/* FALLTHROUGH */
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
		break;
	}

	ctx->control0 = ctrl0;
	ctx->control1 = ctrl1;

	return (off);
}

/*
 * Construct a no-op instruction, used to pad input tokens.
 */
static void
safexcel_instr_nop(struct safexcel_instr **instrp)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = (1 << 2);
	instr->status = 0;
	instr->instructions = 0;

	*instrp = instr + 1;
}

/*
 * Insert the digest of the input payload.  This is typically the last
 * instruction of a sequence.
 */
static void
safexcel_instr_insert_digest(struct safexcel_instr **instrp, int len)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = len;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
	    SAFEXCEL_INSTR_INSERT_HASH_DIGEST;

	*instrp = instr + 1;
}

/*
 * Retrieve and verify a digest.
 */
static void
safexcel_instr_retrieve_digest(struct safexcel_instr **instrp, int len)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_RETRIEVE;
	instr->length = len;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
	instr++;

	instr->opcode = SAFEXCEL_INSTR_OPCODE_VERIFY_FIELDS;
	instr->length = len | SAFEXCEL_INSTR_VERIFY_HASH;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
	    SAFEXCEL_INSTR_STATUS_LAST_PACKET;
	instr->instructions = SAFEXCEL_INSTR_VERIFY_PADDING;

	*instrp = instr + 1;
}

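/*
 * Emit instructions to insert a temporary AES block into the data stream.
 * The CCM and GCM token builders use this between the AAD and payload
 * segments.
 */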
static void
safexcel_instr_temp_aes_block(struct safexcel_instr **instrp)
{
	struct safexcel_instr *instr;

	instr = *instrp;
	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT_REMOVE_RESULT;
	instr->length = 0;
	instr->status = 0;
	instr->instructions = AES_BLOCK_LEN;
	instr++;

	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
	instr->length = AES_BLOCK_LEN;
	instr->status = 0;
	instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
	    SAFEXCEL_INSTR_DEST_CRYPTO;

	*instrp = instr + 1;
}

/*
 * Handle a request for an unauthenticated block cipher.
 */
static void
safexcel_instr_cipher(struct safexcel_request *req,
    struct safexcel_instr *instr, struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;

	crp = req->crp;

	/* Insert the payload. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_payload_length;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_PACKET |
	    SAFEXCEL_INSTR_STATUS_LAST_HASH;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_OUTPUT;

	cdesc->additional_cdata_size = 1;
}

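/*
 * Handle an encrypt-then-authenticate (ETA) request: hash the AAD, encrypt
 * and hash the payload, then insert or verify the digest.
 */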
static void
safexcel_instr_eta(struct safexcel_request *req, struct safexcel_instr *instr,
    struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	crp = req->crp;
	start = instr;

	/* Insert the AAD. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_aad_length;
	instr->status = crp->crp_payload_length == 0 ?
	    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	/* Encrypt any data left in the request. */
	if (crp->crp_payload_length > 0) {
		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
		instr->length = crp->crp_payload_length;
		instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
		instr->instructions = SAFEXCEL_INSTR_INS_LAST |
		    SAFEXCEL_INSTR_DEST_CRYPTO |
		    SAFEXCEL_INSTR_DEST_HASH |
		    SAFEXCEL_INSTR_DEST_OUTPUT;
		instr++;
	}

	/*
	 * Compute the digest, or extract it and place it in the output stream.
	 */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
	else
		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
	cdesc->additional_cdata_size = instr - start;
}

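/*
 * Handle a plain or HMAC digest request.
 */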
static void
safexcel_instr_sha_hash(struct safexcel_request *req,
    struct safexcel_instr *instr)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	crp = req->crp;
	start = instr;

	/* Pass the input data to the hash engine. */
	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_payload_length;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
	instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	/* Insert the hash result into the output stream. */
	safexcel_instr_insert_digest(&instr, req->sess->digestlen);

	/* Pad the rest of the inline instruction space. */
	while (instr != start + SAFEXCEL_MAX_ITOKENS)
		safexcel_instr_nop(&instr);
}

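/*
 * Handle an AES-CCM request.  The A0 and B0 blocks and the AAD length
 * encoding follow the CBC-MAC construction of NIST SP 800-38C.
 */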
1692 static void
1693 safexcel_instr_ccm(struct safexcel_request *req, struct safexcel_instr *instr,
1694     struct safexcel_cmd_descr *cdesc)
1695 {
1696 	const struct crypto_session_params *csp;
1697 	struct cryptop *crp;
1698 	struct safexcel_instr *start;
1699 	uint8_t *a0, *b0, *alenp, L;
1700 	int aalign, blen;
1701 
1702 	crp = req->crp;
1703 	csp = crypto_get_params(crp->crp_session);
1704 	start = instr;
1705 
1706 	/*
1707 	 * Construct two blocks, A0 and B0, used in encryption and
1708 	 * authentication, respectively.  A0 is embedded in the token
1709 	 * descriptor, and B0 is inserted directly into the data stream using
1710 	 * instructions below.
1711 	 *
1712 	 * An explicit check for overflow of the length field is not
1713 	 * needed since the maximum driver size of 65535 bytes fits in
1714 	 * the smallest length field used for a 13-byte nonce.
1715 	 */
1716 	blen = AES_BLOCK_LEN;
1717 	L = 15 - csp->csp_ivlen;
1718 
1719 	a0 = (uint8_t *)&cdesc->control_data.token[0];
1720 	memset(a0, 0, blen);
1721 	a0[0] = L - 1;
1722 	memcpy(&a0[1], req->iv, csp->csp_ivlen);
1723 
1724 	/*
1725 	 * Insert B0 and the AAD length into the input stream.
1726 	 */
1727 	instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1728 	instr->length = blen + (crp->crp_aad_length > 0 ? 2 : 0);
1729 	instr->status = 0;
1730 	instr->instructions = SAFEXCEL_INSTR_DEST_HASH |
1731 	    SAFEXCEL_INSTR_INSERT_IMMEDIATE;
1732 	instr++;
1733 
1734 	b0 = (uint8_t *)instr;
1735 	memset(b0, 0, blen);
1736 	b0[0] =
1737 	    (L - 1) | /* payload length size */
1738 	    ((req->sess->digestlen - 2) / 2) << 3 /* digest length */ |
1739 	    (crp->crp_aad_length > 0 ? 1 : 0) << 6 /* AAD present bit */;
1740 	memcpy(&b0[1], req->iv, csp->csp_ivlen);
1741 	b0[14] = crp->crp_payload_length >> 8;
1742 	b0[15] = crp->crp_payload_length & 0xff;
1743 	instr += blen / sizeof(*instr);
1744 
1745 	/* Insert the AAD length and data into the input stream. */
1746 	if (crp->crp_aad_length > 0) {
1747 		alenp = (uint8_t *)instr;
1748 		alenp[0] = crp->crp_aad_length >> 8;
1749 		alenp[1] = crp->crp_aad_length & 0xff;
1750 		alenp[2] = 0;
1751 		alenp[3] = 0;
1752 		instr++;
1753 
1754 		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1755 		instr->length = crp->crp_aad_length;
1756 		instr->status = 0;
1757 		instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1758 		instr++;
1759 
1760 		/* Insert zero padding. */
		aalign = (crp->crp_aad_length + 2) & (blen - 1);
		instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
		instr->length = aalign == 0 ? 0 : blen - aalign;
1765 		instr->status = crp->crp_payload_length == 0 ?
1766 		    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1767 		instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1768 		instr++;
1769 	}
1770 
1771 	safexcel_instr_temp_aes_block(&instr);
1772 
1773 	/* Insert the cipher payload into the input stream. */
1774 	if (crp->crp_payload_length > 0) {
1775 		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1776 		instr->length = crp->crp_payload_length;
1777 		instr->status = (crp->crp_payload_length & (blen - 1)) == 0 ?
1778 		    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1779 		instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1780 		    SAFEXCEL_INSTR_DEST_CRYPTO |
1781 		    SAFEXCEL_INSTR_DEST_HASH |
1782 		    SAFEXCEL_INSTR_INS_LAST;
1783 		instr++;
1784 
1785 		/* Insert zero padding. */
1786 		if (crp->crp_payload_length & (blen - 1)) {
1787 			instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1788 			instr->length = blen -
1789 			    (crp->crp_payload_length & (blen - 1));
1790 			instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1791 			instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1792 			instr++;
1793 		}
1794 	}
1795 
1796 	/*
1797 	 * Compute the digest, or extract it and place it in the output stream.
1798 	 */
1799 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1800 		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1801 	else
1802 		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
1803 
1804 	cdesc->additional_cdata_size = instr - start;
1805 }
1806 
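/*
 * Build the token instructions for an AES-GCM request.  The initial
 * counter block J0 is formed in the command descriptor by appending a
 * 32-bit block counter of 1 to the 96-bit IV, per NIST SP 800-38D.
 */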
1807 static void
1808 safexcel_instr_gcm(struct safexcel_request *req, struct safexcel_instr *instr,
1809     struct safexcel_cmd_descr *cdesc)
1810 {
1811 	struct cryptop *crp;
1812 	struct safexcel_instr *start;
1813 
1814 	memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
1815 	cdesc->control_data.token[3] = htobe32(1);
1816 
1817 	crp = req->crp;
1818 	start = instr;
1819 
1820 	/* Insert the AAD into the input stream. */
1821 	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1822 	instr->length = crp->crp_aad_length;
1823 	instr->status = crp->crp_payload_length == 0 ?
1824 	    SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1825 	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1826 	    SAFEXCEL_INSTR_DEST_HASH;
1827 	instr++;
1828 
1829 	safexcel_instr_temp_aes_block(&instr);
1830 
1831 	/* Insert the cipher payload into the input stream. */
1832 	if (crp->crp_payload_length > 0) {
1833 		instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1834 		instr->length = crp->crp_payload_length;
1835 		instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1836 		instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1837 		    SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_HASH |
1838 		    SAFEXCEL_INSTR_INS_LAST;
1839 		instr++;
1840 	}
1841 
1842 	/*
1843 	 * Compute the digest, or extract it and place it in the output stream.
1844 	 */
1845 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1846 		safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1847 	else
1848 		safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
1849 
1850 	cdesc->additional_cdata_size = instr - start;
1851 }
1852 
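/*
 * Build the token instructions for an AES-GMAC request.  The payload is
 * only authenticated; it is not passed to the output stream.
 */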
1853 static void
1854 safexcel_instr_gmac(struct safexcel_request *req, struct safexcel_instr *instr,
1855     struct safexcel_cmd_descr *cdesc)
1856 {
1857 	struct cryptop *crp;
1858 	struct safexcel_instr *start;
1859 
1860 	memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
1861 	cdesc->control_data.token[3] = htobe32(1);
1862 
1863 	crp = req->crp;
1864 	start = instr;
1865 
1866 	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1867 	instr->length = crp->crp_payload_length;
1868 	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1869 	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1870 	    SAFEXCEL_INSTR_DEST_HASH;
1871 	instr++;
1872 
1873 	safexcel_instr_temp_aes_block(&instr);
1874 
1875 	safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1876 
1877 	cdesc->additional_cdata_size = instr - start;
1878 }
1879 
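/*
 * Populate the per-request context record and build the token, the
 * instruction sequence that tells the packet engine how to process the
 * input stream.
 */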
1880 static void
1881 safexcel_set_token(struct safexcel_request *req)
1882 {
1883 	const struct crypto_session_params *csp;
1884 	struct cryptop *crp;
1885 	struct safexcel_cmd_descr *cdesc;
1886 	struct safexcel_context_record *ctx;
1887 	struct safexcel_context_template *ctxtmp;
1888 	struct safexcel_instr *instr;
1889 	struct safexcel_softc *sc;
1890 	const uint8_t *akey, *ckey;
1891 	int ringidx;
1892 
1893 	crp = req->crp;
1894 	csp = crypto_get_params(crp->crp_session);
1895 	cdesc = req->cdesc;
1896 	sc = req->sc;
1897 	ringidx = req->ringidx;
1898 
1899 	akey = crp->crp_auth_key;
1900 	ckey = crp->crp_cipher_key;
1901 	if (akey != NULL || ckey != NULL) {
1902 		/*
1903 		 * If we have a per-request key we have to generate the context
1904 		 * record on the fly.
1905 		 */
1906 		if (akey == NULL)
1907 			akey = csp->csp_auth_key;
1908 		if (ckey == NULL)
1909 			ckey = csp->csp_cipher_key;
1910 		ctx = (struct safexcel_context_record *)req->ctx.vaddr;
1911 		(void)safexcel_set_context(ctx, crp->crp_op, ckey, akey,
1912 		    req->sess);
1913 	} else {
1914 		/*
1915 		 * Use the context record template computed at session
1916 		 * initialization time.
1917 		 */
1918 		ctxtmp = CRYPTO_OP_IS_ENCRYPT(crp->crp_op) ?
1919 		    &req->sess->encctx : &req->sess->decctx;
1920 		ctx = &ctxtmp->ctx;
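		/*
		 * The first two words of the context record hold the
		 * control words, which are loaded into the command
		 * descriptor below; copy only the context data that
		 * follows them.
		 */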
1921 		memcpy(req->ctx.vaddr + 2 * sizeof(uint32_t), ctx->data,
1922 		    ctxtmp->len);
1923 	}
1924 	cdesc->control_data.control0 = ctx->control0;
1925 	cdesc->control_data.control1 = ctx->control1;
1926 
1927 	/*
1928 	 * For keyless hash operations, the token instructions can be embedded
1929 	 * in the token itself.  Otherwise we use an additional token descriptor
1930 	 * and the embedded instruction space is used to store the IV.
1931 	 */
1932 	if (csp->csp_cipher_alg == 0 &&
1933 	    csp->csp_auth_alg != CRYPTO_AES_NIST_GMAC) {
1934 		instr = (void *)cdesc->control_data.token;
1935 	} else {
1936 		instr = (void *)(sc->sc_ring[ringidx].dma_atok.vaddr +
1937 		    sc->sc_config.atok_offset *
1938 		    (cdesc - sc->sc_ring[ringidx].cdr.desc));
1939 		cdesc->control_data.options |= SAFEXCEL_OPTION_4_TOKEN_IV_CMD;
1940 	}
1941 
1942 	switch (csp->csp_cipher_alg) {
1943 	case CRYPTO_AES_NIST_GCM_16:
1944 		safexcel_instr_gcm(req, instr, cdesc);
1945 		break;
1946 	case CRYPTO_AES_CCM_16:
1947 		safexcel_instr_ccm(req, instr, cdesc);
1948 		break;
1949 	case CRYPTO_AES_XTS:
1950 		memcpy(cdesc->control_data.token, req->iv, AES_XTS_IV_LEN);
1951 		memset(cdesc->control_data.token +
1952 		    AES_XTS_IV_LEN / sizeof(uint32_t), 0, AES_XTS_IV_LEN);
1953 
1954 		safexcel_instr_cipher(req, instr, cdesc);
1955 		break;
1956 	case CRYPTO_AES_CBC:
1957 	case CRYPTO_AES_ICM:
1958 		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_LEN);
1959 		if (csp->csp_auth_alg != 0)
1960 			safexcel_instr_eta(req, instr, cdesc);
1961 		else
1962 			safexcel_instr_cipher(req, instr, cdesc);
1963 		break;
1964 	default:
1965 		switch (csp->csp_auth_alg) {
1966 		case CRYPTO_SHA1:
1967 		case CRYPTO_SHA1_HMAC:
1968 		case CRYPTO_SHA2_224:
1969 		case CRYPTO_SHA2_224_HMAC:
1970 		case CRYPTO_SHA2_256:
1971 		case CRYPTO_SHA2_256_HMAC:
1972 		case CRYPTO_SHA2_384:
1973 		case CRYPTO_SHA2_384_HMAC:
1974 		case CRYPTO_SHA2_512:
1975 		case CRYPTO_SHA2_512_HMAC:
1976 			safexcel_instr_sha_hash(req, instr);
1977 			break;
1978 		case CRYPTO_AES_NIST_GMAC:
1979 			safexcel_instr_gmac(req, instr, cdesc);
1980 			break;
1981 		default:
1982 			panic("unhandled auth request %d", csp->csp_auth_alg);
1983 		}
1984 		break;
1985 	}
1986 }
1987 
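/*
 * Reserve the next slot in the result descriptor ring and initialize a
 * descriptor for one output segment.  NULL is returned when the ring is
 * full; one slot is always left unused so that a full ring can be told
 * apart from an empty one.
 */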
1988 static struct safexcel_res_descr *
1989 safexcel_res_descr_add(struct safexcel_ring *ring, bool first, bool last,
1990     bus_addr_t data, uint32_t len)
1991 {
1992 	struct safexcel_res_descr *rdesc;
1993 	struct safexcel_res_descr_ring *rring;
1994 
1995 	mtx_assert(&ring->mtx, MA_OWNED);
1996 
1997 	rring = &ring->rdr;
1998 	if ((rring->write + 1) % SAFEXCEL_RING_SIZE == rring->read)
1999 		return (NULL);
2000 
2001 	rdesc = &rring->desc[rring->write];
2002 	rring->write = (rring->write + 1) % SAFEXCEL_RING_SIZE;
2003 
2004 	rdesc->particle_size = len;
2005 	rdesc->rsvd0 = 0;
2006 	rdesc->descriptor_overflow = 0;
2007 	rdesc->buffer_overflow = 0;
2008 	rdesc->last_seg = last;
2009 	rdesc->first_seg = first;
2010 	rdesc->result_size =
2011 	    sizeof(struct safexcel_res_data) / sizeof(uint32_t);
2012 	rdesc->rsvd1 = 0;
2013 	rdesc->data_lo = SAFEXCEL_ADDR_LO(data);
2014 	rdesc->data_hi = SAFEXCEL_ADDR_HI(data);
2015 
2016 	if (first) {
2017 		rdesc->result_data.packet_length = 0;
2018 		rdesc->result_data.error_code = 0;
2019 	}
2020 
2021 	return (rdesc);
2022 }
2023 
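/*
 * Reserve the next slot in the command descriptor ring and initialize a
 * descriptor for one input segment.  The first descriptor of a request
 * additionally carries the total packet length and a pointer to the
 * context record.
 */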
2024 static struct safexcel_cmd_descr *
2025 safexcel_cmd_descr_add(struct safexcel_ring *ring, bool first, bool last,
2026     bus_addr_t data, uint32_t seglen, uint32_t reqlen, bus_addr_t context)
2027 {
2028 	struct safexcel_cmd_descr *cdesc;
2029 	struct safexcel_cmd_descr_ring *cring;
2030 
2031 	KASSERT(reqlen <= SAFEXCEL_MAX_REQUEST_SIZE,
2032 	    ("%s: request length %u too long", __func__, reqlen));
2033 	mtx_assert(&ring->mtx, MA_OWNED);
2034 
2035 	cring = &ring->cdr;
2036 	if ((cring->write + 1) % SAFEXCEL_RING_SIZE == cring->read)
2037 		return (NULL);
2038 
2039 	cdesc = &cring->desc[cring->write];
2040 	cring->write = (cring->write + 1) % SAFEXCEL_RING_SIZE;
2041 
2042 	cdesc->particle_size = seglen;
2043 	cdesc->rsvd0 = 0;
2044 	cdesc->last_seg = last;
2045 	cdesc->first_seg = first;
2046 	cdesc->additional_cdata_size = 0;
2047 	cdesc->rsvd1 = 0;
2048 	cdesc->data_lo = SAFEXCEL_ADDR_LO(data);
2049 	cdesc->data_hi = SAFEXCEL_ADDR_HI(data);
2050 	if (first) {
2051 		cdesc->control_data.packet_length = reqlen;
2052 		cdesc->control_data.options = SAFEXCEL_OPTION_IP |
2053 		    SAFEXCEL_OPTION_CP | SAFEXCEL_OPTION_CTX_CTRL_IN_CMD |
2054 		    SAFEXCEL_OPTION_RC_AUTO;
2055 		cdesc->control_data.type = SAFEXCEL_TOKEN_TYPE_BYPASS;
2056 		cdesc->control_data.context_lo = SAFEXCEL_ADDR_LO(context) |
2057 		    SAFEXCEL_CONTEXT_SMALL;
2058 		cdesc->control_data.context_hi = SAFEXCEL_ADDR_HI(context);
2059 	}
2060 
2061 	return (cdesc);
2062 }
2063 
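/*
 * Unwind descriptor ring reservations made on behalf of a partially
 * constructed request, e.g., when one of the rings fills up midway
 * through a chain.
 */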
2064 static void
2065 safexcel_cmd_descr_rollback(struct safexcel_ring *ring, int count)
2066 {
2067 	struct safexcel_cmd_descr_ring *cring;
2068 
2069 	mtx_assert(&ring->mtx, MA_OWNED);
2070 
2071 	cring = &ring->cdr;
2072 	cring->write -= count;
2073 	if (cring->write < 0)
2074 		cring->write += SAFEXCEL_RING_SIZE;
2075 }
2076 
2077 static void
2078 safexcel_res_descr_rollback(struct safexcel_ring *ring, int count)
2079 {
2080 	struct safexcel_res_descr_ring *rring;
2081 
2082 	mtx_assert(&ring->mtx, MA_OWNED);
2083 
2084 	rring = &ring->rdr;
2085 	rring->write -= count;
2086 	if (rring->write < 0)
2087 		rring->write += SAFEXCEL_RING_SIZE;
2088 }
2089 
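/*
 * Append the subrange [start, start + len) of a bus_dma segment array to
 * a scatter/gather list.
 */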
2090 static void
2091 safexcel_append_segs(bus_dma_segment_t *segs, int nseg, struct sglist *sg,
2092     int start, int len)
2093 {
2094 	bus_dma_segment_t *seg;
2095 	size_t seglen;
2096 	int error, i;
2097 
2098 	for (i = 0; i < nseg && len > 0; i++) {
2099 		seg = &segs[i];
2100 
2101 		if (seg->ds_len <= start) {
2102 			start -= seg->ds_len;
2103 			continue;
2104 		}
2105 
2106 		seglen = MIN(len, seg->ds_len - start);
2107 		error = sglist_append_phys(sg, seg->ds_addr + start, seglen);
2108 		if (error != 0)
2109 			panic("%s: ran out of segments: %d", __func__, error);
2110 		len -= seglen;
2111 		start = 0;
2112 	}
2113 }
2114 
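/*
 * Callback for bus_dmamap_load_crp(): convert the mapped crypto buffer
 * into chains of command (input) and result (output) descriptors.
 */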
2115 static void
2116 safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg,
2117     int error)
2118 {
2119 	const struct crypto_session_params *csp;
2120 	struct cryptop *crp;
2121 	struct safexcel_cmd_descr *cdesc;
2122 	struct safexcel_request *req;
2123 	struct safexcel_ring *ring;
2124 	struct safexcel_session *sess;
2125 	struct sglist *sg;
2126 	size_t inlen;
2127 	int i;
2128 	bool first, last;
2129 
2130 	req = arg;
2131 	if (error != 0) {
2132 		req->error = error;
2133 		return;
2134 	}
2135 
2136 	crp = req->crp;
2137 	csp = crypto_get_params(crp->crp_session);
2138 	sess = req->sess;
2139 	ring = &req->sc->sc_ring[req->ringidx];
2140 
2141 	mtx_assert(&ring->mtx, MA_OWNED);
2142 
2143 	/*
2144 	 * Set up descriptors for input and output data.
2145 	 *
	 * The token programs executed by the processing engine require that
	 * any AAD come first, followed by the cipher payload and then the
	 * digest.  Some consumers place the digest first in the input buffer,
	 * in which case we have to create an extra descriptor.
2150 	 *
2151 	 * As an optimization, unmodified data is not passed to the output
2152 	 * stream.
2153 	 */
2154 	sglist_reset(ring->cmd_data);
2155 	sglist_reset(ring->res_data);
2156 	if (crp->crp_aad_length != 0) {
2157 		safexcel_append_segs(segs, nseg, ring->cmd_data,
2158 		    crp->crp_aad_start, crp->crp_aad_length);
2159 	}
2160 	safexcel_append_segs(segs, nseg, ring->cmd_data,
2161 	    crp->crp_payload_start, crp->crp_payload_length);
2162 	if (csp->csp_cipher_alg != 0) {
2163 		safexcel_append_segs(segs, nseg, ring->res_data,
2164 		    crp->crp_payload_start, crp->crp_payload_length);
2165 	}
2166 	if (sess->digestlen > 0) {
2167 		if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
2168 			safexcel_append_segs(segs, nseg, ring->cmd_data,
2169 			    crp->crp_digest_start, sess->digestlen);
2170 		} else {
2171 			safexcel_append_segs(segs, nseg, ring->res_data,
2172 			    crp->crp_digest_start, sess->digestlen);
2173 		}
2174 	}
2175 
2176 	sg = ring->cmd_data;
2177 	if (sg->sg_nseg == 0) {
2178 		/*
2179 		 * Fake a segment for the command descriptor if the input has
2180 		 * length zero.  The EIP97 apparently does not handle
2181 		 * zero-length packets properly since subsequent requests return
2182 		 * bogus errors, so provide a dummy segment using the context
2183 		 * descriptor.  Also, we must allocate at least one command ring
2184 		 * entry per request to keep the request shadow ring in sync.
2185 		 */
2186 		(void)sglist_append_phys(sg, req->ctx.paddr, 1);
2187 	}
2188 	for (i = 0, inlen = 0; i < sg->sg_nseg; i++)
2189 		inlen += sg->sg_segs[i].ss_len;
2190 	for (i = 0; i < sg->sg_nseg; i++) {
2191 		first = i == 0;
2192 		last = i == sg->sg_nseg - 1;
2193 
2194 		cdesc = safexcel_cmd_descr_add(ring, first, last,
2195 		    sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len,
2196 		    (uint32_t)inlen, req->ctx.paddr);
2197 		if (cdesc == NULL) {
2198 			safexcel_cmd_descr_rollback(ring, i);
2199 			counter_u64_add(req->sc->sc_cdesc_alloc_failures, 1);
2200 			req->error = ERESTART;
2201 			return;
2202 		}
2203 		if (i == 0)
2204 			req->cdesc = cdesc;
2205 	}
2206 	req->cdescs = sg->sg_nseg;
2207 
2208 	sg = ring->res_data;
2209 	if (sg->sg_nseg == 0) {
2210 		/*
2211 		 * We need a result descriptor even if the output stream will be
2212 		 * empty, for example when verifying an AAD digest.
2213 		 */
2214 		sg->sg_segs[0].ss_paddr = 0;
2215 		sg->sg_segs[0].ss_len = 0;
2216 		sg->sg_nseg = 1;
2217 	}
2218 	for (i = 0; i < sg->sg_nseg; i++) {
2219 		first = i == 0;
2220 		last = i == sg->sg_nseg - 1;
2221 
2222 		if (safexcel_res_descr_add(ring, first, last,
2223 		    sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len) == NULL) {
2224 			safexcel_cmd_descr_rollback(ring,
2225 			    ring->cmd_data->sg_nseg);
2226 			safexcel_res_descr_rollback(ring, i);
2227 			counter_u64_add(req->sc->sc_rdesc_alloc_failures, 1);
2228 			req->error = ERESTART;
2229 			return;
2230 		}
2231 	}
2232 	req->rdescs = sg->sg_nseg;
2233 }
2234 
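/*
 * Map a crypto request's buffers and build the corresponding descriptor
 * chains.  The ring lock must be held, as descriptors are reserved in the
 * load callback.
 */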
2235 static int
2236 safexcel_create_chain(struct safexcel_ring *ring, struct safexcel_request *req)
2237 {
2238 	int error;
2239 
2240 	req->error = 0;
2241 	req->cdescs = req->rdescs = 0;
2242 
2243 	error = bus_dmamap_load_crp(ring->data_dtag, req->dmap, req->crp,
2244 	    safexcel_create_chain_cb, req, BUS_DMA_NOWAIT);
2245 	if (error == 0)
2246 		req->dmap_loaded = true;
2247 
2248 	if (req->error != 0)
2249 		error = req->error;
2250 
2251 	return (error);
2252 }
2253 
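/*
 * Validate a session's cipher parameters.  Only AES-CBC, AES-ICM and
 * AES-XTS with their standard IV sizes are handled here; the AEAD modes
 * are checked separately in safexcel_probesession().
 */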
2254 static bool
2255 safexcel_probe_cipher(const struct crypto_session_params *csp)
2256 {
2257 	switch (csp->csp_cipher_alg) {
2258 	case CRYPTO_AES_CBC:
2259 	case CRYPTO_AES_ICM:
2260 		if (csp->csp_ivlen != AES_BLOCK_LEN)
2261 			return (false);
2262 		break;
2263 	case CRYPTO_AES_XTS:
2264 		if (csp->csp_ivlen != AES_XTS_IV_LEN)
2265 			return (false);
2266 		break;
2267 	default:
2268 		return (false);
2269 	}
2270 
2271 	return (true);
2272 }
2273 
2274 /*
2275  * Determine whether the driver can implement a session with the requested
2276  * parameters.
2277  */
2278 static int
2279 safexcel_probesession(device_t dev, const struct crypto_session_params *csp)
2280 {
2281 	if (csp->csp_flags != 0)
2282 		return (EINVAL);
2283 
2284 	switch (csp->csp_mode) {
2285 	case CSP_MODE_CIPHER:
2286 		if (!safexcel_probe_cipher(csp))
2287 			return (EINVAL);
2288 		break;
2289 	case CSP_MODE_DIGEST:
2290 		switch (csp->csp_auth_alg) {
2291 		case CRYPTO_AES_NIST_GMAC:
2292 			if (csp->csp_ivlen != AES_GCM_IV_LEN)
2293 				return (EINVAL);
2294 			break;
2295 		case CRYPTO_SHA1:
2296 		case CRYPTO_SHA1_HMAC:
2297 		case CRYPTO_SHA2_224:
2298 		case CRYPTO_SHA2_224_HMAC:
2299 		case CRYPTO_SHA2_256:
2300 		case CRYPTO_SHA2_256_HMAC:
2301 		case CRYPTO_SHA2_384:
2302 		case CRYPTO_SHA2_384_HMAC:
2303 		case CRYPTO_SHA2_512:
2304 		case CRYPTO_SHA2_512_HMAC:
2305 			break;
2306 		default:
2307 			return (EINVAL);
2308 		}
2309 		break;
2310 	case CSP_MODE_AEAD:
2311 		switch (csp->csp_cipher_alg) {
2312 		case CRYPTO_AES_NIST_GCM_16:
2313 			if (csp->csp_ivlen != AES_GCM_IV_LEN)
2314 				return (EINVAL);
2315 			break;
2316 		case CRYPTO_AES_CCM_16:
2317 			break;
2318 		default:
2319 			return (EINVAL);
2320 		}
2321 		break;
2322 	case CSP_MODE_ETA:
2323 		if (!safexcel_probe_cipher(csp))
2324 			return (EINVAL);
2325 		switch (csp->csp_cipher_alg) {
2326 		case CRYPTO_AES_CBC:
2327 		case CRYPTO_AES_ICM:
			/*
			 * Note that AES-XTS is absent from this list:
			 * the EIP97 does not support combining AES-XTS
			 * with hash operations.
			 */
2332 			if (csp->csp_auth_alg != CRYPTO_SHA1_HMAC &&
2333 			    csp->csp_auth_alg != CRYPTO_SHA2_224_HMAC &&
2334 			    csp->csp_auth_alg != CRYPTO_SHA2_256_HMAC &&
2335 			    csp->csp_auth_alg != CRYPTO_SHA2_384_HMAC &&
2336 			    csp->csp_auth_alg != CRYPTO_SHA2_512_HMAC)
2337 				return (EINVAL);
2338 			break;
2339 		default:
2340 			return (EINVAL);
2341 		}
2342 		break;
2343 	default:
2344 		return (EINVAL);
2345 	}
2346 
2347 	return (CRYPTODEV_PROBE_HARDWARE);
2348 }
2349 
2350 static uint32_t
2351 safexcel_aes_algid(int keylen)
2352 {
2353 	switch (keylen) {
2354 	case 16:
2355 		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES128);
2356 	case 24:
2357 		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES192);
2358 	case 32:
2359 		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES256);
2360 	default:
2361 		panic("invalid AES key length %d", keylen);
2362 	}
2363 }
2364 
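/*
 * CCM's CBC-MAC is keyed with the AES cipher key, so the hash algorithm
 * identifier is likewise selected by key length.
 */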
2365 static uint32_t
2366 safexcel_aes_ccm_hashid(int keylen)
2367 {
2368 	switch (keylen) {
2369 	case 16:
2370 		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC128);
2371 	case 24:
2372 		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC192);
2373 	case 32:
2374 		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC256);
2375 	default:
2376 		panic("invalid AES key length %d", keylen);
2377 	}
2378 }
2379 
2380 static uint32_t
2381 safexcel_sha_hashid(int alg)
2382 {
2383 	switch (alg) {
2384 	case CRYPTO_SHA1:
2385 	case CRYPTO_SHA1_HMAC:
2386 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA1);
2387 	case CRYPTO_SHA2_224:
2388 	case CRYPTO_SHA2_224_HMAC:
2389 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA224);
2390 	case CRYPTO_SHA2_256:
2391 	case CRYPTO_SHA2_256_HMAC:
2392 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA256);
2393 	case CRYPTO_SHA2_384:
2394 	case CRYPTO_SHA2_384_HMAC:
2395 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA384);
2396 	case CRYPTO_SHA2_512:
2397 	case CRYPTO_SHA2_512_HMAC:
2398 		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA512);
2399 	default:
2400 		__assert_unreachable();
2401 	}
2402 }
2403 
2404 static int
2405 safexcel_sha_hashlen(int alg)
2406 {
2407 	switch (alg) {
2408 	case CRYPTO_SHA1:
2409 	case CRYPTO_SHA1_HMAC:
2410 		return (SHA1_HASH_LEN);
2411 	case CRYPTO_SHA2_224:
2412 	case CRYPTO_SHA2_224_HMAC:
2413 		return (SHA2_224_HASH_LEN);
2414 	case CRYPTO_SHA2_256:
2415 	case CRYPTO_SHA2_256_HMAC:
2416 		return (SHA2_256_HASH_LEN);
2417 	case CRYPTO_SHA2_384:
2418 	case CRYPTO_SHA2_384_HMAC:
2419 		return (SHA2_384_HASH_LEN);
2420 	case CRYPTO_SHA2_512:
2421 	case CRYPTO_SHA2_512_HMAC:
2422 		return (SHA2_512_HASH_LEN);
2423 	default:
2424 		__assert_unreachable();
2425 	}
2426 }
2427 
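/*
 * Return the size of the hash engine's intermediate state.  SHA-224 and
 * SHA-384 are truncations of SHA-256 and SHA-512, so the engine keeps the
 * full state of the larger digest.
 */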
2428 static int
2429 safexcel_sha_statelen(int alg)
2430 {
2431 	switch (alg) {
2432 	case CRYPTO_SHA1:
2433 	case CRYPTO_SHA1_HMAC:
2434 		return (SHA1_HASH_LEN);
2435 	case CRYPTO_SHA2_224:
2436 	case CRYPTO_SHA2_224_HMAC:
2437 	case CRYPTO_SHA2_256:
2438 	case CRYPTO_SHA2_256_HMAC:
2439 		return (SHA2_256_HASH_LEN);
2440 	case CRYPTO_SHA2_384:
2441 	case CRYPTO_SHA2_384_HMAC:
2442 	case CRYPTO_SHA2_512:
2443 	case CRYPTO_SHA2_512_HMAC:
2444 		return (SHA2_512_HASH_LEN);
2445 	default:
2446 		__assert_unreachable();
2447 	}
2448 }
2449 
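/*
 * Initialize a new session: translate the session parameters into the
 * packet engine's algorithm identifiers and precompute context record
 * templates for the encryption and decryption directions.
 */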
2450 static int
2451 safexcel_newsession(device_t dev, crypto_session_t cses,
2452     const struct crypto_session_params *csp)
2453 {
2454 	struct safexcel_session *sess;
2455 	struct safexcel_softc *sc;
2456 
2457 	sc = device_get_softc(dev);
2458 	sess = crypto_get_driver_session(cses);
2459 	sess->cses = cses;
2460 
2461 	switch (csp->csp_auth_alg) {
2462 	case CRYPTO_SHA1:
2463 	case CRYPTO_SHA2_224:
2464 	case CRYPTO_SHA2_256:
2465 	case CRYPTO_SHA2_384:
2466 	case CRYPTO_SHA2_512:
2467 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_PRECOMPUTED;
2468 		sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
2469 		sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
2470 		sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
2471 		break;
2472 	case CRYPTO_SHA1_HMAC:
2473 	case CRYPTO_SHA2_224_HMAC:
2474 	case CRYPTO_SHA2_256_HMAC:
2475 	case CRYPTO_SHA2_384_HMAC:
2476 	case CRYPTO_SHA2_512_HMAC:
2477 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_HMAC;
2478 		sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
2479 		sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
2480 		sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
2481 		break;
2482 	case CRYPTO_AES_NIST_GMAC:
2483 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
2484 		sess->digestlen = GMAC_DIGEST_LEN;
2485 		sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
2486 		sess->alg = safexcel_aes_algid(csp->csp_auth_klen);
2487 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
2488 		break;
2489 	}
2490 
2491 	switch (csp->csp_cipher_alg) {
2492 	case CRYPTO_AES_NIST_GCM_16:
2493 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
2494 		sess->digestlen = GMAC_DIGEST_LEN;
2495 		sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
2496 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2497 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
2498 		break;
2499 	case CRYPTO_AES_CCM_16:
2500 		sess->hash = safexcel_aes_ccm_hashid(csp->csp_cipher_klen);
2501 		sess->digest = SAFEXCEL_CONTROL0_DIGEST_CCM;
2502 		sess->digestlen = CCM_CBC_MAX_DIGEST_LEN;
2503 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2504 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CCM;
2505 		break;
2506 	case CRYPTO_AES_CBC:
2507 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2508 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CBC;
2509 		break;
2510 	case CRYPTO_AES_ICM:
2511 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2512 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CTR;
2513 		break;
2514 	case CRYPTO_AES_XTS:
2515 		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen / 2);
2516 		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_XTS;
2517 		break;
2518 	}
2519 
2520 	if (csp->csp_auth_mlen != 0)
2521 		sess->digestlen = csp->csp_auth_mlen;
2522 
2523 	sess->encctx.len = safexcel_set_context(&sess->encctx.ctx,
2524 	    CRYPTO_OP_ENCRYPT, csp->csp_cipher_key, csp->csp_auth_key,
2525 	    sess);
2526 	sess->decctx.len = safexcel_set_context(&sess->decctx.ctx,
2527 	    CRYPTO_OP_DECRYPT, csp->csp_cipher_key, csp->csp_auth_key,
2528 	    sess);
2529 
2530 	return (0);
2531 }
2532 
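/*
 * Submit a crypto request: select a ring based on the current CPU,
 * reserve a request structure and its descriptors, build the token, and
 * kick the packet engine.  ERESTART is returned when a ring is saturated
 * so that the framework will retry once requests complete.
 */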
2533 static int
2534 safexcel_process(device_t dev, struct cryptop *crp, int hint)
2535 {
2536 	const struct crypto_session_params *csp;
2537 	struct safexcel_request *req;
2538 	struct safexcel_ring *ring;
2539 	struct safexcel_session *sess;
2540 	struct safexcel_softc *sc;
2541 	int error;
2542 
2543 	sc = device_get_softc(dev);
2544 	sess = crypto_get_driver_session(crp->crp_session);
2545 	csp = crypto_get_params(crp->crp_session);
2546 
2547 	if (__predict_false(crypto_buffer_len(&crp->crp_buf) >
2548 	    SAFEXCEL_MAX_REQUEST_SIZE)) {
2549 		crp->crp_etype = E2BIG;
2550 		crypto_done(crp);
2551 		return (0);
2552 	}
2553 
2554 	ring = &sc->sc_ring[curcpu % sc->sc_config.rings];
2555 	mtx_lock(&ring->mtx);
2556 	req = safexcel_alloc_request(sc, ring);
2557 	if (__predict_false(req == NULL)) {
2558 		ring->blocked = CRYPTO_SYMQ;
2559 		mtx_unlock(&ring->mtx);
2560 		counter_u64_add(sc->sc_req_alloc_failures, 1);
2561 		return (ERESTART);
2562 	}
2563 
2564 	req->crp = crp;
2565 	req->sess = sess;
2566 
2567 	crypto_read_iv(crp, req->iv);
2568 
2569 	error = safexcel_create_chain(ring, req);
2570 	if (__predict_false(error != 0)) {
2571 		safexcel_free_request(ring, req);
2572 		if (error == ERESTART)
2573 			ring->blocked = CRYPTO_SYMQ;
2574 		mtx_unlock(&ring->mtx);
2575 		if (error != ERESTART) {
2576 			crp->crp_etype = error;
2577 			crypto_done(crp);
2578 			return (0);
2579 		} else {
2580 			return (ERESTART);
2581 		}
2582 	}
2583 
2584 	safexcel_set_token(req);
2585 
2586 	bus_dmamap_sync(ring->data_dtag, req->dmap,
2587 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2588 	bus_dmamap_sync(req->ctx.tag, req->ctx.map,
2589 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2590 	bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
2591 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2592 	bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
2593 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2594 	bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
2595 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2596 
2597 	safexcel_execute(sc, ring, req, hint);
2598 
2599 	mtx_unlock(&ring->mtx);
2600 
2601 	return (0);
2602 }
2603 
2604 static device_method_t safexcel_methods[] = {
2605 	/* Device interface */
2606 	DEVMETHOD(device_probe,		safexcel_probe),
2607 	DEVMETHOD(device_attach,	safexcel_attach),
2608 	DEVMETHOD(device_detach,	safexcel_detach),
2609 
2610 	/* Cryptodev interface */
2611 	DEVMETHOD(cryptodev_probesession, safexcel_probesession),
2612 	DEVMETHOD(cryptodev_newsession,	safexcel_newsession),
2613 	DEVMETHOD(cryptodev_process,	safexcel_process),
2614 
2615 	DEVMETHOD_END
2616 };
2617 
2618 static devclass_t safexcel_devclass;
2619 
2620 static driver_t safexcel_driver = {
2621 	.name 		= "safexcel",
2622 	.methods 	= safexcel_methods,
2623 	.size		= sizeof(struct safexcel_softc),
2624 };
2625 
2626 DRIVER_MODULE(safexcel, simplebus, safexcel_driver, safexcel_devclass, 0, 0);
2627 MODULE_VERSION(safexcel, 1);
2628 MODULE_DEPEND(safexcel, crypto, 1, 1, 1);
2629