xref: /freebsd/sys/dev/sec/sec.c (revision 732a02b4e77866604a120a275c082bb6221bd2ff)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
19  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0,
30  * 3.0 and 3.1 are supported.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mbuf.h>
44 #include <sys/module.h>
45 #include <sys/mutex.h>
46 #include <sys/random.h>
47 #include <sys/rman.h>
48 
49 #include <machine/_inttypes.h>
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 
53 #include <opencrypto/cryptodev.h>
54 #include <opencrypto/xform_auth.h>
55 #include "cryptodev_if.h"
56 
57 #include <dev/ofw/ofw_bus_subr.h>
58 #include <dev/sec/sec.h>
59 
60 static int	sec_probe(device_t dev);
61 static int	sec_attach(device_t dev);
62 static int	sec_detach(device_t dev);
63 static int	sec_suspend(device_t dev);
64 static int	sec_resume(device_t dev);
65 static int	sec_shutdown(device_t dev);
66 static void	sec_primary_intr(void *arg);
67 static void	sec_secondary_intr(void *arg);
68 static int	sec_setup_intr(struct sec_softc *sc, struct resource **ires,
69     void **ihand, int *irid, driver_intr_t handler, const char *iname);
70 static void	sec_release_intr(struct sec_softc *sc, struct resource *ires,
71     void *ihand, int irid, const char *iname);
72 static int	sec_controller_reset(struct sec_softc *sc);
73 static int	sec_channel_reset(struct sec_softc *sc, int channel, int full);
74 static int	sec_init(struct sec_softc *sc);
75 static int	sec_alloc_dma_mem(struct sec_softc *sc,
76     struct sec_dma_mem *dma_mem, bus_size_t size);
77 static int	sec_desc_map_dma(struct sec_softc *sc,
78     struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size,
79     struct sec_desc_map_info *sdmi);
80 static void	sec_free_dma_mem(struct sec_dma_mem *dma_mem);
81 static void	sec_enqueue(struct sec_softc *sc);
82 static int	sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
83     int channel);
84 static int	sec_eu_channel(struct sec_softc *sc, int eu);
85 static int	sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
86     u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize);
87 static int	sec_make_pointer_direct(struct sec_softc *sc,
88     struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
89 static int	sec_probesession(device_t dev,
90     const struct crypto_session_params *csp);
91 static int	sec_newsession(device_t dev, crypto_session_t cses,
92     const struct crypto_session_params *csp);
93 static int	sec_process(device_t dev, struct cryptop *crp, int hint);
94 static int	sec_build_common_ns_desc(struct sec_softc *sc,
95     struct sec_desc *desc, const struct crypto_session_params *csp,
96     struct cryptop *crp);
97 static int	sec_build_common_s_desc(struct sec_softc *sc,
98     struct sec_desc *desc, const struct crypto_session_params *csp,
99     struct cryptop *crp);
100 
101 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
102 
103 /* AESU */
104 static bool	sec_aesu_newsession(const struct crypto_session_params *csp);
105 static int	sec_aesu_make_desc(struct sec_softc *sc,
106     const struct crypto_session_params *csp, struct sec_desc *desc,
107     struct cryptop *crp);
108 
109 /* DEU */
110 static bool	sec_deu_newsession(const struct crypto_session_params *csp);
111 static int	sec_deu_make_desc(struct sec_softc *sc,
112     const struct crypto_session_params *csp, struct sec_desc *desc,
113     struct cryptop *crp);
114 
115 /* MDEU */
116 static bool	sec_mdeu_can_handle(u_int alg);
117 static int	sec_mdeu_config(const struct crypto_session_params *csp,
118     u_int *eu, u_int *mode, u_int *hashlen);
119 static bool	sec_mdeu_newsession(const struct crypto_session_params *csp);
120 static int	sec_mdeu_make_desc(struct sec_softc *sc,
121     const struct crypto_session_params *csp, struct sec_desc *desc,
122     struct cryptop *crp);
123 
124 static device_method_t sec_methods[] = {
125 	/* Device interface */
126 	DEVMETHOD(device_probe,		sec_probe),
127 	DEVMETHOD(device_attach,	sec_attach),
128 	DEVMETHOD(device_detach,	sec_detach),
129 
130 	DEVMETHOD(device_suspend,	sec_suspend),
131 	DEVMETHOD(device_resume,	sec_resume),
132 	DEVMETHOD(device_shutdown,	sec_shutdown),
133 
134 	/* Crypto methods */
135 	DEVMETHOD(cryptodev_probesession, sec_probesession),
136 	DEVMETHOD(cryptodev_newsession,	sec_newsession),
137 	DEVMETHOD(cryptodev_process,	sec_process),
138 
139 	DEVMETHOD_END
140 };
141 static driver_t sec_driver = {
142 	"sec",
143 	sec_methods,
144 	sizeof(struct sec_softc),
145 };
146 
147 static devclass_t sec_devclass;
148 DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
149 MODULE_DEPEND(sec, crypto, 1, 1, 1);
150 
151 static struct sec_eu_methods sec_eus[] = {
152 	{
153 		sec_aesu_newsession,
154 		sec_aesu_make_desc,
155 	},
156 	{
157 		sec_deu_newsession,
158 		sec_deu_make_desc,
159 	},
160 	{
161 		sec_mdeu_newsession,
162 		sec_mdeu_make_desc,
163 	},
164 	{ NULL, NULL }
165 };
166 
167 static inline void
168 sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
169 {
170 
171 	/* Sync only if dma memory is valid */
172 	if (dma_mem->dma_vaddr != NULL)
173 		bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
174 }
175 
176 static inline void *
177 sec_get_pointer_data(struct sec_desc *desc, u_int n)
178 {
179 
180 	return (desc->sd_ptr_dmem[n].dma_vaddr);
181 }
182 
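/*
 * Check OF status and the "fsl,sec2.0" compatible string, map the register
 * window temporarily to read SEC_ID, and set the device description and
 * major version (2 or 3) according to the detected block revision.
 */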
183 static int
184 sec_probe(device_t dev)
185 {
186 	struct sec_softc *sc;
187 	uint64_t id;
188 
189 	if (!ofw_bus_status_okay(dev))
190 		return (ENXIO);
191 
192 	if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
193 		return (ENXIO);
194 
195 	sc = device_get_softc(dev);
196 
197 	sc->sc_rrid = 0;
198 	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
199 	    RF_ACTIVE);
200 
201 	if (sc->sc_rres == NULL)
202 		return (ENXIO);
203 
204 	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
205 	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
206 
207 	id = SEC_READ(sc, SEC_ID);
208 
209 	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
210 
211 	switch (id) {
212 	case SEC_20_ID:
213 		device_set_desc(dev, "Freescale Security Engine 2.0");
214 		sc->sc_version = 2;
215 		break;
216 	case SEC_30_ID:
217 		device_set_desc(dev, "Freescale Security Engine 3.0");
218 		sc->sc_version = 3;
219 		break;
220 	case SEC_31_ID:
221 		device_set_desc(dev, "Freescale Security Engine 3.1");
222 		sc->sc_version = 3;
223 		break;
224 	default:
225 		device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
226 		return (ENXIO);
227 	}
228 
229 	return (0);
230 }
231 
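/*
 * Attach: register with the crypto framework, set up locks, register space
 * and interrupts, allocate DMA memory for the hardware descriptors and the
 * circular link table, initialize the descriptor/link-table counters and
 * per-version masks, and finally reset and initialize the hardware.
 */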
232 static int
233 sec_attach(device_t dev)
234 {
235 	struct sec_softc *sc;
236 	struct sec_hw_lt *lt;
237 	int error = 0;
238 	int i;
239 
240 	sc = device_get_softc(dev);
241 	sc->sc_dev = dev;
242 	sc->sc_blocked = 0;
243 	sc->sc_shutdown = 0;
244 
245 	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session),
246 	    CRYPTOCAP_F_HARDWARE);
247 	if (sc->sc_cid < 0) {
248 		device_printf(dev, "could not get crypto driver ID!\n");
249 		return (ENXIO);
250 	}
251 
252 	/* Init locks */
253 	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
254 	    "SEC Controller lock", MTX_DEF);
255 	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
256 	    "SEC Descriptors lock", MTX_DEF);
257 
258 	/* Allocate I/O memory for SEC registers */
259 	sc->sc_rrid = 0;
260 	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
261 	    RF_ACTIVE);
262 
263 	if (sc->sc_rres == NULL) {
264 		device_printf(dev, "could not allocate I/O memory!\n");
265 		goto fail1;
266 	}
267 
268 	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
269 	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
270 
271 	/* Setup interrupts */
272 	sc->sc_pri_irid = 0;
273 	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
274 	    &sc->sc_pri_irid, sec_primary_intr, "primary");
275 
276 	if (error)
277 		goto fail2;
278 
280 	if (sc->sc_version == 3) {
281 		sc->sc_sec_irid = 1;
282 		error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
283 		    &sc->sc_sec_irid, sec_secondary_intr, "secondary");
284 
285 		if (error)
286 			goto fail3;
287 	}
288 
289 	/* Alloc DMA memory for descriptors and link tables */
290 	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
291 	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));
292 
293 	if (error)
294 		goto fail4;
295 
296 	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
297 	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));
298 
299 	if (error)
300 		goto fail5;
301 
302 	/* Fill in descriptors and link tables */
303 	for (i = 0; i < SEC_DESCRIPTORS; i++) {
304 		sc->sc_desc[i].sd_desc =
305 		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
306 		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
307 		    (i * sizeof(struct sec_hw_desc));
308 	}
309 
310 	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
311 		sc->sc_lt[i].sl_lt =
312 		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
313 		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
314 		    (i * sizeof(struct sec_hw_lt));
315 	}
316 
317 	/* The last link table entry points back to the first, closing the ring */
318 	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
319 	lt->shl_length = 0;
320 	lt->shl_r = 0;
321 	lt->shl_n = 1;
322 	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;
323 
324 	/* Initialize descriptor and link table queue counters */
325 	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
326 	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
327 	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
328 	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
329 	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
330 	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
331 	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
332 	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);
333 
334 	/* Create masks for fast checks */
335 	sc->sc_int_error_mask = 0;
336 	for (i = 0; i < SEC_CHANNELS; i++)
337 		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));
338 
339 	switch (sc->sc_version) {
340 	case 2:
341 		sc->sc_channel_idle_mask =
342 		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
343 		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
344 		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
345 		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
346 		break;
347 	case 3:
348 		sc->sc_channel_idle_mask =
349 		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
350 		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
351 		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
352 		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
353 		break;
354 	}
355 
356 	/* Init hardware */
357 	error = sec_init(sc);
358 
359 	if (error)
360 		goto fail6;
361 
362 	return (0);
363 
364 fail6:
365 	sec_free_dma_mem(&(sc->sc_lt_dmem));
366 fail5:
367 	sec_free_dma_mem(&(sc->sc_desc_dmem));
368 fail4:
369 	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
370 	    sc->sc_sec_irid, "secondary");
371 fail3:
372 	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
373 	    sc->sc_pri_irid, "primary");
374 fail2:
375 	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
376 fail1:
	crypto_unregister_all(sc->sc_cid);
377 	mtx_destroy(&sc->sc_controller_lock);
378 	mtx_destroy(&sc->sc_descriptors_lock);
379 
380 	return (ENXIO);
381 }
382 
383 static int
384 sec_detach(device_t dev)
385 {
386 	struct sec_softc *sc = device_get_softc(dev);
387 	int i, error, timeout = SEC_TIMEOUT;
388 
389 	/* Prepare driver to shutdown */
390 	SEC_LOCK(sc, descriptors);
391 	sc->sc_shutdown = 1;
392 	SEC_UNLOCK(sc, descriptors);
393 
394 	/* Wait until all queued processing finishes */
395 	while (1) {
396 		SEC_LOCK(sc, descriptors);
397 		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
398 		SEC_UNLOCK(sc, descriptors);
399 
400 		if (i == 0)
401 			break;
402 
403 		if (timeout < 0) {
404 			device_printf(dev, "queue flush timeout!\n");
405 
406 			/* DMA can still be active - stop it */
407 			for (i = 0; i < SEC_CHANNELS; i++)
408 				sec_channel_reset(sc, i, 1);
409 
410 			break;
411 		}
412 
413 		timeout -= 1000;
414 		DELAY(1000);
415 	}
416 
417 	/* Disable interrupts */
418 	SEC_WRITE(sc, SEC_IER, 0);
419 
420 	/* Unregister from OCF */
421 	crypto_unregister_all(sc->sc_cid);
422 
423 	/* Free DMA memory */
424 	for (i = 0; i < SEC_DESCRIPTORS; i++)
425 		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));
426 
427 	sec_free_dma_mem(&(sc->sc_lt_dmem));
428 	sec_free_dma_mem(&(sc->sc_desc_dmem));
429 
430 	/* Release interrupts */
431 	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
432 	    sc->sc_pri_irid, "primary");
433 	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
434 	    sc->sc_sec_irid, "secondary");
435 
436 	/* Release memory */
437 	if (sc->sc_rres) {
438 		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
439 		    sc->sc_rres);
440 		if (error)
441 			device_printf(dev, "bus_release_resource() failed for"
442 			    " I/O memory, error %d\n", error);
443 
444 		sc->sc_rres = NULL;
445 	}
446 
447 	mtx_destroy(&sc->sc_controller_lock);
448 	mtx_destroy(&sc->sc_descriptors_lock);
449 
450 	return (0);
451 }
452 
453 static int
454 sec_suspend(device_t dev)
455 {
456 
457 	return (0);
458 }
459 
460 static int
461 sec_resume(device_t dev)
462 {
463 
464 	return (0);
465 }
466 
467 static int
468 sec_shutdown(device_t dev)
469 {
470 
471 	return (0);
472 }
473 
474 static int
475 sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
476     int *irid, driver_intr_t handler, const char *iname)
477 {
478 	int error;
479 
480 	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
481 	    RF_ACTIVE);
482 
483 	if ((*ires) == NULL) {
484 		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
485 		return (ENXIO);
486 	}
487 
488 	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
489 	    NULL, handler, sc, ihand);
490 
491 	if (error) {
492 		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
493 		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
494 			device_printf(sc->sc_dev, "could not release %s IRQ\n",
495 			    iname);
496 
497 		(*ires) = NULL;
498 		return (error);
499 	}
500 
501 	return (0);
502 }
503 
504 static void
505 sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
506     int irid, const char *iname)
507 {
508 	int error;
509 
510 	if (ires == NULL)
511 		return;
512 
513 	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
514 	if (error)
515 		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
516 		    " IRQ, error %d\n", iname, error);
517 
518 	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
519 	if (error)
520 		device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
521 		    " IRQ, error %d\n", iname, error);
522 }
523 
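/*
 * Primary interrupt handler: on a channel error, mark the faulting
 * descriptor (looked up via the channel's current descriptor pointer) and do
 * a partial channel reset; then acknowledge the interrupt, complete finished
 * descriptors in queue order (copying back or verifying the digest), unblock
 * the crypto framework if needed and enqueue any ready descriptors.
 */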
524 static void
525 sec_primary_intr(void *arg)
526 {
527 	struct sec_session *ses;
528 	struct sec_softc *sc = arg;
529 	struct sec_desc *desc;
530 	struct cryptop *crp;
531 	uint64_t isr;
532 	uint8_t hash[HASH_MAX_LEN];
533 	int i, wakeup = 0;
534 
535 	SEC_LOCK(sc, controller);
536 
537 	/* Check for errors */
538 	isr = SEC_READ(sc, SEC_ISR);
539 	if (isr & sc->sc_int_error_mask) {
540 		/* Check each channel for error */
541 		for (i = 0; i < SEC_CHANNELS; i++) {
542 			if ((isr & SEC_INT_CH_ERR(i)) == 0)
543 				continue;
544 
545 			device_printf(sc->sc_dev,
546 			    "I/O error on channel %i!\n", i);
547 
548 			/* Find and mark problematic descriptor */
549 			desc = sec_find_desc(sc, SEC_READ(sc,
550 			    SEC_CHAN_CDPR(i)));
551 
552 			if (desc != NULL)
553 				desc->sd_error = EIO;
554 
555 			/* Do partial channel reset */
556 			sec_channel_reset(sc, i, 0);
557 		}
558 	}
559 
560 	/* ACK interrupt */
561 	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);
562 
563 	SEC_UNLOCK(sc, controller);
564 	SEC_LOCK(sc, descriptors);
565 
566 	/* Handle processed descriptors */
567 	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
568 
569 	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
570 		desc = SEC_GET_QUEUED_DESC(sc);
571 
572 		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
573 			SEC_PUT_BACK_QUEUED_DESC(sc);
574 			break;
575 		}
576 
577 		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
578 		    BUS_DMASYNC_PREWRITE);
579 
580 		crp = desc->sd_crp;
581 		crp->crp_etype = desc->sd_error;
582 		if (crp->crp_etype == 0) {
583 			ses = crypto_get_driver_session(crp->crp_session);
584 			if (ses->ss_mlen != 0) {
585 				if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
586 					crypto_copydata(crp,
587 					    crp->crp_digest_start,
588 					    ses->ss_mlen, hash);
589 					if (timingsafe_bcmp(
590 					    desc->sd_desc->shd_digest,
591 					    hash, ses->ss_mlen) != 0)
592 						crp->crp_etype = EBADMSG;
593 				} else
594 					crypto_copyback(crp,
595 					    crp->crp_digest_start,
596 					    ses->ss_mlen,
597 					    desc->sd_desc->shd_digest);
598 			}
599 		}
600 		crypto_done(desc->sd_crp);
601 
602 		SEC_DESC_FREE_POINTERS(desc);
603 		SEC_DESC_FREE_LT(sc, desc);
604 		SEC_DESC_QUEUED2FREE(sc);
605 	}
606 
607 	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
608 
609 	if (!sc->sc_shutdown) {
610 		wakeup = sc->sc_blocked;
611 		sc->sc_blocked = 0;
612 	}
613 
614 	SEC_UNLOCK(sc, descriptors);
615 
616 	/* Enqueue ready descriptors in hardware */
617 	sec_enqueue(sc);
618 
619 	if (wakeup)
620 		crypto_unblock(sc->sc_cid, wakeup);
621 }
622 
623 static void
624 sec_secondary_intr(void *arg)
625 {
626 	struct sec_softc *sc = arg;
627 
628 	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
629 	sec_primary_intr(arg);
630 }
631 
632 static int
633 sec_controller_reset(struct sec_softc *sc)
634 {
635 	int timeout = SEC_TIMEOUT;
636 
637 	/* Reset Controller */
638 	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
639 
640 	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
641 		DELAY(1000);
642 		timeout -= 1000;
643 
644 		if (timeout < 0) {
645 			device_printf(sc->sc_dev, "timeout while waiting for "
646 			    "device reset!\n");
647 			return (ETIMEDOUT);
648 		}
649 	}
650 
651 	return (0);
652 }
653 
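/*
 * Reset a single channel. A full reset sets SEC_CHAN_CCR_R and reprograms
 * the channel configuration afterwards; a partial reset only sets
 * SEC_CHAN_CCR_CON and leaves the configuration untouched.
 */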
654 static int
655 sec_channel_reset(struct sec_softc *sc, int channel, int full)
656 {
657 	int timeout = SEC_TIMEOUT;
658 	uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
659 	uint64_t reg;
660 
661 	/* Reset Channel */
662 	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
663 	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
664 
665 	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
666 		DELAY(1000);
667 		timeout -= 1000;
668 
669 		if (timeout < 0) {
670 			device_printf(sc->sc_dev, "timeout while waiting for "
671 			    "channel reset!\n");
672 			return (ETIMEDOUT);
673 		}
674 	}
675 
676 	if (full) {
677 		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
678 
679 		switch(sc->sc_version) {
680 		case 2:
681 			reg |= SEC_CHAN_CCR_CDWE;
682 			break;
683 		case 3:
684 			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
685 			break;
686 		}
687 
688 		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
689 	}
690 
691 	return (0);
692 }
693 
694 static int
695 sec_init(struct sec_softc *sc)
696 {
697 	uint64_t reg;
698 	int error, i;
699 
700 	/* Reset controller twice to clear all pending interrupts */
701 	error = sec_controller_reset(sc);
702 	if (error)
703 		return (error);
704 
705 	error = sec_controller_reset(sc);
706 	if (error)
707 		return (error);
708 
709 	/* Reset channels */
710 	for (i = 0; i < SEC_CHANNELS; i++) {
711 		error = sec_channel_reset(sc, i, 1);
712 		if (error)
713 			return (error);
714 	}
715 
716 	/* Enable Interrupts */
717 	reg = SEC_INT_ITO;
718 	for (i = 0; i < SEC_CHANNELS; i++)
719 		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
720 
721 	SEC_WRITE(sc, SEC_IER, reg);
722 
723 	return (error);
724 }
725 
726 static void
727 sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
728 {
729 	struct sec_dma_mem *dma_mem = arg;
730 
731 	if (error)
732 		return;
733 
734 	KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
735 	dma_mem->dma_paddr = segs->ds_addr;
736 }
737 
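/*
 * busdma callback for crypto request data: walk the DMA segment list, skip
 * the first sdmi_offset bytes, and build one link table entry per (partial)
 * segment until sdmi_size bytes have been linked, recording the first and
 * last entries and the number of entries used.
 */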
738 static void
739 sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
740     int error)
741 {
742 	struct sec_desc_map_info *sdmi = arg;
743 	struct sec_softc *sc = sdmi->sdmi_sc;
744 	struct sec_lt *lt = NULL;
745 	bus_addr_t addr;
746 	bus_size_t size;
747 	int i;
748 
749 	SEC_LOCK_ASSERT(sc, descriptors);
750 
751 	if (error)
752 		return;
753 
754 	for (i = 0; i < nseg; i++) {
755 		addr = segs[i].ds_addr;
756 		size = segs[i].ds_len;
757 
758 		/* Skip requested offset */
759 		if (sdmi->sdmi_offset >= size) {
760 			sdmi->sdmi_offset -= size;
761 			continue;
762 		}
763 
764 		addr += sdmi->sdmi_offset;
765 		size -= sdmi->sdmi_offset;
766 		sdmi->sdmi_offset = 0;
767 
768 		/* Do not link more than requested */
769 		if (sdmi->sdmi_size < size)
770 			size = sdmi->sdmi_size;
771 
772 		lt = SEC_ALLOC_LT_ENTRY(sc);
773 		lt->sl_lt->shl_length = size;
774 		lt->sl_lt->shl_r = 0;
775 		lt->sl_lt->shl_n = 0;
776 		lt->sl_lt->shl_ptr = addr;
777 
778 		if (sdmi->sdmi_lt_first == NULL)
779 			sdmi->sdmi_lt_first = lt;
780 
781 		sdmi->sdmi_lt_used += 1;
782 
783 		if ((sdmi->sdmi_size -= size) == 0)
784 			break;
785 	}
786 
787 	sdmi->sdmi_lt_last = lt;
788 }
789 
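/*
 * Allocate a single physically contiguous, 32-bit addressable, zeroed DMA
 * buffer of the given size and record its bus address in dma_mem.
 */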
790 static int
791 sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
792     bus_size_t size)
793 {
794 	int error;
795 
796 	if (dma_mem->dma_vaddr != NULL)
797 		return (EBUSY);
798 
799 	error = bus_dma_tag_create(NULL,	/* parent */
800 		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
801 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
802 		BUS_SPACE_MAXADDR,		/* highaddr */
803 		NULL, NULL,			/* filtfunc, filtfuncarg */
804 		size, 1,			/* maxsize, nsegments */
805 		size, 0,			/* maxsegsz, flags */
806 		NULL, NULL,			/* lockfunc, lockfuncarg */
807 		&(dma_mem->dma_tag));		/* dmat */
808 
809 	if (error) {
810 		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
811 		    " %i!\n", error);
812 		goto err1;
813 	}
814 
815 	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
816 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));
817 
818 	if (error) {
819 		device_printf(sc->sc_dev, "failed to allocate DMA safe"
820 		    " memory, error %i!\n", error);
821 		goto err2;
822 	}
823 
824 	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
825 		    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
826 		    BUS_DMA_NOWAIT);
827 
828 	if (error) {
829 		device_printf(sc->sc_dev, "cannot get address of the DMA"
830 		    " memory, error %i\n", error);
831 		goto err3;
832 	}
833 
834 	dma_mem->dma_is_map = 0;
835 	return (0);
836 
837 err3:
838 	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
839 err2:
840 	bus_dma_tag_destroy(dma_mem->dma_tag);
841 err1:
842 	dma_mem->dma_vaddr = NULL;
843 	return(error);
844 }
845 
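/*
 * Create a DMA tag and map for the crypto request buffer and load it through
 * sec_dma_map_desc_cb(), which builds the link table entries describing the
 * data as part of the mapping.
 */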
846 static int
847 sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
848     struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi)
849 {
850 	int error;
851 
852 	if (dma_mem->dma_vaddr != NULL)
853 		return (EBUSY);
854 
855 	switch (crp->crp_buf_type) {
856 	case CRYPTO_BUF_CONTIG:
857 		break;
858 	case CRYPTO_BUF_UIO:
859 		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
860 		break;
861 	case CRYPTO_BUF_MBUF:
862 		size = m_length(crp->crp_mbuf, NULL);
863 		break;
864 	default:
865 		return (EINVAL);
866 	}
867 
868 	error = bus_dma_tag_create(NULL,	/* parent */
869 		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
870 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
871 		BUS_SPACE_MAXADDR,		/* highaddr */
872 		NULL, NULL,			/* filtfunc, filtfuncarg */
873 		size,				/* maxsize */
874 		SEC_FREE_LT_CNT(sc),		/* nsegments */
875 		SEC_MAX_DMA_BLOCK_SIZE, 0,	/* maxsegsz, flags */
876 		NULL, NULL,			/* lockfunc, lockfuncarg */
877 		&(dma_mem->dma_tag));		/* dmat */
878 
879 	if (error) {
880 		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
881 		    " %i!\n", error);
882 		dma_mem->dma_vaddr = NULL;
883 		return (error);
884 	}
885 
886 	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
887 
888 	if (error) {
889 		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
890 		    "\n", error);
891 		bus_dma_tag_destroy(dma_mem->dma_tag);
892 		return (error);
893 	}
894 
895 	error = bus_dmamap_load_crp(dma_mem->dma_tag, dma_mem->dma_map, crp,
896 	    sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
897 
898 	if (error) {
899 		device_printf(sc->sc_dev, "cannot get address of the DMA"
900 		    " memory, error %i!\n", error);
901 		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
902 		bus_dma_tag_destroy(dma_mem->dma_tag);
903 		return (error);
904 	}
905 
906 	dma_mem->dma_is_map = 1;
907 	dma_mem->dma_vaddr = crp;
908 
909 	return (0);
910 }
911 
912 static void
913 sec_free_dma_mem(struct sec_dma_mem *dma_mem)
914 {
915 
916 	/* Check for double free */
917 	if (dma_mem->dma_vaddr == NULL)
918 		return;
919 
920 	bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
921 
922 	if (dma_mem->dma_is_map)
923 		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
924 	else
925 		bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
926 		    dma_mem->dma_map);
927 
928 	bus_dma_tag_destroy(dma_mem->dma_tag);
929 	dma_mem->dma_vaddr = NULL;
930 }
931 
932 static int
933 sec_eu_channel(struct sec_softc *sc, int eu)
934 {
935 	uint64_t reg;
936 	int channel = 0;
937 
938 	SEC_LOCK_ASSERT(sc, controller);
939 
940 	reg = SEC_READ(sc, SEC_EUASR);
941 
942 	switch (eu) {
943 	case SEC_EU_AFEU:
944 		channel = SEC_EUASR_AFEU(reg);
945 		break;
946 	case SEC_EU_DEU:
947 		channel = SEC_EUASR_DEU(reg);
948 		break;
949 	case SEC_EU_MDEU_A:
950 	case SEC_EU_MDEU_B:
951 		channel = SEC_EUASR_MDEU(reg);
952 		break;
953 	case SEC_EU_RNGU:
954 		channel = SEC_EUASR_RNGU(reg);
955 		break;
956 	case SEC_EU_PKEU:
957 		channel = SEC_EUASR_PKEU(reg);
958 		break;
959 	case SEC_EU_AESU:
960 		channel = SEC_EUASR_AESU(reg);
961 		break;
962 	case SEC_EU_KEU:
963 		channel = SEC_EUASR_KEU(reg);
964 		break;
965 	case SEC_EU_CRCU:
966 		channel = SEC_EUASR_CRCU(reg);
967 		break;
968 	}
969 
970 	return (channel - 1);
971 }
972 
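/*
 * Push a descriptor into a channel's fetch FIFO. If no channel is given,
 * pick the first idle one. Returns the channel used, or -1 if no channel is
 * available or the fetch FIFO of the selected channel is full.
 */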
973 static int
974 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
975 {
976 	u_int fflvl = SEC_MAX_FIFO_LEVEL;
977 	uint64_t reg;
978 	int i;
979 
980 	SEC_LOCK_ASSERT(sc, controller);
981 
982 	/* Find a free channel if the caller did not specify one */
983 	if (channel < 0) {
984 		for (i = 0; i < SEC_CHANNELS; i++) {
985 			reg = SEC_READ(sc, SEC_CHAN_CSR(i));
986 
987 			if ((reg & sc->sc_channel_idle_mask) == 0) {
988 				channel = i;
989 				break;
990 			}
991 		}
992 	}
993 
994 	/* There is no free channel */
995 	if (channel < 0)
996 		return (-1);
997 
998 	/* Check FIFO level on selected channel */
999 	reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1000 
1001 	switch(sc->sc_version) {
1002 	case 2:
1003 		fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
1004 		break;
1005 	case 3:
1006 		fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1007 		break;
1008 	}
1009 
1010 	if (fflvl >= SEC_MAX_FIFO_LEVEL)
1011 		return (-1);
1012 
1013 	/* Enqueue descriptor in channel */
1014 	SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1015 
1016 	return (channel);
1017 }
1018 
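/*
 * Move ready descriptors to the hardware. A descriptor is queued on the
 * channel that already holds its execution unit(s), or on any idle channel
 * when both EUs are free; if it cannot be queued now, it is put back and
 * enqueueing stops.
 */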
1019 static void
1020 sec_enqueue(struct sec_softc *sc)
1021 {
1022 	struct sec_desc *desc;
1023 	int ch0, ch1;
1024 
1025 	SEC_LOCK(sc, descriptors);
1026 	SEC_LOCK(sc, controller);
1027 
1028 	while (SEC_READY_DESC_CNT(sc) > 0) {
1029 		desc = SEC_GET_READY_DESC(sc);
1030 
1031 		ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
1032 		ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);
1033 
1034 		/*
1035 		 * Both EUs are used by the same channel.
1036 		 * Enqueue the descriptor on the channel used by the busy EUs.
1037 		 */
1038 		if (ch0 >= 0 && ch0 == ch1) {
1039 			if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
1040 				SEC_DESC_READY2QUEUED(sc);
1041 				continue;
1042 			}
1043 		}
1044 
1045 		/*
1046 		 * Only one EU is free.
1047 		 * Enqueue the descriptor on the channel used by the busy EU.
1048 		 */
1049 		if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
1050 			if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
1051 			    >= 0) {
1052 				SEC_DESC_READY2QUEUED(sc);
1053 				continue;
1054 			}
1055 		}
1056 
1057 		/*
1058 		 * Both EUs are free.
1059 		 * Enqueue the descriptor on the first idle channel.
1060 		 */
1061 		if (ch0 < 0 && ch1 < 0) {
1062 			if (sec_enqueue_desc(sc, desc, -1) >= 0) {
1063 				SEC_DESC_READY2QUEUED(sc);
1064 				continue;
1065 			}
1066 		}
1067 
1068 		/* The current descriptor cannot be queued at the moment */
1069 		SEC_PUT_BACK_READY_DESC(sc);
1070 		break;
1071 	}
1072 
1073 	SEC_UNLOCK(sc, controller);
1074 	SEC_UNLOCK(sc, descriptors);
1075 }
1076 
1077 static struct sec_desc *
1078 sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1079 {
1080 	struct sec_desc *desc = NULL;
1081 	int i;
1082 
1083 	SEC_LOCK_ASSERT(sc, descriptors);
1084 
1085 	for (i = 0; i < SEC_DESCRIPTORS; i++) {
1086 		if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1087 			desc = &(sc->sc_desc[i]);
1088 			break;
1089 		}
1090 	}
1091 
1092 	return (desc);
1093 }
1094 
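/*
 * Fill descriptor pointer n with a direct reference to a physically
 * contiguous buffer (no link table).
 */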
1095 static int
1096 sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1097     bus_addr_t data, bus_size_t dsize)
1098 {
1099 	struct sec_hw_desc_ptr *ptr;
1100 
1101 	SEC_LOCK_ASSERT(sc, descriptors);
1102 
1103 	ptr = &(desc->sd_desc->shd_pointer[n]);
1104 	ptr->shdp_length = dsize;
1105 	ptr->shdp_extent = 0;
1106 	ptr->shdp_j = 0;
1107 	ptr->shdp_ptr = data;
1108 
1109 	return (0);
1110 }
1111 
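/*
 * Fill descriptor pointer n with a scatter/gather reference to crp data:
 * map the request, let the callback build a link table chain, mark its last
 * entry as final and point the descriptor at the first entry.
 */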
1112 static int
1113 sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1114     u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize)
1115 {
1116 	struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1117 	struct sec_hw_desc_ptr *ptr;
1118 	int error;
1119 
1120 	SEC_LOCK_ASSERT(sc, descriptors);
1121 
1122 	error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), crp, dsize,
1123 	    &sdmi);
1124 
1125 	if (error)
1126 		return (error);
1127 
1128 	sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1129 	desc->sd_lt_used += sdmi.sdmi_lt_used;
1130 
1131 	ptr = &(desc->sd_desc->shd_pointer[n]);
1132 	ptr->shdp_length = dsize;
1133 	ptr->shdp_extent = 0;
1134 	ptr->shdp_j = 1;
1135 	ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
1136 
1137 	return (0);
1138 }
1139 
1140 static bool
1141 sec_cipher_supported(const struct crypto_session_params *csp)
1142 {
1143 
1144 	switch (csp->csp_cipher_alg) {
1145 	case CRYPTO_AES_CBC:
1146 		/* AESU */
1147 		if (csp->csp_ivlen != AES_BLOCK_LEN)
1148 			return (false);
1149 		break;
1150 	case CRYPTO_DES_CBC:
1151 	case CRYPTO_3DES_CBC:
1152 		/* DEU */
1153 		if (csp->csp_ivlen != DES_BLOCK_LEN)
1154 			return (false);
1155 		break;
1156 	default:
1157 		return (false);
1158 	}
1159 
1160 	if (csp->csp_cipher_klen == 0 || csp->csp_cipher_klen > SEC_MAX_KEY_LEN)
1161 		return (false);
1162 
1163 	return (true);
1164 }
1165 
1166 static bool
1167 sec_auth_supported(struct sec_softc *sc,
1168     const struct crypto_session_params *csp)
1169 {
1170 
1171 	switch (csp->csp_auth_alg) {
1172 	case CRYPTO_SHA2_384_HMAC:
1173 	case CRYPTO_SHA2_512_HMAC:
1174 		if (sc->sc_version < 3)
1175 			return (false);
1176 		/* FALLTHROUGH */
1177 	case CRYPTO_MD5_HMAC:
1178 	case CRYPTO_SHA1_HMAC:
1179 	case CRYPTO_SHA2_256_HMAC:
1180 		if (csp->csp_auth_klen > SEC_MAX_KEY_LEN)
1181 			return (false);
1182 		break;
1183 	case CRYPTO_MD5:
1184 	case CRYPTO_SHA1:
1185 		break;
1186 	default:
1187 		return (false);
1188 	}
1189 	return (true);
1190 }
1191 
1192 static int
1193 sec_probesession(device_t dev, const struct crypto_session_params *csp)
1194 {
1195 	struct sec_softc *sc = device_get_softc(dev);
1196 
1197 	if (csp->csp_flags != 0)
1198 		return (EINVAL);
1199 	switch (csp->csp_mode) {
1200 	case CSP_MODE_DIGEST:
1201 		if (!sec_auth_supported(sc, csp))
1202 			return (EINVAL);
1203 		break;
1204 	case CSP_MODE_CIPHER:
1205 		if (!sec_cipher_supported(csp))
1206 			return (EINVAL);
1207 		break;
1208 	case CSP_MODE_ETA:
1209 		if (!sec_auth_supported(sc, csp) || !sec_cipher_supported(csp))
1210 			return (EINVAL);
1211 		break;
1212 	default:
1213 		return (EINVAL);
1214 	}
1215 	return (CRYPTODEV_PROBE_HARDWARE);
1216 }
1217 
1218 static int
1219 sec_newsession(device_t dev, crypto_session_t cses,
1220     const struct crypto_session_params *csp)
1221 {
1222 	struct sec_eu_methods *eu = sec_eus;
1223 	struct sec_session *ses;
1224 
1225 	ses = crypto_get_driver_session(cses);
1226 
1227 	/* Find EU for this session */
1228 	while (eu->sem_make_desc != NULL) {
1229 		if (eu->sem_newsession(csp))
1230 			break;
1231 		eu++;
1232 	}
1233 	KASSERT(eu->sem_make_desc != NULL, ("failed to find eu for session"));
1234 
1235 	/* Save cipher key */
1236 	if (csp->csp_cipher_key != NULL)
1237 		memcpy(ses->ss_key, csp->csp_cipher_key, csp->csp_cipher_klen);
1238 
1239 	/* Save digest key */
1240 	if (csp->csp_auth_key != NULL)
1241 		memcpy(ses->ss_mkey, csp->csp_auth_key, csp->csp_auth_klen);
1242 
1243 	if (csp->csp_auth_alg != 0) {
1244 		if (csp->csp_auth_mlen == 0)
1245 			ses->ss_mlen = crypto_auth_hash(csp)->hashsize;
1246 		else
1247 			ses->ss_mlen = csp->csp_auth_mlen;
1248 	}
1249 
1250 	return (0);
1251 }
1252 
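/*
 * Crypto framework processing entry point: reject oversized requests, grab
 * a free descriptor (or return ERESTART when none is available), load the IV
 * and any per-request keys, let the session's EU build the hardware
 * descriptor, select DONE signaling and move the descriptor to the ready
 * queue before calling sec_enqueue().
 */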
1253 static int
1254 sec_process(device_t dev, struct cryptop *crp, int hint)
1255 {
1256 	struct sec_softc *sc = device_get_softc(dev);
1257 	struct sec_desc *desc = NULL;
1258 	const struct crypto_session_params *csp;
1259 	struct sec_session *ses;
1260 	int error = 0;
1261 
1262 	ses = crypto_get_driver_session(crp->crp_session);
1263 	csp = crypto_get_params(crp->crp_session);
1264 
1265 	/* Check for input length */
1266 	if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
1267 		crp->crp_etype = E2BIG;
1268 		crypto_done(crp);
1269 		return (0);
1270 	}
1271 
1272 	SEC_LOCK(sc, descriptors);
1273 	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1274 
1275 	/* Block the driver if there are no free descriptors or we are going down */
1276 	if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
1277 		sc->sc_blocked |= CRYPTO_SYMQ;
1278 		SEC_UNLOCK(sc, descriptors);
1279 		return (ERESTART);
1280 	}
1281 
1282 	/* Prepare descriptor */
1283 	desc = SEC_GET_FREE_DESC(sc);
1284 	desc->sd_lt_used = 0;
1285 	desc->sd_error = 0;
1286 	desc->sd_crp = crp;
1287 
1288 	if (csp->csp_cipher_alg != 0) {
1289 		if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
1290 			arc4rand(desc->sd_desc->shd_iv, csp->csp_ivlen, 0);
1291 			crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen,
1292 			    desc->sd_desc->shd_iv);
1293 		} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
1294 			memcpy(desc->sd_desc->shd_iv, crp->crp_iv,
1295 			    csp->csp_ivlen);
1296 		else
1297 			crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen,
1298 			    desc->sd_desc->shd_iv);
1299 	}
1300 
1301 	if (crp->crp_cipher_key != NULL)
1302 		memcpy(ses->ss_key, crp->crp_cipher_key, csp->csp_cipher_klen);
1303 
1304 	if (crp->crp_auth_key != NULL)
1305 		memcpy(ses->ss_mkey, crp->crp_auth_key, csp->csp_auth_klen);
1306 
1307 	memcpy(desc->sd_desc->shd_key, ses->ss_key, csp->csp_cipher_klen);
1308 	memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, csp->csp_auth_klen);
1309 
1310 	error = ses->ss_eu->sem_make_desc(sc, csp, desc, crp);
1311 
1312 	if (error) {
1313 		SEC_DESC_FREE_POINTERS(desc);
1314 		SEC_DESC_PUT_BACK_LT(sc, desc);
1315 		SEC_PUT_BACK_FREE_DESC(sc);
1316 		SEC_UNLOCK(sc, descriptors);
1317 		crp->crp_etype = error;
1318 		crypto_done(crp);
1319 		return (0);
1320 	}
1321 
1322 	/*
1323 	 * Skip the DONE interrupt if this is not the last request in a burst,
1324 	 * but only when running on SEC 3.x. On SEC 2.x we have to enable DONE
1325 	 * signaling on each descriptor.
1326 	 */
1327 	if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
1328 		desc->sd_desc->shd_dn = 0;
1329 	else
1330 		desc->sd_desc->shd_dn = 1;
1331 
1332 	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1333 	SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
1334 	    BUS_DMASYNC_POSTWRITE);
1335 	SEC_DESC_FREE2READY(sc);
1336 	SEC_UNLOCK(sc, descriptors);
1337 
1338 	/* Enqueue ready descriptors in hardware */
1339 	sec_enqueue(sc);
1340 
1341 	return (0);
1342 }
1343 
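/*
 * Build the pointer section of a common non-snooping (cipher-only)
 * descriptor: IV in, cipher key and payload in/out, with the remaining
 * pointers set to NULL.
 */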
1344 static int
1345 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1346     const struct crypto_session_params *csp, struct cryptop *crp)
1347 {
1348 	struct sec_hw_desc *hd = desc->sd_desc;
1349 	int error;
1350 
1351 	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1352 	hd->shd_eu_sel1 = SEC_EU_NONE;
1353 	hd->shd_mode1 = 0;
1354 
1355 	/* Pointer 0: NULL */
1356 	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1357 	if (error)
1358 		return (error);
1359 
1360 	/* Pointer 1: IV IN */
1361 	error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1362 	    offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
1363 	if (error)
1364 		return (error);
1365 
1366 	/* Pointer 2: Cipher Key */
1367 	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1368 	    offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
1369 	if (error)
1370 		return (error);
1371 
1372 	/* Pointer 3: Data IN */
1373 	error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
1374 	    crp->crp_payload_length);
1375 	if (error)
1376 		return (error);
1377 
1378 	/* Pointer 4: Data OUT */
1379 	error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
1380 	    crp->crp_payload_length);
1381 	if (error)
1382 		return (error);
1383 
1384 	/* Pointer 5: IV OUT (Not used: NULL) */
1385 	error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1386 	if (error)
1387 		return (error);
1388 
1389 	/* Pointer 6: NULL */
1390 	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1391 
1392 	return (error);
1393 }
1394 
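/*
 * Build the pointer section of an HMAC-snooping descriptor used for ETA
 * sessions: HMAC key, AAD, cipher key, IV, payload in/out and digest out,
 * with the MDEU selected as the secondary execution unit.
 */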
1395 static int
1396 sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1397     const struct crypto_session_params *csp, struct cryptop *crp)
1398 {
1399 	struct sec_hw_desc *hd = desc->sd_desc;
1400 	u_int eu, mode, hashlen;
1401 	int error;
1402 
1403 	error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
1404 	if (error)
1405 		return (error);
1406 
1407 	hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1408 	hd->shd_eu_sel1 = eu;
1409 	hd->shd_mode1 = mode;
1410 
1411 	/* Pointer 0: HMAC Key */
1412 	error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1413 	    offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen);
1414 	if (error)
1415 		return (error);
1416 
1417 	/* Pointer 1: HMAC-Only Data IN */
1418 	error = sec_make_pointer(sc, desc, 1, crp, crp->crp_aad_start,
1419 	    crp->crp_aad_length);
1420 	if (error)
1421 		return (error);
1422 
1423 	/* Pointer 2: Cipher Key */
1424 	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1425 	    offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
1426 	if (error)
1427 		return (error);
1428 
1429 	/* Pointer 3: IV IN */
1430 	error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1431 	    offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
1432 	if (error)
1433 		return (error);
1434 
1435 	/* Pointer 4: Data IN */
1436 	error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
1437 	    crp->crp_payload_length);
1438 	if (error)
1439 		return (error);
1440 
1441 	/* Pointer 5: Data OUT */
1442 	error = sec_make_pointer(sc, desc, 5, crp, crp->crp_payload_start,
1443 	    crp->crp_payload_length);
1444 	if (error)
1445 		return (error);
1446 
1447 	/* Pointer 6: HMAC OUT */
1448 	error = sec_make_pointer_direct(sc, desc, 6, desc->sd_desc_paddr +
1449 	    offsetof(struct sec_hw_desc, shd_digest), hashlen);
1450 
1451 	return (error);
1452 }
1453 
1454 /* AESU */
1455 
1456 static bool
1457 sec_aesu_newsession(const struct crypto_session_params *csp)
1458 {
1459 
1460 	return (csp->csp_cipher_alg == CRYPTO_AES_CBC);
1461 }
1462 
1463 static int
1464 sec_aesu_make_desc(struct sec_softc *sc,
1465     const struct crypto_session_params *csp, struct sec_desc *desc,
1466     struct cryptop *crp)
1467 {
1468 	struct sec_hw_desc *hd = desc->sd_desc;
1469 	int error;
1470 
1471 	hd->shd_eu_sel0 = SEC_EU_AESU;
1472 	hd->shd_mode0 = SEC_AESU_MODE_CBC;
1473 
1474 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
1475 		hd->shd_mode0 |= SEC_AESU_MODE_ED;
1476 		hd->shd_dir = 0;
1477 	} else
1478 		hd->shd_dir = 1;
1479 
1480 	if (csp->csp_mode == CSP_MODE_ETA)
1481 		error = sec_build_common_s_desc(sc, desc, csp, crp);
1482 	else
1483 		error = sec_build_common_ns_desc(sc, desc, csp, crp);
1484 
1485 	return (error);
1486 }
1487 
1488 /* DEU */
1489 
1490 static bool
1491 sec_deu_newsession(const struct crypto_session_params *csp)
1492 {
1493 
1494 	switch (csp->csp_cipher_alg) {
1495 	case CRYPTO_DES_CBC:
1496 	case CRYPTO_3DES_CBC:
1497 		return (true);
1498 	default:
1499 		return (false);
1500 	}
1501 }
1502 
1503 static int
1504 sec_deu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp,
1505     struct sec_desc *desc, struct cryptop *crp)
1506 {
1507 	struct sec_hw_desc *hd = desc->sd_desc;
1508 	int error;
1509 
1510 	hd->shd_eu_sel0 = SEC_EU_DEU;
1511 	hd->shd_mode0 = SEC_DEU_MODE_CBC;
1512 
1513 	switch (csp->csp_cipher_alg) {
1514 	case CRYPTO_3DES_CBC:
1515 		hd->shd_mode0 |= SEC_DEU_MODE_TS;
1516 		break;
1517 	case CRYPTO_DES_CBC:
1518 		break;
1519 	default:
1520 		return (EINVAL);
1521 	}
1522 
1523 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
1524 		hd->shd_mode0 |= SEC_DEU_MODE_ED;
1525 		hd->shd_dir = 0;
1526 	} else
1527 		hd->shd_dir = 1;
1528 
1529 	if (csp->csp_mode == CSP_MODE_ETA)
1530 		error = sec_build_common_s_desc(sc, desc, csp, crp);
1531 	else
1532 		error = sec_build_common_ns_desc(sc, desc, csp, crp);
1533 
1534 	return (error);
1535 }
1536 
1537 /* MDEU */
1538 
1539 static bool
1540 sec_mdeu_can_handle(u_int alg)
1541 {
1542 	switch (alg) {
1543 	case CRYPTO_MD5:
1544 	case CRYPTO_SHA1:
1545 	case CRYPTO_MD5_HMAC:
1546 	case CRYPTO_SHA1_HMAC:
1547 	case CRYPTO_SHA2_256_HMAC:
1548 	case CRYPTO_SHA2_384_HMAC:
1549 	case CRYPTO_SHA2_512_HMAC:
1550 		return (true);
1551 	default:
1552 		return (false);
1553 	}
1554 }
1555 
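/*
 * Translate the session's auth algorithm into the MDEU selection, mode bits
 * and the length of the hash produced by the hardware.
 */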
1556 static int
1557 sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode,
1558     u_int *hashlen)
1559 {
1560 
1561 	*mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1562 	*eu = SEC_EU_NONE;
1563 
1564 	switch (csp->csp_auth_alg) {
1565 	case CRYPTO_MD5_HMAC:
1566 		*mode |= SEC_MDEU_MODE_HMAC;
1567 		/* FALLTHROUGH */
1568 	case CRYPTO_MD5:
1569 		*eu = SEC_EU_MDEU_A;
1570 		*mode |= SEC_MDEU_MODE_MD5;
1571 		*hashlen = MD5_HASH_LEN;
1572 		break;
1573 	case CRYPTO_SHA1_HMAC:
1574 		*mode |= SEC_MDEU_MODE_HMAC;
1575 		/* FALLTHROUGH */
1576 	case CRYPTO_SHA1:
1577 		*eu = SEC_EU_MDEU_A;
1578 		*mode |= SEC_MDEU_MODE_SHA1;
1579 		*hashlen = SHA1_HASH_LEN;
1580 		break;
1581 	case CRYPTO_SHA2_256_HMAC:
1582 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1583 		*eu = SEC_EU_MDEU_A;
1584 		break;
1585 	case CRYPTO_SHA2_384_HMAC:
1586 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1587 		*eu = SEC_EU_MDEU_B;
1588 		break;
1589 	case CRYPTO_SHA2_512_HMAC:
1590 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1591 		*eu = SEC_EU_MDEU_B;
1592 		break;
1593 	default:
1594 		return (EINVAL);
1595 	}
1596 
1597 	if (*mode & SEC_MDEU_MODE_HMAC)
1598 		*hashlen = SEC_HMAC_HASH_LEN;
1599 
1600 	return (0);
1601 }
1602 
1603 static bool
1604 sec_mdeu_newsession(const struct crypto_session_params *csp)
1605 {
1606 
1607 	return (sec_mdeu_can_handle(csp->csp_auth_alg));
1608 }
1609 
1610 static int
1611 sec_mdeu_make_desc(struct sec_softc *sc,
1612     const struct crypto_session_params *csp,
1613     struct sec_desc *desc, struct cryptop *crp)
1614 {
1615 	struct sec_hw_desc *hd = desc->sd_desc;
1616 	u_int eu, mode, hashlen;
1617 	int error;
1618 
1619 	error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
1620 	if (error)
1621 		return (error);
1622 
1623 	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1624 	hd->shd_eu_sel0 = eu;
1625 	hd->shd_mode0 = mode;
1626 	hd->shd_eu_sel1 = SEC_EU_NONE;
1627 	hd->shd_mode1 = 0;
1628 
1629 	/* Pointer 0: NULL */
1630 	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1631 	if (error)
1632 		return (error);
1633 
1634 	/* Pointer 1: Context In (Not used: NULL) */
1635 	error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1636 	if (error)
1637 		return (error);
1638 
1639 	/* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1640 	if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1641 		error = sec_make_pointer_direct(sc, desc, 2,
1642 		    desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1643 		    shd_mkey), csp->csp_auth_klen);
1644 	else
1645 		error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1646 
1647 	if (error)
1648 		return (error);
1649 
1650 	/* Pointer 3: Input Data */
1651 	error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
1652 	    crp->crp_payload_length);
1653 	if (error)
1654 		return (error);
1655 
1656 	/* Pointer 4: NULL */
1657 	error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1658 	if (error)
1659 		return (error);
1660 
1661 	/* Pointer 5: Hash out */
1662 	error = sec_make_pointer_direct(sc, desc, 5, desc->sd_desc_paddr +
1663 	    offsetof(struct sec_hw_desc, shd_digest), hashlen);
1664 	if (error)
1665 		return (error);
1666 
1667 	/* Pointer 6: NULL */
1668 	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1669 
1670 	return (error);
1671 }
1672