xref: /freebsd/sys/dev/sec/sec.c (revision f6385d921b2f354d71256d1d0392122597e0fd33)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
19  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and
30  * 3.0 are supported.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mbuf.h>
44 #include <sys/module.h>
45 #include <sys/mutex.h>
46 #include <sys/random.h>
47 #include <sys/rman.h>
48 
49 #include <machine/_inttypes.h>
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 
53 #include <opencrypto/cryptodev.h>
54 #include <opencrypto/xform_auth.h>
55 #include "cryptodev_if.h"
56 
57 #include <dev/ofw/ofw_bus_subr.h>
58 #include <dev/sec/sec.h>
59 
60 static int	sec_probe(device_t dev);
61 static int	sec_attach(device_t dev);
62 static int	sec_detach(device_t dev);
63 static int	sec_suspend(device_t dev);
64 static int	sec_resume(device_t dev);
65 static int	sec_shutdown(device_t dev);
66 static void	sec_primary_intr(void *arg);
67 static void	sec_secondary_intr(void *arg);
68 static int	sec_setup_intr(struct sec_softc *sc, struct resource **ires,
69     void **ihand, int *irid, driver_intr_t handler, const char *iname);
70 static void	sec_release_intr(struct sec_softc *sc, struct resource *ires,
71     void *ihand, int irid, const char *iname);
72 static int	sec_controller_reset(struct sec_softc *sc);
73 static int	sec_channel_reset(struct sec_softc *sc, int channel, int full);
74 static int	sec_init(struct sec_softc *sc);
75 static int	sec_alloc_dma_mem(struct sec_softc *sc,
76     struct sec_dma_mem *dma_mem, bus_size_t size);
77 static int	sec_desc_map_dma(struct sec_softc *sc,
78     struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size,
79     struct sec_desc_map_info *sdmi);
80 static void	sec_free_dma_mem(struct sec_dma_mem *dma_mem);
81 static void	sec_enqueue(struct sec_softc *sc);
82 static int	sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
83     int channel);
84 static int	sec_eu_channel(struct sec_softc *sc, int eu);
85 static int	sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
86     u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize);
87 static int	sec_make_pointer_direct(struct sec_softc *sc,
88     struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
89 static int	sec_probesession(device_t dev,
90     const struct crypto_session_params *csp);
91 static int	sec_newsession(device_t dev, crypto_session_t cses,
92     const struct crypto_session_params *csp);
93 static int	sec_process(device_t dev, struct cryptop *crp, int hint);
94 static int	sec_build_common_ns_desc(struct sec_softc *sc,
95     struct sec_desc *desc, const struct crypto_session_params *csp,
96     struct cryptop *crp);
97 static int	sec_build_common_s_desc(struct sec_softc *sc,
98     struct sec_desc *desc, const struct crypto_session_params *csp,
99     struct cryptop *crp);
100 
101 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
102 
103 /* AESU */
104 static bool	sec_aesu_newsession(const struct crypto_session_params *csp);
105 static int	sec_aesu_make_desc(struct sec_softc *sc,
106     const struct crypto_session_params *csp, struct sec_desc *desc,
107     struct cryptop *crp);
108 
109 /* MDEU */
110 static bool	sec_mdeu_can_handle(u_int alg);
111 static int	sec_mdeu_config(const struct crypto_session_params *csp,
112     u_int *eu, u_int *mode, u_int *hashlen);
113 static bool	sec_mdeu_newsession(const struct crypto_session_params *csp);
114 static int	sec_mdeu_make_desc(struct sec_softc *sc,
115     const struct crypto_session_params *csp, struct sec_desc *desc,
116     struct cryptop *crp);
117 
/* Dispatch table wiring the driver into newbus and the opencrypto KPI. */
static device_method_t sec_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sec_probe),
	DEVMETHOD(device_attach,	sec_attach),
	DEVMETHOD(device_detach,	sec_detach),

	DEVMETHOD(device_suspend,	sec_suspend),
	DEVMETHOD(device_resume,	sec_resume),
	DEVMETHOD(device_shutdown,	sec_shutdown),

	/* Crypto methods */
	DEVMETHOD(cryptodev_probesession, sec_probesession),
	DEVMETHOD(cryptodev_newsession,	sec_newsession),
	DEVMETHOD(cryptodev_process,	sec_process),

	DEVMETHOD_END
};
/* newbus glue: attach under simplebus; depends on the crypto framework. */
static driver_t sec_driver = {
	"sec",
	sec_methods,
	sizeof(struct sec_softc),
};

static devclass_t sec_devclass;
DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
MODULE_DEPEND(sec, crypto, 1, 1, 1);
144 
/*
 * Per-execution-unit handlers: { newsession-check, make-descriptor }
 * pairs for the AESU and MDEU, NULL-terminated.
 * NOTE(review): presumably scanned in order during session setup; the
 * consumer (sec_newsession) is not visible in this chunk — confirm there.
 */
static struct sec_eu_methods sec_eus[] = {
	{
		sec_aesu_newsession,
		sec_aesu_make_desc,
	},
	{
		sec_mdeu_newsession,
		sec_mdeu_make_desc,
	},
	{ NULL, NULL }
};
156 
157 static inline void
158 sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
159 {
160 
161 	/* Sync only if dma memory is valid */
162 	if (dma_mem->dma_vaddr != NULL)
163 		bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
164 }
165 
166 static inline void *
167 sec_get_pointer_data(struct sec_desc *desc, u_int n)
168 {
169 
170 	return (desc->sd_ptr_dmem[n].dma_vaddr);
171 }
172 
173 static int
174 sec_probe(device_t dev)
175 {
176 	struct sec_softc *sc;
177 	uint64_t id;
178 
179 	if (!ofw_bus_status_okay(dev))
180 		return (ENXIO);
181 
182 	if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
183 		return (ENXIO);
184 
185 	sc = device_get_softc(dev);
186 
187 	sc->sc_rrid = 0;
188 	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
189 	    RF_ACTIVE);
190 
191 	if (sc->sc_rres == NULL)
192 		return (ENXIO);
193 
194 	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
195 	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
196 
197 	id = SEC_READ(sc, SEC_ID);
198 
199 	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
200 
201 	switch (id) {
202 	case SEC_20_ID:
203 		device_set_desc(dev, "Freescale Security Engine 2.0");
204 		sc->sc_version = 2;
205 		break;
206 	case SEC_30_ID:
207 		device_set_desc(dev, "Freescale Security Engine 3.0");
208 		sc->sc_version = 3;
209 		break;
210 	case SEC_31_ID:
211 		device_set_desc(dev, "Freescale Security Engine 3.1");
212 		sc->sc_version = 3;
213 		break;
214 	default:
215 		device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
216 		return (ENXIO);
217 	}
218 
219 	return (0);
220 }
221 
/*
 * Attach: register with opencrypto, map registers, hook up interrupts,
 * allocate descriptor/link-table DMA memory and initialize the engine.
 * On any failure everything acquired so far is unwound via the failN
 * labels and ENXIO is returned.
 */
static int
sec_attach(device_t dev)
{
	struct sec_softc *sc;
	struct sec_hw_lt *lt;
	int error = 0;
	int i;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_blocked = 0;
	sc->sc_shutdown = 0;

	/* Register as a hardware crypto driver with opencrypto. */
	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver ID!\n");
		return (ENXIO);
	}

	/* Init locks */
	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
	    "SEC Controller lock", MTX_DEF);
	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
	    "SEC Descriptors lock", MTX_DEF);

	/* Allocate I/O memory for SEC registers */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate I/O memory!\n");
		goto fail1;
	}

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* Setup interrupts */
	sc->sc_pri_irid = 0;
	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
	    &sc->sc_pri_irid, sec_primary_intr, "primary");

	if (error)
		goto fail2;


	/* SEC 3.x exposes a second interrupt line (sc_version set in probe). */
	if (sc->sc_version == 3) {
		sc->sc_sec_irid = 1;
		error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
		    &sc->sc_sec_irid, sec_secondary_intr, "secondary");

		if (error)
			goto fail3;
	}

	/* Alloc DMA memory for descriptors and link tables */
	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));

	if (error)
		goto fail4;

	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));

	if (error)
		goto fail5;

	/* Fill in descriptors and link tables */
	for (i = 0; i < SEC_DESCRIPTORS; i++) {
		sc->sc_desc[i].sd_desc =
		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_desc));
	}

	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
		sc->sc_lt[i].sl_lt =
		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_lt));
	}

	/* Last entry in link table is used to create a circle */
	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
	lt->shl_length = 0;
	lt->shl_r = 0;
	lt->shl_n = 1;
	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;

	/* Init descriptor and link table queues pointers */
	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);

	/* Create masks for fast checks */
	sc->sc_int_error_mask = 0;
	for (i = 0; i < SEC_CHANNELS; i++)
		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));

	/* CSR layout differs between SEC 2.x and 3.x; pick the idle mask. */
	switch (sc->sc_version) {
	case 2:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
		break;
	case 3:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
		break;
	}

	/* Init hardware */
	error = sec_init(sc);

	if (error)
		goto fail6;

	return (0);

fail6:
	sec_free_dma_mem(&(sc->sc_lt_dmem));
fail5:
	sec_free_dma_mem(&(sc->sc_desc_dmem));
fail4:
	/* Safe if the secondary IRQ was never set up: ires == NULL is a no-op. */
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");
fail3:
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
fail2:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
fail1:
	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);

	return (ENXIO);
}
372 
/*
 * Detach: drain queued work (bounded by SEC_TIMEOUT), mask interrupts,
 * unregister from opencrypto and release DMA, IRQ and register resources.
 */
static int
sec_detach(device_t dev)
{
	struct sec_softc *sc = device_get_softc(dev);
	int i, error, timeout = SEC_TIMEOUT;

	/* Prepare driver to shutdown */
	SEC_LOCK(sc, descriptors);
	sc->sc_shutdown = 1;
	SEC_UNLOCK(sc, descriptors);

	/* Wait until all queued processing finishes */
	while (1) {
		SEC_LOCK(sc, descriptors);
		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
		SEC_UNLOCK(sc, descriptors);

		if (i == 0)
			break;

		if (timeout < 0) {
			device_printf(dev, "queue flush timeout!\n");

			/* DMA can be still active - stop it */
			for (i = 0; i < SEC_CHANNELS; i++)
				sec_channel_reset(sc, i, 1);

			break;
		}

		timeout -= 1000;
		DELAY(1000);
	}

	/* Disable interrupts */
	SEC_WRITE(sc, SEC_IER, 0);

	/* Unregister from OCF */
	crypto_unregister_all(sc->sc_cid);

	/* Free DMA memory */
	for (i = 0; i < SEC_DESCRIPTORS; i++)
		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));

	sec_free_dma_mem(&(sc->sc_lt_dmem));
	sec_free_dma_mem(&(sc->sc_desc_dmem));

	/* Release interrupts (secondary release is a no-op on SEC 2.x). */
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");

	/* Release memory */
	if (sc->sc_rres) {
		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
		    sc->sc_rres);
		if (error)
			device_printf(dev, "bus_release_resource() failed for"
			    " I/O memory, error %d\n", error);

		sc->sc_rres = NULL;
	}

	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);

	return (0);
}
442 
/* Device suspend stub: no driver state needs saving. */
static int
sec_suspend(device_t dev)
{

	return (0);
}
449 
/* Device resume stub: nothing to restore. */
static int
sec_resume(device_t dev)
{

	return (0);
}
456 
/* Device shutdown stub: no quiesce work performed here. */
static int
sec_shutdown(device_t dev)
{

	return (0);
}
463 
464 static int
465 sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
466     int *irid, driver_intr_t handler, const char *iname)
467 {
468 	int error;
469 
470 	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
471 	    RF_ACTIVE);
472 
473 	if ((*ires) == NULL) {
474 		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
475 		return (ENXIO);
476 	}
477 
478 	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
479 	    NULL, handler, sc, ihand);
480 
481 	if (error) {
482 		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
483 		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
484 			device_printf(sc->sc_dev, "could not release %s IRQ\n",
485 			    iname);
486 
487 		(*ires) = NULL;
488 		return (error);
489 	}
490 
491 	return (0);
492 }
493 
494 static void
495 sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
496     int irid, const char *iname)
497 {
498 	int error;
499 
500 	if (ires == NULL)
501 		return;
502 
503 	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
504 	if (error)
505 		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
506 		    " IRQ, error %d\n", iname, error);
507 
508 	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
509 	if (error)
510 		device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
511 		    " IRQ, error %d\n", iname, error);
512 }
513 
/*
 * Primary interrupt handler: mark descriptors on erroring channels,
 * ACK the interrupt, complete finished descriptors back to opencrypto
 * (verifying or copying out digests as requested) and refill hardware
 * with any ready descriptors.
 */
static void
sec_primary_intr(void *arg)
{
	struct sec_session *ses;
	struct sec_softc *sc = arg;
	struct sec_desc *desc;
	struct cryptop *crp;
	uint64_t isr;
	uint8_t hash[HASH_MAX_LEN];
	int i, wakeup = 0;

	SEC_LOCK(sc, controller);

	/* Check for errors */
	isr = SEC_READ(sc, SEC_ISR);
	if (isr & sc->sc_int_error_mask) {
		/* Check each channel for error */
		for (i = 0; i < SEC_CHANNELS; i++) {
			if ((isr & SEC_INT_CH_ERR(i)) == 0)
				continue;

			device_printf(sc->sc_dev,
			    "I/O error on channel %i!\n", i);

			/* Find and mark problematic descriptor */
			desc = sec_find_desc(sc, SEC_READ(sc,
			    SEC_CHAN_CDPR(i)));

			if (desc != NULL)
				desc->sd_error = EIO;

			/* Do partial channel reset */
			sec_channel_reset(sc, i, 0);
		}
	}

	/* ACK interrupt */
	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);

	SEC_UNLOCK(sc, controller);
	SEC_LOCK(sc, descriptors);

	/* Handle processed descriptors */
	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
		desc = SEC_GET_QUEUED_DESC(sc);

		/* Stop at the first descriptor hardware has not completed. */
		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
			SEC_PUT_BACK_QUEUED_DESC(sc);
			break;
		}

		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		crp = desc->sd_crp;
		crp->crp_etype = desc->sd_error;
		if (crp->crp_etype == 0) {
			ses = crypto_get_driver_session(crp->crp_session);
			if (ses->ss_mlen != 0) {
				if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
					crypto_copydata(crp,
					    crp->crp_digest_start,
					    ses->ss_mlen, hash);
					/* Constant-time compare of digests. */
					if (timingsafe_bcmp(
					    desc->sd_desc->shd_digest,
					    hash, ses->ss_mlen) != 0)
						crp->crp_etype = EBADMSG;
				} else
					crypto_copyback(crp,
					    crp->crp_digest_start,
					    ses->ss_mlen,
					    desc->sd_desc->shd_digest);
			}
		}
		crypto_done(desc->sd_crp);

		SEC_DESC_FREE_POINTERS(desc);
		SEC_DESC_FREE_LT(sc, desc);
		SEC_DESC_QUEUED2FREE(sc);
	}

	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (!sc->sc_shutdown) {
		wakeup = sc->sc_blocked;
		sc->sc_blocked = 0;
	}

	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	/* Let opencrypto resume dispatching if we had pushed back. */
	if (wakeup)
		crypto_unblock(sc->sc_cid, wakeup);
}
612 
613 static void
614 sec_secondary_intr(void *arg)
615 {
616 	struct sec_softc *sc = arg;
617 
618 	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
619 	sec_primary_intr(arg);
620 }
621 
622 static int
623 sec_controller_reset(struct sec_softc *sc)
624 {
625 	int timeout = SEC_TIMEOUT;
626 
627 	/* Reset Controller */
628 	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
629 
630 	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
631 		DELAY(1000);
632 		timeout -= 1000;
633 
634 		if (timeout < 0) {
635 			device_printf(sc->sc_dev, "timeout while waiting for "
636 			    "device reset!\n");
637 			return (ETIMEDOUT);
638 		}
639 	}
640 
641 	return (0);
642 }
643 
644 static int
645 sec_channel_reset(struct sec_softc *sc, int channel, int full)
646 {
647 	int timeout = SEC_TIMEOUT;
648 	uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
649 	uint64_t reg;
650 
651 	/* Reset Channel */
652 	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
653 	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
654 
655 	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
656 		DELAY(1000);
657 		timeout -= 1000;
658 
659 		if (timeout < 0) {
660 			device_printf(sc->sc_dev, "timeout while waiting for "
661 			    "channel reset!\n");
662 			return (ETIMEDOUT);
663 		}
664 	}
665 
666 	if (full) {
667 		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
668 
669 		switch(sc->sc_version) {
670 		case 2:
671 			reg |= SEC_CHAN_CCR_CDWE;
672 			break;
673 		case 3:
674 			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
675 			break;
676 		}
677 
678 		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
679 	}
680 
681 	return (0);
682 }
683 
684 static int
685 sec_init(struct sec_softc *sc)
686 {
687 	uint64_t reg;
688 	int error, i;
689 
690 	/* Reset controller twice to clear all pending interrupts */
691 	error = sec_controller_reset(sc);
692 	if (error)
693 		return (error);
694 
695 	error = sec_controller_reset(sc);
696 	if (error)
697 		return (error);
698 
699 	/* Reset channels */
700 	for (i = 0; i < SEC_CHANNELS; i++) {
701 		error = sec_channel_reset(sc, i, 1);
702 		if (error)
703 			return (error);
704 	}
705 
706 	/* Enable Interrupts */
707 	reg = SEC_INT_ITO;
708 	for (i = 0; i < SEC_CHANNELS; i++)
709 		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
710 
711 	SEC_WRITE(sc, SEC_IER, reg);
712 
713 	return (error);
714 }
715 
716 static void
717 sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
718 {
719 	struct sec_dma_mem *dma_mem = arg;
720 
721 	if (error)
722 		return;
723 
724 	KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
725 	dma_mem->dma_paddr = segs->ds_addr;
726 }
727 
/*
 * busdma load callback for sec_desc_map_dma(): translate the loaded
 * segments into SEC link table entries, skipping sdmi_offset bytes and
 * covering at most sdmi_size bytes.  Records first/last entry and the
 * number of entries consumed in *sdmi for the caller.
 */
static void
sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct sec_desc_map_info *sdmi = arg;
	struct sec_softc *sc = sdmi->sdmi_sc;
	struct sec_lt *lt = NULL;
	bus_addr_t addr;
	bus_size_t size;
	int i;

	SEC_LOCK_ASSERT(sc, descriptors);

	if (error)
		return;

	for (i = 0; i < nseg; i++) {
		addr = segs[i].ds_addr;
		size = segs[i].ds_len;

		/* Skip requested offset */
		if (sdmi->sdmi_offset >= size) {
			sdmi->sdmi_offset -= size;
			continue;
		}

		addr += sdmi->sdmi_offset;
		size -= sdmi->sdmi_offset;
		sdmi->sdmi_offset = 0;

		/* Do not link more than requested */
		if (sdmi->sdmi_size < size)
			size = sdmi->sdmi_size;

		lt = SEC_ALLOC_LT_ENTRY(sc);
		lt->sl_lt->shl_length = size;
		lt->sl_lt->shl_r = 0;	/* chain terminator set later by caller */
		lt->sl_lt->shl_n = 0;
		lt->sl_lt->shl_ptr = addr;

		/* Remember the head of the chain. */
		if (sdmi->sdmi_lt_first == NULL)
			sdmi->sdmi_lt_first = lt;

		sdmi->sdmi_lt_used += 1;

		/* Stop once the requested byte count is fully covered. */
		if ((sdmi->sdmi_size -= size) == 0)
			break;
	}

	/* sec_make_pointer() sets shl_r = 1 on this last entry. */
	sdmi->sdmi_lt_last = lt;
}
779 
/*
 * Allocate DMA-able memory (tag + buffer + load) of the given size for
 * descriptors or link tables.  Returns EBUSY if the region is already
 * in use; on failure all partially-acquired resources are released.
 */
static int
sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
    bus_size_t size)
{
	int error;

	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	error = bus_dma_tag_create(NULL,	/* parent */
		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		size, 1,			/* maxsize, nsegments */
		size, 0,			/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		goto err1;
	}

	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate DMA safe"
		    " memory, error %i!\n", error);
		goto err2;
	}

	/* Physical address is recorded by sec_alloc_dma_mem_cb(). */
	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
		    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
		    BUS_DMA_NOWAIT);

	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i\n", error);
		goto err3;
	}

	/* Mark as allocation (not map) so sec_free_dma_mem() frees properly. */
	dma_mem->dma_is_map = 0;
	return (0);

err3:
	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
err2:
	bus_dma_tag_destroy(dma_mem->dma_tag);
err1:
	dma_mem->dma_vaddr = NULL;
	return(error);
}
835 
836 static int
837 sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
838     struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi)
839 {
840 	int error;
841 
842 	if (dma_mem->dma_vaddr != NULL)
843 		return (EBUSY);
844 
845 	switch (crp->crp_buf.cb_type) {
846 	case CRYPTO_BUF_CONTIG:
847 		break;
848 	case CRYPTO_BUF_UIO:
849 		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
850 		break;
851 	case CRYPTO_BUF_MBUF:
852 		size = m_length(crp->crp_buf.cb_mbuf, NULL);
853 		break;
854 	case CRYPTO_BUF_VMPAGE:
855 		size = PAGE_SIZE - cb->cb_vm_page_offset;
856 		break;
857 	default:
858 		return (EINVAL);
859 	}
860 
861 	error = bus_dma_tag_create(NULL,	/* parent */
862 		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
863 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
864 		BUS_SPACE_MAXADDR,		/* highaddr */
865 		NULL, NULL,			/* filtfunc, filtfuncarg */
866 		size,				/* maxsize */
867 		SEC_FREE_LT_CNT(sc),		/* nsegments */
868 		SEC_MAX_DMA_BLOCK_SIZE, 0,	/* maxsegsz, flags */
869 		NULL, NULL,			/* lockfunc, lockfuncarg */
870 		&(dma_mem->dma_tag));		/* dmat */
871 
872 	if (error) {
873 		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
874 		    " %i!\n", error);
875 		dma_mem->dma_vaddr = NULL;
876 		return (error);
877 	}
878 
879 	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
880 
881 	if (error) {
882 		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
883 		    "\n", error);
884 		bus_dma_tag_destroy(dma_mem->dma_tag);
885 		return (error);
886 	}
887 
888 	error = bus_dmamap_load_crp(dma_mem->dma_tag, dma_mem->dma_map, crp,
889 	    sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
890 
891 	if (error) {
892 		device_printf(sc->sc_dev, "cannot get address of the DMA"
893 		    " memory, error %i!\n", error);
894 		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
895 		bus_dma_tag_destroy(dma_mem->dma_tag);
896 		return (error);
897 	}
898 
899 	dma_mem->dma_is_map = 1;
900 	dma_mem->dma_vaddr = crp;
901 
902 	return (0);
903 }
904 
905 static void
906 sec_free_dma_mem(struct sec_dma_mem *dma_mem)
907 {
908 
909 	/* Check for double free */
910 	if (dma_mem->dma_vaddr == NULL)
911 		return;
912 
913 	bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
914 
915 	if (dma_mem->dma_is_map)
916 		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
917 	else
918 		bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
919 		    dma_mem->dma_map);
920 
921 	bus_dma_tag_destroy(dma_mem->dma_tag);
922 	dma_mem->dma_vaddr = NULL;
923 }
924 
925 static int
926 sec_eu_channel(struct sec_softc *sc, int eu)
927 {
928 	uint64_t reg;
929 	int channel = 0;
930 
931 	SEC_LOCK_ASSERT(sc, controller);
932 
933 	reg = SEC_READ(sc, SEC_EUASR);
934 
935 	switch (eu) {
936 	case SEC_EU_AFEU:
937 		channel = SEC_EUASR_AFEU(reg);
938 		break;
939 	case SEC_EU_DEU:
940 		channel = SEC_EUASR_DEU(reg);
941 		break;
942 	case SEC_EU_MDEU_A:
943 	case SEC_EU_MDEU_B:
944 		channel = SEC_EUASR_MDEU(reg);
945 		break;
946 	case SEC_EU_RNGU:
947 		channel = SEC_EUASR_RNGU(reg);
948 		break;
949 	case SEC_EU_PKEU:
950 		channel = SEC_EUASR_PKEU(reg);
951 		break;
952 	case SEC_EU_AESU:
953 		channel = SEC_EUASR_AESU(reg);
954 		break;
955 	case SEC_EU_KEU:
956 		channel = SEC_EUASR_KEU(reg);
957 		break;
958 	case SEC_EU_CRCU:
959 		channel = SEC_EUASR_CRCU(reg);
960 		break;
961 	}
962 
963 	return (channel - 1);
964 }
965 
966 static int
967 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
968 {
969 	u_int fflvl = SEC_MAX_FIFO_LEVEL;
970 	uint64_t reg;
971 	int i;
972 
973 	SEC_LOCK_ASSERT(sc, controller);
974 
975 	/* Find free channel if have not got one */
976 	if (channel < 0) {
977 		for (i = 0; i < SEC_CHANNELS; i++) {
978 			reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
979 
980 			if ((reg & sc->sc_channel_idle_mask) == 0) {
981 				channel = i;
982 				break;
983 			}
984 		}
985 	}
986 
987 	/* There is no free channel */
988 	if (channel < 0)
989 		return (-1);
990 
991 	/* Check FIFO level on selected channel */
992 	reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
993 
994 	switch(sc->sc_version) {
995 	case 2:
996 		fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
997 		break;
998 	case 3:
999 		fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1000 		break;
1001 	}
1002 
1003 	if (fflvl >= SEC_MAX_FIFO_LEVEL)
1004 		return (-1);
1005 
1006 	/* Enqueue descriptor in channel */
1007 	SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1008 
1009 	return (channel);
1010 }
1011 
/*
 * Move ready descriptors into hardware.  The target channel is picked
 * according to which channels currently hold the descriptor's two
 * selected execution units; stops at the first descriptor that cannot
 * be queued, preserving submission order.
 */
static void
sec_enqueue(struct sec_softc *sc)
{
	struct sec_desc *desc;
	int ch0, ch1;

	SEC_LOCK(sc, descriptors);
	SEC_LOCK(sc, controller);

	while (SEC_READY_DESC_CNT(sc) > 0) {
		desc = SEC_GET_READY_DESC(sc);

		/* Channel (or -1) currently owning each selected EU. */
		ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
		ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);

		/*
		 * Both EU are used by the same channel.
		 * Enqueue descriptor in channel used by busy EUs.
		 */
		if (ch0 >= 0 && ch0 == ch1) {
			if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/*
		 * Only one EU is free.
		 * Enqueue descriptor in channel used by busy EU.
		 */
		if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
			if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
			    >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/*
		 * Both EU are free.
		 * Enqueue descriptor in first free channel.
		 */
		if (ch0 < 0 && ch1 < 0) {
			if (sec_enqueue_desc(sc, desc, -1) >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/* Current descriptor can not be queued at the moment */
		SEC_PUT_BACK_READY_DESC(sc);
		break;
	}

	SEC_UNLOCK(sc, controller);
	SEC_UNLOCK(sc, descriptors);
}
1069 
1070 static struct sec_desc *
1071 sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1072 {
1073 	struct sec_desc *desc = NULL;
1074 	int i;
1075 
1076 	SEC_LOCK_ASSERT(sc, descriptors);
1077 
1078 	for (i = 0; i < SEC_CHANNELS; i++) {
1079 		if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1080 			desc = &(sc->sc_desc[i]);
1081 			break;
1082 		}
1083 	}
1084 
1085 	return (desc);
1086 }
1087 
1088 static int
1089 sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1090     bus_addr_t data, bus_size_t dsize)
1091 {
1092 	struct sec_hw_desc_ptr *ptr;
1093 
1094 	SEC_LOCK_ASSERT(sc, descriptors);
1095 
1096 	ptr = &(desc->sd_desc->shd_pointer[n]);
1097 	ptr->shdp_length = dsize;
1098 	ptr->shdp_extent = 0;
1099 	ptr->shdp_j = 0;
1100 	ptr->shdp_ptr = data;
1101 
1102 	return (0);
1103 }
1104 
1105 static int
1106 sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1107     u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize)
1108 {
1109 	struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1110 	struct sec_hw_desc_ptr *ptr;
1111 	int error;
1112 
1113 	SEC_LOCK_ASSERT(sc, descriptors);
1114 
1115 	error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), crp, dsize,
1116 	    &sdmi);
1117 
1118 	if (error)
1119 		return (error);
1120 
1121 	sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1122 	desc->sd_lt_used += sdmi.sdmi_lt_used;
1123 
1124 	ptr = &(desc->sd_desc->shd_pointer[n]);
1125 	ptr->shdp_length = dsize;
1126 	ptr->shdp_extent = 0;
1127 	ptr->shdp_j = 1;
1128 	ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
1129 
1130 	return (0);
1131 }
1132 
1133 static bool
1134 sec_cipher_supported(const struct crypto_session_params *csp)
1135 {
1136 
1137 	switch (csp->csp_cipher_alg) {
1138 	case CRYPTO_AES_CBC:
1139 		/* AESU */
1140 		if (csp->csp_ivlen != AES_BLOCK_LEN)
1141 			return (false);
1142 		break;
1143 	default:
1144 		return (false);
1145 	}
1146 
1147 	if (csp->csp_cipher_klen == 0 || csp->csp_cipher_klen > SEC_MAX_KEY_LEN)
1148 		return (false);
1149 
1150 	return (true);
1151 }
1152 
1153 static bool
1154 sec_auth_supported(struct sec_softc *sc,
1155     const struct crypto_session_params *csp)
1156 {
1157 
1158 	switch (csp->csp_auth_alg) {
1159 	case CRYPTO_SHA2_384_HMAC:
1160 	case CRYPTO_SHA2_512_HMAC:
1161 		if (sc->sc_version < 3)
1162 			return (false);
1163 		/* FALLTHROUGH */
1164 	case CRYPTO_SHA1_HMAC:
1165 	case CRYPTO_SHA2_256_HMAC:
1166 		if (csp->csp_auth_klen > SEC_MAX_KEY_LEN)
1167 			return (false);
1168 		break;
1169 	case CRYPTO_SHA1:
1170 		break;
1171 	default:
1172 		return (false);
1173 	}
1174 	return (true);
1175 }
1176 
1177 static int
1178 sec_probesession(device_t dev, const struct crypto_session_params *csp)
1179 {
1180 	struct sec_softc *sc = device_get_softc(dev);
1181 
1182 	if (csp->csp_flags != 0)
1183 		return (EINVAL);
1184 	switch (csp->csp_mode) {
1185 	case CSP_MODE_DIGEST:
1186 		if (!sec_auth_supported(sc, csp))
1187 			return (EINVAL);
1188 		break;
1189 	case CSP_MODE_CIPHER:
1190 		if (!sec_cipher_supported(csp))
1191 			return (EINVAL);
1192 		break;
1193 	case CSP_MODE_ETA:
1194 		if (!sec_auth_supported(sc, csp) || !sec_cipher_supported(csp))
1195 			return (EINVAL);
1196 		break;
1197 	default:
1198 		return (EINVAL);
1199 	}
1200 	return (CRYPTODEV_PROBE_HARDWARE);
1201 }
1202 
1203 static int
1204 sec_newsession(device_t dev, crypto_session_t cses,
1205     const struct crypto_session_params *csp)
1206 {
1207 	struct sec_eu_methods *eu = sec_eus;
1208 	struct sec_session *ses;
1209 
1210 	ses = crypto_get_driver_session(cses);
1211 
1212 	/* Find EU for this session */
1213 	while (eu->sem_make_desc != NULL) {
1214 		if (eu->sem_newsession(csp))
1215 			break;
1216 		eu++;
1217 	}
1218 	KASSERT(eu->sem_make_desc != NULL, ("failed to find eu for session"));
1219 
1220 	/* Save cipher key */
1221 	if (csp->csp_cipher_key != NULL)
1222 		memcpy(ses->ss_key, csp->csp_cipher_key, csp->csp_cipher_klen);
1223 
1224 	/* Save digest key */
1225 	if (csp->csp_auth_key != NULL)
1226 		memcpy(ses->ss_mkey, csp->csp_auth_key, csp->csp_auth_klen);
1227 
1228 	if (csp->csp_auth_alg != 0) {
1229 		if (csp->csp_auth_mlen == 0)
1230 			ses->ss_mlen = crypto_auth_hash(csp)->hashsize;
1231 		else
1232 			ses->ss_mlen = csp->csp_auth_mlen;
1233 	}
1234 
1235 	return (0);
1236 }
1237 
/*
 * OpenCrypto "process" entry point: stage one crypto request onto a free
 * software descriptor and kick the enqueue path.  Requests that cannot be
 * handled are completed immediately with crp_etype set; ERESTART is
 * returned only when the driver is out of descriptors (or shutting down)
 * so the framework re-queues the request.
 */
static int
sec_process(device_t dev, struct cryptop *crp, int hint)
{
	struct sec_softc *sc = device_get_softc(dev);
	struct sec_desc *desc = NULL;
	const struct crypto_session_params *csp;
	struct sec_session *ses;
	int error = 0;

	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	/* Check for input length */
	if (crypto_buffer_len(&crp->crp_buf) > SEC_MAX_DMA_BLOCK_SIZE) {
		crp->crp_etype = E2BIG;
		crypto_done(crp);
		return (0);
	}

	SEC_LOCK(sc, descriptors);
	/* CPU is about to write the shared descriptor ring. */
	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Block driver if there is no free descriptors or we are going down */
	if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
		sc->sc_blocked |= CRYPTO_SYMQ;
		SEC_UNLOCK(sc, descriptors);
		return (ERESTART);
	}

	/* Prepare descriptor */
	desc = SEC_GET_FREE_DESC(sc);
	desc->sd_lt_used = 0;
	desc->sd_error = 0;
	desc->sd_crp = crp;

	/* Copy the per-request IV into the hardware descriptor. */
	if (csp->csp_cipher_alg != 0)
		crypto_read_iv(crp, desc->sd_desc->shd_iv);

	/* Per-request keys (if any) override the session keys. */
	if (crp->crp_cipher_key != NULL)
		memcpy(ses->ss_key, crp->crp_cipher_key, csp->csp_cipher_klen);

	if (crp->crp_auth_key != NULL)
		memcpy(ses->ss_mkey, crp->crp_auth_key, csp->csp_auth_klen);

	/* Stage the (possibly updated) keys into the hardware descriptor. */
	memcpy(desc->sd_desc->shd_key, ses->ss_key, csp->csp_cipher_klen);
	memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, csp->csp_auth_klen);

	/* Let the session's execution unit build the hardware descriptor. */
	error = ses->ss_eu->sem_make_desc(sc, csp, desc, crp);

	if (error) {
		/* Undo all staging and complete the request with the error. */
		SEC_DESC_FREE_POINTERS(desc);
		SEC_DESC_PUT_BACK_LT(sc, desc);
		SEC_PUT_BACK_FREE_DESC(sc);
		SEC_UNLOCK(sc, descriptors);
		crp->crp_etype = error;
		crypto_done(crp);
		return (0);
	}

	/*
	 * Skip DONE interrupt if this is not last request in burst, but only
	 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE
	 * signaling on each descriptor.
	 */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
		desc->sd_desc->shd_dn = 0;
	else
		desc->sd_desc->shd_dn = 1;

	/* Hand the descriptor memory back to the device before enqueueing. */
	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	SEC_DESC_FREE2READY(sc);
	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	return (0);
}
1318 
1319 static int
1320 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1321     const struct crypto_session_params *csp, struct cryptop *crp)
1322 {
1323 	struct sec_hw_desc *hd = desc->sd_desc;
1324 	int error;
1325 
1326 	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1327 	hd->shd_eu_sel1 = SEC_EU_NONE;
1328 	hd->shd_mode1 = 0;
1329 
1330 	/* Pointer 0: NULL */
1331 	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1332 	if (error)
1333 		return (error);
1334 
1335 	/* Pointer 1: IV IN */
1336 	error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1337 	    offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
1338 	if (error)
1339 		return (error);
1340 
1341 	/* Pointer 2: Cipher Key */
1342 	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1343 	    offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
1344  	if (error)
1345 		return (error);
1346 
1347 	/* Pointer 3: Data IN */
1348 	error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
1349 	    crp->crp_payload_length);
1350 	if (error)
1351 		return (error);
1352 
1353 	/* Pointer 4: Data OUT */
1354 	error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
1355 	    crp->crp_payload_length);
1356 	if (error)
1357 		return (error);
1358 
1359 	/* Pointer 5: IV OUT (Not used: NULL) */
1360 	error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1361 	if (error)
1362 		return (error);
1363 
1364 	/* Pointer 6: NULL */
1365 	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1366 
1367 	return (error);
1368 }
1369 
1370 static int
1371 sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1372     const struct crypto_session_params *csp, struct cryptop *crp)
1373 {
1374 	struct sec_hw_desc *hd = desc->sd_desc;
1375 	u_int eu, mode, hashlen;
1376 	int error;
1377 
1378 	error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
1379 	if (error)
1380 		return (error);
1381 
1382 	hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1383 	hd->shd_eu_sel1 = eu;
1384 	hd->shd_mode1 = mode;
1385 
1386 	/* Pointer 0: HMAC Key */
1387 	error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1388 	    offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen);
1389 	if (error)
1390 		return (error);
1391 
1392 	/* Pointer 1: HMAC-Only Data IN */
1393 	error = sec_make_pointer(sc, desc, 1, crp, crp->crp_aad_start,
1394 	    crp->crp_aad_length);
1395 	if (error)
1396 		return (error);
1397 
1398 	/* Pointer 2: Cipher Key */
1399 	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1400 	    offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
1401  	if (error)
1402 		return (error);
1403 
1404 	/* Pointer 3: IV IN */
1405 	error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1406 	    offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
1407 	if (error)
1408 		return (error);
1409 
1410 	/* Pointer 4: Data IN */
1411 	error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
1412 	    crp->crp_payload_length);
1413 	if (error)
1414 		return (error);
1415 
1416 	/* Pointer 5: Data OUT */
1417 	error = sec_make_pointer(sc, desc, 5, crp, crp->crp_payload_start,
1418 	    crp->crp_payload_length);
1419 	if (error)
1420 		return (error);
1421 
1422 	/* Pointer 6: HMAC OUT */
1423 	error = sec_make_pointer_direct(sc, desc, 6, desc->sd_desc_paddr +
1424 	    offsetof(struct sec_hw_desc, shd_digest), hashlen);
1425 
1426 	return (error);
1427 }
1428 
1429 /* AESU */
1430 
1431 static bool
1432 sec_aesu_newsession(const struct crypto_session_params *csp)
1433 {
1434 
1435 	return (csp->csp_cipher_alg == CRYPTO_AES_CBC);
1436 }
1437 
1438 static int
1439 sec_aesu_make_desc(struct sec_softc *sc,
1440     const struct crypto_session_params *csp, struct sec_desc *desc,
1441     struct cryptop *crp)
1442 {
1443 	struct sec_hw_desc *hd = desc->sd_desc;
1444 	int error;
1445 
1446 	hd->shd_eu_sel0 = SEC_EU_AESU;
1447 	hd->shd_mode0 = SEC_AESU_MODE_CBC;
1448 
1449 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
1450 		hd->shd_mode0 |= SEC_AESU_MODE_ED;
1451 		hd->shd_dir = 0;
1452 	} else
1453 		hd->shd_dir = 1;
1454 
1455 	if (csp->csp_mode == CSP_MODE_ETA)
1456 		error = sec_build_common_s_desc(sc, desc, csp, crp);
1457 	else
1458 		error = sec_build_common_ns_desc(sc, desc, csp, crp);
1459 
1460 	return (error);
1461 }
1462 
1463 /* MDEU */
1464 
1465 static bool
1466 sec_mdeu_can_handle(u_int alg)
1467 {
1468 	switch (alg) {
1469 	case CRYPTO_SHA1:
1470 	case CRYPTO_SHA1_HMAC:
1471 	case CRYPTO_SHA2_256_HMAC:
1472 	case CRYPTO_SHA2_384_HMAC:
1473 	case CRYPTO_SHA2_512_HMAC:
1474 		return (true);
1475 	default:
1476 		return (false);
1477 	}
1478 }
1479 
1480 static int
1481 sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode,
1482     u_int *hashlen)
1483 {
1484 
1485 	*mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1486 	*eu = SEC_EU_NONE;
1487 
1488 	switch (csp->csp_auth_alg) {
1489 	case CRYPTO_SHA1_HMAC:
1490 		*mode |= SEC_MDEU_MODE_HMAC;
1491 		/* FALLTHROUGH */
1492 	case CRYPTO_SHA1:
1493 		*eu = SEC_EU_MDEU_A;
1494 		*mode |= SEC_MDEU_MODE_SHA1;
1495 		*hashlen = SHA1_HASH_LEN;
1496 		break;
1497 	case CRYPTO_SHA2_256_HMAC:
1498 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1499 		*eu = SEC_EU_MDEU_A;
1500 		break;
1501 	case CRYPTO_SHA2_384_HMAC:
1502 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1503 		*eu = SEC_EU_MDEU_B;
1504 		break;
1505 	case CRYPTO_SHA2_512_HMAC:
1506 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1507 		*eu = SEC_EU_MDEU_B;
1508 		break;
1509 	default:
1510 		return (EINVAL);
1511 	}
1512 
1513 	if (*mode & SEC_MDEU_MODE_HMAC)
1514 		*hashlen = SEC_HMAC_HASH_LEN;
1515 
1516 	return (0);
1517 }
1518 
1519 static bool
1520 sec_mdeu_newsession(const struct crypto_session_params *csp)
1521 {
1522 
1523 	return (sec_mdeu_can_handle(csp->csp_auth_alg));
1524 }
1525 
1526 static int
1527 sec_mdeu_make_desc(struct sec_softc *sc,
1528     const struct crypto_session_params *csp,
1529     struct sec_desc *desc, struct cryptop *crp)
1530 {
1531 	struct sec_hw_desc *hd = desc->sd_desc;
1532 	u_int eu, mode, hashlen;
1533 	int error;
1534 
1535 	error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
1536 	if (error)
1537 		return (error);
1538 
1539 	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1540 	hd->shd_eu_sel0 = eu;
1541 	hd->shd_mode0 = mode;
1542 	hd->shd_eu_sel1 = SEC_EU_NONE;
1543 	hd->shd_mode1 = 0;
1544 
1545 	/* Pointer 0: NULL */
1546 	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1547 	if (error)
1548 		return (error);
1549 
1550 	/* Pointer 1: Context In (Not used: NULL) */
1551 	error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1552 	if (error)
1553 		return (error);
1554 
1555 	/* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1556 	if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1557 		error = sec_make_pointer_direct(sc, desc, 2,
1558 		    desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1559 		    shd_mkey), csp->csp_auth_klen);
1560 	else
1561 		error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1562 
1563 	if (error)
1564 		return (error);
1565 
1566 	/* Pointer 3: Input Data */
1567 	error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
1568 	    crp->crp_payload_length);
1569 	if (error)
1570 		return (error);
1571 
1572 	/* Pointer 4: NULL */
1573 	error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1574 	if (error)
1575 		return (error);
1576 
1577 	/* Pointer 5: Hash out */
1578 	error = sec_make_pointer_direct(sc, desc, 5, desc->sd_desc_paddr +
1579 	    offsetof(struct sec_hw_desc, shd_digest), hashlen);
1580 	if (error)
1581 		return (error);
1582 
1583 	/* Pointer 6: NULL */
1584 	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1585 
1586 	return (0);
1587 }
1588