xref: /freebsd/sys/dev/sec/sec.c (revision cab6a39d7b343596a5823e65c0f7b426551ec22d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
19  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and
30  * 3.0 are supported.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mbuf.h>
44 #include <sys/module.h>
45 #include <sys/mutex.h>
46 #include <sys/random.h>
47 #include <sys/rman.h>
48 
49 #include <machine/_inttypes.h>
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 
53 #include <opencrypto/cryptodev.h>
54 #include <opencrypto/xform_auth.h>
55 #include "cryptodev_if.h"
56 
57 #include <dev/ofw/ofw_bus_subr.h>
58 #include <dev/sec/sec.h>
59 
60 static int	sec_probe(device_t dev);
61 static int	sec_attach(device_t dev);
62 static int	sec_detach(device_t dev);
63 static int	sec_suspend(device_t dev);
64 static int	sec_resume(device_t dev);
65 static int	sec_shutdown(device_t dev);
66 static void	sec_primary_intr(void *arg);
67 static void	sec_secondary_intr(void *arg);
68 static int	sec_setup_intr(struct sec_softc *sc, struct resource **ires,
69     void **ihand, int *irid, driver_intr_t handler, const char *iname);
70 static void	sec_release_intr(struct sec_softc *sc, struct resource *ires,
71     void *ihand, int irid, const char *iname);
72 static int	sec_controller_reset(struct sec_softc *sc);
73 static int	sec_channel_reset(struct sec_softc *sc, int channel, int full);
74 static int	sec_init(struct sec_softc *sc);
75 static int	sec_alloc_dma_mem(struct sec_softc *sc,
76     struct sec_dma_mem *dma_mem, bus_size_t size);
77 static int	sec_desc_map_dma(struct sec_softc *sc,
78     struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size,
79     struct sec_desc_map_info *sdmi);
80 static void	sec_free_dma_mem(struct sec_dma_mem *dma_mem);
81 static void	sec_enqueue(struct sec_softc *sc);
82 static int	sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
83     int channel);
84 static int	sec_eu_channel(struct sec_softc *sc, int eu);
85 static int	sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
86     u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize);
87 static int	sec_make_pointer_direct(struct sec_softc *sc,
88     struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
89 static int	sec_probesession(device_t dev,
90     const struct crypto_session_params *csp);
91 static int	sec_newsession(device_t dev, crypto_session_t cses,
92     const struct crypto_session_params *csp);
93 static int	sec_process(device_t dev, struct cryptop *crp, int hint);
94 static int	sec_build_common_ns_desc(struct sec_softc *sc,
95     struct sec_desc *desc, const struct crypto_session_params *csp,
96     struct cryptop *crp);
97 static int	sec_build_common_s_desc(struct sec_softc *sc,
98     struct sec_desc *desc, const struct crypto_session_params *csp,
99     struct cryptop *crp);
100 
101 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
102 
103 /* AESU */
104 static bool	sec_aesu_newsession(const struct crypto_session_params *csp);
105 static int	sec_aesu_make_desc(struct sec_softc *sc,
106     const struct crypto_session_params *csp, struct sec_desc *desc,
107     struct cryptop *crp);
108 
109 /* MDEU */
110 static bool	sec_mdeu_can_handle(u_int alg);
111 static int	sec_mdeu_config(const struct crypto_session_params *csp,
112     u_int *eu, u_int *mode, u_int *hashlen);
113 static bool	sec_mdeu_newsession(const struct crypto_session_params *csp);
114 static int	sec_mdeu_make_desc(struct sec_softc *sc,
115     const struct crypto_session_params *csp, struct sec_desc *desc,
116     struct cryptop *crp);
117 
/* Newbus device and opencrypto driver entry points. */
static device_method_t sec_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sec_probe),
	DEVMETHOD(device_attach,	sec_attach),
	DEVMETHOD(device_detach,	sec_detach),

	DEVMETHOD(device_suspend,	sec_suspend),
	DEVMETHOD(device_resume,	sec_resume),
	DEVMETHOD(device_shutdown,	sec_shutdown),

	/* Crypto methods */
	DEVMETHOD(cryptodev_probesession, sec_probesession),
	DEVMETHOD(cryptodev_newsession,	sec_newsession),
	DEVMETHOD(cryptodev_process,	sec_process),

	DEVMETHOD_END
};
static driver_t sec_driver = {
	"sec",
	sec_methods,
	sizeof(struct sec_softc),
};

static devclass_t sec_devclass;
/* Attach below simplebus (device discovered via the FDT). */
DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
MODULE_DEPEND(sec, crypto, 1, 1, 1);

/*
 * Per-execution-unit session/descriptor handlers, tried in order by
 * session setup code; the NULL pair terminates the list.
 */
static struct sec_eu_methods sec_eus[] = {
	{
		sec_aesu_newsession,
		sec_aesu_make_desc,
	},
	{
		sec_mdeu_newsession,
		sec_mdeu_make_desc,
	},
	{ NULL, NULL }
};
156 
157 static inline void
158 sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
159 {
160 
161 	/* Sync only if dma memory is valid */
162 	if (dma_mem->dma_vaddr != NULL)
163 		bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
164 }
165 
166 static inline void *
167 sec_get_pointer_data(struct sec_desc *desc, u_int n)
168 {
169 
170 	return (desc->sd_ptr_dmem[n].dma_vaddr);
171 }
172 
/*
 * Probe: identify a supported SEC revision by reading the hardware ID
 * register and set the device description accordingly.
 */
static int
sec_probe(device_t dev)
{
	struct sec_softc *sc;
	uint64_t id;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
		return (ENXIO);

	sc = device_get_softc(dev);

	/*
	 * Temporarily map the register window just long enough to read
	 * SEC_ID; it is released again below and re-allocated for good
	 * in sec_attach().
	 */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL)
		return (ENXIO);

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	id = SEC_READ(sc, SEC_ID);

	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);

	/* Remember the major version; later code branches on sc_version */
	switch (id) {
	case SEC_20_ID:
		device_set_desc(dev, "Freescale Security Engine 2.0");
		sc->sc_version = 2;
		break;
	case SEC_30_ID:
		device_set_desc(dev, "Freescale Security Engine 3.0");
		sc->sc_version = 3;
		break;
	case SEC_31_ID:
		device_set_desc(dev, "Freescale Security Engine 3.1");
		sc->sc_version = 3;
		break;
	default:
		device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
		return (ENXIO);
	}

	return (0);
}
221 
/*
 * Attach: register with opencrypto, map registers, hook interrupts,
 * allocate the descriptor ring and link tables, then reset and
 * initialize the hardware.  Resources are unwound in reverse order on
 * failure via the fail1..fail6 labels.
 */
static int
sec_attach(device_t dev)
{
	struct sec_softc *sc;
	struct sec_hw_lt *lt;
	int error = 0;
	int i;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_blocked = 0;
	sc->sc_shutdown = 0;

	/* Register with the opencrypto framework */
	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver ID!\n");
		return (ENXIO);
	}

	/*
	 * NOTE(review): the failure paths below never release the crypto
	 * driver ID obtained above (cf. crypto_unregister_all() in
	 * sec_detach()) -- looks like a leak on attach failure; confirm
	 * against crypto(9).
	 */

	/* Init locks */
	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
	    "SEC Controller lock", MTX_DEF);
	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
	    "SEC Descriptors lock", MTX_DEF);

	/* Allocate I/O memory for SEC registers */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate I/O memory!\n");
		goto fail1;
	}

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* Setup interrupts */
	sc->sc_pri_irid = 0;
	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
	    &sc->sc_pri_irid, sec_primary_intr, "primary");

	if (error)
		goto fail2;

	/* SEC 3.x has an additional (secondary) interrupt line */
	if (sc->sc_version == 3) {
		sc->sc_sec_irid = 1;
		error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
		    &sc->sc_sec_irid, sec_secondary_intr, "secondary");

		if (error)
			goto fail3;
	}

	/* Alloc DMA memory for descriptors and link tables */
	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));

	if (error)
		goto fail4;

	/* One extra link table entry is used below to close the circle */
	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));

	if (error)
		goto fail5;

	/* Fill in descriptors and link tables */
	for (i = 0; i < SEC_DESCRIPTORS; i++) {
		sc->sc_desc[i].sd_desc =
		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_desc));
	}

	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
		sc->sc_lt[i].sl_lt =
		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_lt));
	}

	/* Last entry in link table is used to create a circle */
	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
	lt->shl_length = 0;
	lt->shl_r = 0;
	lt->shl_n = 1;
	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;

	/* Init descriptor and link table queues pointers */
	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);

	/* Create masks for fast checks */
	sc->sc_int_error_mask = 0;
	for (i = 0; i < SEC_CHANNELS; i++)
		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));

	/* Version-specific "channel idle" bits in the channel CSR */
	switch (sc->sc_version) {
	case 2:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
		break;
	case 3:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
		break;
	}

	/* Init hardware */
	error = sec_init(sc);

	if (error)
		goto fail6;

	return (0);

fail6:
	sec_free_dma_mem(&(sc->sc_lt_dmem));
fail5:
	sec_free_dma_mem(&(sc->sc_desc_dmem));
fail4:
	/* Safe on SEC 2.x: sec_release_intr() ignores a NULL resource */
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");
fail3:
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
fail2:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
fail1:
	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);

	return (ENXIO);
}
371 
/*
 * Detach: block new work, drain (or forcibly stop) in-flight requests,
 * mask interrupts, unregister from opencrypto and release all resources.
 */
static int
sec_detach(device_t dev)
{
	struct sec_softc *sc = device_get_softc(dev);
	int i, error, timeout = SEC_TIMEOUT;

	/* Prepare driver to shutdown */
	SEC_LOCK(sc, descriptors);
	sc->sc_shutdown = 1;
	SEC_UNLOCK(sc, descriptors);

	/* Wait until all queued processing finishes */
	while (1) {
		SEC_LOCK(sc, descriptors);
		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
		SEC_UNLOCK(sc, descriptors);

		if (i == 0)
			break;

		if (timeout < 0) {
			device_printf(dev, "queue flush timeout!\n");

			/* DMA can be still active - stop it */
			for (i = 0; i < SEC_CHANNELS; i++)
				sec_channel_reset(sc, i, 1);

			break;
		}

		/* Poll roughly every millisecond until SEC_TIMEOUT elapses */
		timeout -= 1000;
		DELAY(1000);
	}

	/* Disable interrupts */
	SEC_WRITE(sc, SEC_IER, 0);

	/* Unregister from OCF */
	crypto_unregister_all(sc->sc_cid);

	/* Free DMA memory */
	for (i = 0; i < SEC_DESCRIPTORS; i++)
		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));

	sec_free_dma_mem(&(sc->sc_lt_dmem));
	sec_free_dma_mem(&(sc->sc_desc_dmem));

	/* Release interrupts (secondary one is NULL-safe on SEC 2.x) */
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");

	/* Release memory */
	if (sc->sc_rres) {
		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
		    sc->sc_rres);
		if (error)
			device_printf(dev, "bus_release_resource() failed for"
			    " I/O memory, error %d\n", error);

		sc->sc_rres = NULL;
	}

	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);

	return (0);
}
441 
/* Suspend: no device state needs to be saved. */
static int
sec_suspend(device_t dev)
{

	return (0);
}
448 
/* Resume: no device state needs to be restored. */
static int
sec_resume(device_t dev)
{

	return (0);
}
455 
/* Shutdown: nothing to quiesce beyond what detach already handles. */
static int
sec_shutdown(device_t dev)
{

	return (0);
}
462 
463 static int
464 sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
465     int *irid, driver_intr_t handler, const char *iname)
466 {
467 	int error;
468 
469 	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
470 	    RF_ACTIVE);
471 
472 	if ((*ires) == NULL) {
473 		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
474 		return (ENXIO);
475 	}
476 
477 	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
478 	    NULL, handler, sc, ihand);
479 
480 	if (error) {
481 		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
482 		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
483 			device_printf(sc->sc_dev, "could not release %s IRQ\n",
484 			    iname);
485 
486 		(*ires) = NULL;
487 		return (error);
488 	}
489 
490 	return (0);
491 }
492 
493 static void
494 sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
495     int irid, const char *iname)
496 {
497 	int error;
498 
499 	if (ires == NULL)
500 		return;
501 
502 	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
503 	if (error)
504 		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
505 		    " IRQ, error %d\n", iname, error);
506 
507 	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
508 	if (error)
509 		device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
510 		    " IRQ, error %d\n", iname, error);
511 }
512 
/*
 * Primary interrupt handler: record I/O errors per channel, ACK the
 * interrupt, complete all finished descriptors (verifying or copying
 * out the digest where applicable), then refill the hardware queues
 * and unblock the crypto layer if it was stalled.
 */
static void
sec_primary_intr(void *arg)
{
	struct sec_session *ses;
	struct sec_softc *sc = arg;
	struct sec_desc *desc;
	struct cryptop *crp;
	uint64_t isr;
	uint8_t hash[HASH_MAX_LEN];
	int i, wakeup = 0;

	SEC_LOCK(sc, controller);

	/* Check for errors */
	isr = SEC_READ(sc, SEC_ISR);
	if (isr & sc->sc_int_error_mask) {
		/* Check each channel for error */
		for (i = 0; i < SEC_CHANNELS; i++) {
			if ((isr & SEC_INT_CH_ERR(i)) == 0)
				continue;

			device_printf(sc->sc_dev,
			    "I/O error on channel %i!\n", i);

			/* Find and mark problematic descriptor */
			desc = sec_find_desc(sc, SEC_READ(sc,
			    SEC_CHAN_CDPR(i)));

			if (desc != NULL)
				desc->sd_error = EIO;

			/* Do partial channel reset */
			sec_channel_reset(sc, i, 0);
		}
	}

	/* ACK interrupt */
	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);

	SEC_UNLOCK(sc, controller);
	SEC_LOCK(sc, descriptors);

	/* Handle processed descriptors */
	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
		desc = SEC_GET_QUEUED_DESC(sc);

		/* Stop at the first descriptor still in flight (no error) */
		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
			SEC_PUT_BACK_QUEUED_DESC(sc);
			break;
		}

		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		crp = desc->sd_crp;
		crp->crp_etype = desc->sd_error;
		if (crp->crp_etype == 0) {
			ses = crypto_get_driver_session(crp->crp_session);
			if (ses->ss_mlen != 0) {
				/* Verify or emit the computed digest */
				if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
					crypto_copydata(crp,
					    crp->crp_digest_start,
					    ses->ss_mlen, hash);
					/* Constant-time compare of digests */
					if (timingsafe_bcmp(
					    desc->sd_desc->shd_digest,
					    hash, ses->ss_mlen) != 0)
						crp->crp_etype = EBADMSG;
				} else
					crypto_copyback(crp,
					    crp->crp_digest_start,
					    ses->ss_mlen,
					    desc->sd_desc->shd_digest);
			}
		}
		crypto_done(desc->sd_crp);

		SEC_DESC_FREE_POINTERS(desc);
		SEC_DESC_FREE_LT(sc, desc);
		SEC_DESC_QUEUED2FREE(sc);
	}

	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Do not restart submissions if the driver is being torn down */
	if (!sc->sc_shutdown) {
		wakeup = sc->sc_blocked;
		sc->sc_blocked = 0;
	}

	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	if (wakeup)
		crypto_unblock(sc->sc_cid, wakeup);
}
611 
612 static void
613 sec_secondary_intr(void *arg)
614 {
615 	struct sec_softc *sc = arg;
616 
617 	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
618 	sec_primary_intr(arg);
619 }
620 
621 static int
622 sec_controller_reset(struct sec_softc *sc)
623 {
624 	int timeout = SEC_TIMEOUT;
625 
626 	/* Reset Controller */
627 	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
628 
629 	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
630 		DELAY(1000);
631 		timeout -= 1000;
632 
633 		if (timeout < 0) {
634 			device_printf(sc->sc_dev, "timeout while waiting for "
635 			    "device reset!\n");
636 			return (ETIMEDOUT);
637 		}
638 	}
639 
640 	return (0);
641 }
642 
/*
 * Reset a single channel.  A "full" reset uses SEC_CHAN_CCR_R and is
 * followed by reprogramming the channel configuration; otherwise only
 * the continue bit (SEC_CHAN_CCR_CON) is pulsed for error recovery.
 * Returns ETIMEDOUT if the hardware does not clear the bit in time.
 */
static int
sec_channel_reset(struct sec_softc *sc, int channel, int full)
{
	int timeout = SEC_TIMEOUT;
	uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
	uint64_t reg;

	/* Reset Channel */
	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);

	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
		DELAY(1000);
		timeout -= 1000;

		if (timeout < 0) {
			device_printf(sc->sc_dev, "timeout while waiting for "
			    "channel reset!\n");
			return (ETIMEDOUT);
		}
	}

	if (full) {
		/* Re-enable done interrupts plus version-specific options */
		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;

		switch(sc->sc_version) {
		case 2:
			reg |= SEC_CHAN_CCR_CDWE;
			break;
		case 3:
			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
			break;
		}

		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
	}

	return (0);
}
682 
683 static int
684 sec_init(struct sec_softc *sc)
685 {
686 	uint64_t reg;
687 	int error, i;
688 
689 	/* Reset controller twice to clear all pending interrupts */
690 	error = sec_controller_reset(sc);
691 	if (error)
692 		return (error);
693 
694 	error = sec_controller_reset(sc);
695 	if (error)
696 		return (error);
697 
698 	/* Reset channels */
699 	for (i = 0; i < SEC_CHANNELS; i++) {
700 		error = sec_channel_reset(sc, i, 1);
701 		if (error)
702 			return (error);
703 	}
704 
705 	/* Enable Interrupts */
706 	reg = SEC_INT_ITO;
707 	for (i = 0; i < SEC_CHANNELS; i++)
708 		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
709 
710 	SEC_WRITE(sc, SEC_IER, reg);
711 
712 	return (error);
713 }
714 
715 static void
716 sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
717 {
718 	struct sec_dma_mem *dma_mem = arg;
719 
720 	if (error)
721 		return;
722 
723 	KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
724 	dma_mem->dma_paddr = segs->ds_addr;
725 }
726 
/*
 * bus_dmamap_load_crp() callback: walk the DMA segments and build a
 * hardware link table chain covering sdmi_size bytes of the buffer,
 * starting sdmi_offset bytes in.  Records the first/last entry and the
 * number of entries used in the sec_desc_map_info for the caller.
 */
static void
sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct sec_desc_map_info *sdmi = arg;
	struct sec_softc *sc = sdmi->sdmi_sc;
	struct sec_lt *lt = NULL;
	bus_addr_t addr;
	bus_size_t size;
	int i;

	SEC_LOCK_ASSERT(sc, descriptors);

	if (error)
		return;

	for (i = 0; i < nseg; i++) {
		addr = segs[i].ds_addr;
		size = segs[i].ds_len;

		/* Skip requested offset */
		if (sdmi->sdmi_offset >= size) {
			sdmi->sdmi_offset -= size;
			continue;
		}

		/* Start of the requested data within this segment */
		addr += sdmi->sdmi_offset;
		size -= sdmi->sdmi_offset;
		sdmi->sdmi_offset = 0;

		/* Do not link more than requested */
		if (sdmi->sdmi_size < size)
			size = sdmi->sdmi_size;

		lt = SEC_ALLOC_LT_ENTRY(sc);
		lt->sl_lt->shl_length = size;
		lt->sl_lt->shl_r = 0;
		lt->sl_lt->shl_n = 0;
		lt->sl_lt->shl_ptr = addr;

		/* Remember the head of the chain for the caller */
		if (sdmi->sdmi_lt_first == NULL)
			sdmi->sdmi_lt_first = lt;

		sdmi->sdmi_lt_used += 1;

		/* Stop once the requested byte count is fully covered */
		if ((sdmi->sdmi_size -= size) == 0)
			break;
	}

	/* Stays NULL if no entry was built (error or all-offset request) */
	sdmi->sdmi_lt_last = lt;
}
778 
/*
 * Allocate a single physically contiguous, 32-bit addressable DMA
 * buffer of the given size and record its tag/map/vaddr/paddr in
 * dma_mem.  Returns EBUSY if dma_mem is already in use; unwinds
 * partially created state on failure via err1..err3.
 */
static int
sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
    bus_size_t size)
{
	int error;

	/* Non-NULL vaddr means this dma_mem is already allocated */
	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	error = bus_dma_tag_create(NULL,	/* parent */
		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		size, 1,			/* maxsize, nsegments */
		size, 0,			/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		goto err1;
	}

	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate DMA safe"
		    " memory, error %i!\n", error);
		goto err2;
	}

	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
		    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
		    BUS_DMA_NOWAIT);

	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i\n", error);
		goto err3;
	}

	/* Allocated memory (not an externally mapped buffer) */
	dma_mem->dma_is_map = 0;
	return (0);

err3:
	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
err2:
	bus_dma_tag_destroy(dma_mem->dma_tag);
err1:
	dma_mem->dma_vaddr = NULL;
	return(error);
}
834 
/*
 * Map a crypto request buffer for DMA and build the link table chain
 * via sec_dma_map_desc_cb().  The maxsize for the tag is derived from
 * the buffer type; on success dma_mem is flagged as a mapping (not an
 * allocation) with the crp recorded as its vaddr marker.
 */
static int
sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
    struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi)
{
	int error;

	/* Non-NULL vaddr means this dma_mem is already in use */
	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	/* Derive the maximum mapping size from the buffer type */
	switch (crp->crp_buf.cb_type) {
	case CRYPTO_BUF_CONTIG:
		break;
	case CRYPTO_BUF_UIO:
		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
		break;
	case CRYPTO_BUF_MBUF:
		size = m_length(crp->crp_buf.cb_mbuf, NULL);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		size = crp->crp_buf.cb_mbuf->m_len;
		break;
	case CRYPTO_BUF_VMPAGE:
		size = PAGE_SIZE - crp->crp_buf.cb_vm_page_offset;
		break;
	default:
		return (EINVAL);
	}

	/* One segment per free link table entry, 32-bit addressable */
	error = bus_dma_tag_create(NULL,	/* parent */
		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		size,				/* maxsize */
		SEC_FREE_LT_CNT(sc),		/* nsegments */
		SEC_MAX_DMA_BLOCK_SIZE, 0,	/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		dma_mem->dma_vaddr = NULL;
		return (error);
	}

	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
		    "\n", error);
		bus_dma_tag_destroy(dma_mem->dma_tag);
		return (error);
	}

	error = bus_dmamap_load_crp(dma_mem->dma_tag, dma_mem->dma_map, crp,
	    sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);

	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i!\n", error);
		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
		bus_dma_tag_destroy(dma_mem->dma_tag);
		return (error);
	}

	/* Mark as a mapping so sec_free_dma_mem() destroys the map */
	dma_mem->dma_is_map = 1;
	dma_mem->dma_vaddr = crp;

	return (0);
}
906 
/*
 * Release DMA memory set up by sec_alloc_dma_mem() or mapped by
 * sec_desc_map_dma().  Safe to call on an unused or already freed
 * dma_mem (vaddr == NULL).
 */
static void
sec_free_dma_mem(struct sec_dma_mem *dma_mem)
{

	/* Check for double free */
	if (dma_mem->dma_vaddr == NULL)
		return;

	bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);

	/* Mappings own only the map; allocations own the memory itself */
	if (dma_mem->dma_is_map)
		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
	else
		bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
		    dma_mem->dma_map);

	bus_dma_tag_destroy(dma_mem->dma_tag);
	dma_mem->dma_vaddr = NULL;
}
926 
927 static int
928 sec_eu_channel(struct sec_softc *sc, int eu)
929 {
930 	uint64_t reg;
931 	int channel = 0;
932 
933 	SEC_LOCK_ASSERT(sc, controller);
934 
935 	reg = SEC_READ(sc, SEC_EUASR);
936 
937 	switch (eu) {
938 	case SEC_EU_AFEU:
939 		channel = SEC_EUASR_AFEU(reg);
940 		break;
941 	case SEC_EU_DEU:
942 		channel = SEC_EUASR_DEU(reg);
943 		break;
944 	case SEC_EU_MDEU_A:
945 	case SEC_EU_MDEU_B:
946 		channel = SEC_EUASR_MDEU(reg);
947 		break;
948 	case SEC_EU_RNGU:
949 		channel = SEC_EUASR_RNGU(reg);
950 		break;
951 	case SEC_EU_PKEU:
952 		channel = SEC_EUASR_PKEU(reg);
953 		break;
954 	case SEC_EU_AESU:
955 		channel = SEC_EUASR_AESU(reg);
956 		break;
957 	case SEC_EU_KEU:
958 		channel = SEC_EUASR_KEU(reg);
959 		break;
960 	case SEC_EU_CRCU:
961 		channel = SEC_EUASR_CRCU(reg);
962 		break;
963 	}
964 
965 	return (channel - 1);
966 }
967 
968 static int
969 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
970 {
971 	u_int fflvl = SEC_MAX_FIFO_LEVEL;
972 	uint64_t reg;
973 	int i;
974 
975 	SEC_LOCK_ASSERT(sc, controller);
976 
977 	/* Find free channel if have not got one */
978 	if (channel < 0) {
979 		for (i = 0; i < SEC_CHANNELS; i++) {
980 			reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
981 
982 			if ((reg & sc->sc_channel_idle_mask) == 0) {
983 				channel = i;
984 				break;
985 			}
986 		}
987 	}
988 
989 	/* There is no free channel */
990 	if (channel < 0)
991 		return (-1);
992 
993 	/* Check FIFO level on selected channel */
994 	reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
995 
996 	switch(sc->sc_version) {
997 	case 2:
998 		fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
999 		break;
1000 	case 3:
1001 		fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1002 		break;
1003 	}
1004 
1005 	if (fflvl >= SEC_MAX_FIFO_LEVEL)
1006 		return (-1);
1007 
1008 	/* Enqueue descriptor in channel */
1009 	SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1010 
1011 	return (channel);
1012 }
1013 
/*
 * Move as many ready descriptors as possible into hardware channels.
 * Channel selection prefers a channel already running one of the
 * descriptor's execution units; stops at the first descriptor that
 * cannot be queued (it is put back for a later attempt).
 */
static void
sec_enqueue(struct sec_softc *sc)
{
	struct sec_desc *desc;
	int ch0, ch1;

	SEC_LOCK(sc, descriptors);
	SEC_LOCK(sc, controller);

	while (SEC_READY_DESC_CNT(sc) > 0) {
		desc = SEC_GET_READY_DESC(sc);

		/* Channels currently running each selected EU (-1 = none) */
		ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
		ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);

		/*
		 * Both EU are used by the same channel.
		 * Enqueue descriptor in channel used by busy EUs.
		 */
		if (ch0 >= 0 && ch0 == ch1) {
			if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/*
		 * Only one EU is free.
		 * Enqueue descriptor in channel used by busy EU.
		 */
		if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
			if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
			    >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/*
		 * Both EU are free.
		 * Enqueue descriptor in first free channel.
		 */
		if (ch0 < 0 && ch1 < 0) {
			if (sec_enqueue_desc(sc, desc, -1) >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/* Current descriptor can not be queued at the moment */
		SEC_PUT_BACK_READY_DESC(sc);
		break;
	}

	SEC_UNLOCK(sc, controller);
	SEC_UNLOCK(sc, descriptors);
}
1071 
1072 static struct sec_desc *
1073 sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1074 {
1075 	struct sec_desc *desc = NULL;
1076 	int i;
1077 
1078 	SEC_LOCK_ASSERT(sc, descriptors);
1079 
1080 	for (i = 0; i < SEC_CHANNELS; i++) {
1081 		if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1082 			desc = &(sc->sc_desc[i]);
1083 			break;
1084 		}
1085 	}
1086 
1087 	return (desc);
1088 }
1089 
1090 static int
1091 sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1092     bus_addr_t data, bus_size_t dsize)
1093 {
1094 	struct sec_hw_desc_ptr *ptr;
1095 
1096 	SEC_LOCK_ASSERT(sc, descriptors);
1097 
1098 	ptr = &(desc->sd_desc->shd_pointer[n]);
1099 	ptr->shdp_length = dsize;
1100 	ptr->shdp_extent = 0;
1101 	ptr->shdp_j = 0;
1102 	ptr->shdp_ptr = data;
1103 
1104 	return (0);
1105 }
1106 
/*
 * Fill descriptor pointer register n with a scatter/gather reference:
 * map dsize bytes of the request buffer at doffset into a link table
 * chain and point the descriptor at its head (shdp_j = 1).
 */
static int
sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
    u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize)
{
	struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
	struct sec_hw_desc_ptr *ptr;
	int error;

	SEC_LOCK_ASSERT(sc, descriptors);

	error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), crp, dsize,
	    &sdmi);

	if (error)
		return (error);

	/* Terminate the chain and account for the entries consumed */
	sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
	desc->sd_lt_used += sdmi.sdmi_lt_used;

	ptr = &(desc->sd_desc->shd_pointer[n]);
	ptr->shdp_length = dsize;
	ptr->shdp_extent = 0;
	ptr->shdp_j = 1;
	ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;

	return (0);
}
1134 
1135 static bool
1136 sec_cipher_supported(const struct crypto_session_params *csp)
1137 {
1138 
1139 	switch (csp->csp_cipher_alg) {
1140 	case CRYPTO_AES_CBC:
1141 		/* AESU */
1142 		if (csp->csp_ivlen != AES_BLOCK_LEN)
1143 			return (false);
1144 		break;
1145 	default:
1146 		return (false);
1147 	}
1148 
1149 	if (csp->csp_cipher_klen == 0 || csp->csp_cipher_klen > SEC_MAX_KEY_LEN)
1150 		return (false);
1151 
1152 	return (true);
1153 }
1154 
1155 static bool
1156 sec_auth_supported(struct sec_softc *sc,
1157     const struct crypto_session_params *csp)
1158 {
1159 
1160 	switch (csp->csp_auth_alg) {
1161 	case CRYPTO_SHA2_384_HMAC:
1162 	case CRYPTO_SHA2_512_HMAC:
1163 		if (sc->sc_version < 3)
1164 			return (false);
1165 		/* FALLTHROUGH */
1166 	case CRYPTO_SHA1_HMAC:
1167 	case CRYPTO_SHA2_256_HMAC:
1168 		if (csp->csp_auth_klen > SEC_MAX_KEY_LEN)
1169 			return (false);
1170 		break;
1171 	case CRYPTO_SHA1:
1172 		break;
1173 	default:
1174 		return (false);
1175 	}
1176 	return (true);
1177 }
1178 
1179 static int
1180 sec_probesession(device_t dev, const struct crypto_session_params *csp)
1181 {
1182 	struct sec_softc *sc = device_get_softc(dev);
1183 
1184 	if (csp->csp_flags != 0)
1185 		return (EINVAL);
1186 	switch (csp->csp_mode) {
1187 	case CSP_MODE_DIGEST:
1188 		if (!sec_auth_supported(sc, csp))
1189 			return (EINVAL);
1190 		break;
1191 	case CSP_MODE_CIPHER:
1192 		if (!sec_cipher_supported(csp))
1193 			return (EINVAL);
1194 		break;
1195 	case CSP_MODE_ETA:
1196 		if (!sec_auth_supported(sc, csp) || !sec_cipher_supported(csp))
1197 			return (EINVAL);
1198 		break;
1199 	default:
1200 		return (EINVAL);
1201 	}
1202 	return (CRYPTODEV_PROBE_HARDWARE);
1203 }
1204 
1205 static int
1206 sec_newsession(device_t dev, crypto_session_t cses,
1207     const struct crypto_session_params *csp)
1208 {
1209 	struct sec_eu_methods *eu = sec_eus;
1210 	struct sec_session *ses;
1211 
1212 	ses = crypto_get_driver_session(cses);
1213 
1214 	/* Find EU for this session */
1215 	while (eu->sem_make_desc != NULL) {
1216 		if (eu->sem_newsession(csp))
1217 			break;
1218 		eu++;
1219 	}
1220 	KASSERT(eu->sem_make_desc != NULL, ("failed to find eu for session"));
1221 
1222 	/* Save cipher key */
1223 	if (csp->csp_cipher_key != NULL)
1224 		memcpy(ses->ss_key, csp->csp_cipher_key, csp->csp_cipher_klen);
1225 
1226 	/* Save digest key */
1227 	if (csp->csp_auth_key != NULL)
1228 		memcpy(ses->ss_mkey, csp->csp_auth_key, csp->csp_auth_klen);
1229 
1230 	if (csp->csp_auth_alg != 0) {
1231 		if (csp->csp_auth_mlen == 0)
1232 			ses->ss_mlen = crypto_auth_hash(csp)->hashsize;
1233 		else
1234 			ses->ss_mlen = csp->csp_auth_mlen;
1235 	}
1236 
1237 	return (0);
1238 }
1239 
/*
 * CRYPTODEV_PROCESS method: build a hardware descriptor for the request
 * and hand it to the engine.  Returns 0 in all cases where the request's
 * fate is reported via crp_etype/crypto_done(), or ERESTART to ask the
 * framework to requeue when no free descriptor is available or the
 * driver is shutting down.
 */
static int
sec_process(device_t dev, struct cryptop *crp, int hint)
{
	struct sec_softc *sc = device_get_softc(dev);
	struct sec_desc *desc = NULL;
	const struct crypto_session_params *csp;
	struct sec_session *ses;
	int error = 0;

	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	/* Check for input length */
	if (crypto_buffer_len(&crp->crp_buf) > SEC_MAX_DMA_BLOCK_SIZE) {
		crp->crp_etype = E2BIG;
		crypto_done(crp);
		return (0);
	}

	SEC_LOCK(sc, descriptors);
	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Block driver if there is no free descriptors or we are going down */
	if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
		sc->sc_blocked |= CRYPTO_SYMQ;
		SEC_UNLOCK(sc, descriptors);
		return (ERESTART);
	}

	/* Prepare descriptor */
	desc = SEC_GET_FREE_DESC(sc);
	desc->sd_lt_used = 0;
	desc->sd_error = 0;
	desc->sd_crp = crp;

	/* Copy the IV into the hardware descriptor for cipher sessions. */
	if (csp->csp_cipher_alg != 0)
		crypto_read_iv(crp, desc->sd_desc->shd_iv);

	/* Per-request keys, if supplied, replace the session keys. */
	if (crp->crp_cipher_key != NULL)
		memcpy(ses->ss_key, crp->crp_cipher_key, csp->csp_cipher_klen);

	if (crp->crp_auth_key != NULL)
		memcpy(ses->ss_mkey, crp->crp_auth_key, csp->csp_auth_klen);

	/* Stage the (possibly updated) keys into the hardware descriptor. */
	memcpy(desc->sd_desc->shd_key, ses->ss_key, csp->csp_cipher_klen);
	memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, csp->csp_auth_klen);

	/* Let the session's execution unit fill in the descriptor body. */
	error = ses->ss_eu->sem_make_desc(sc, csp, desc, crp);

	if (error) {
		/* Unwind the descriptor allocation and fail the request. */
		SEC_DESC_FREE_POINTERS(desc);
		SEC_DESC_PUT_BACK_LT(sc, desc);
		SEC_PUT_BACK_FREE_DESC(sc);
		SEC_UNLOCK(sc, descriptors);
		crp->crp_etype = error;
		crypto_done(crp);
		return (0);
	}

	/*
	 * Skip DONE interrupt if this is not last request in burst, but only
	 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE
	 * signaling on each descriptor.
	 */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
		desc->sd_desc->shd_dn = 0;
	else
		desc->sd_desc->shd_dn = 1;

	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	SEC_DESC_FREE2READY(sc);
	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	return (0);
}
1320 
1321 static int
1322 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1323     const struct crypto_session_params *csp, struct cryptop *crp)
1324 {
1325 	struct sec_hw_desc *hd = desc->sd_desc;
1326 	int error;
1327 
1328 	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1329 	hd->shd_eu_sel1 = SEC_EU_NONE;
1330 	hd->shd_mode1 = 0;
1331 
1332 	/* Pointer 0: NULL */
1333 	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1334 	if (error)
1335 		return (error);
1336 
1337 	/* Pointer 1: IV IN */
1338 	error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1339 	    offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
1340 	if (error)
1341 		return (error);
1342 
1343 	/* Pointer 2: Cipher Key */
1344 	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1345 	    offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
1346  	if (error)
1347 		return (error);
1348 
1349 	/* Pointer 3: Data IN */
1350 	error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
1351 	    crp->crp_payload_length);
1352 	if (error)
1353 		return (error);
1354 
1355 	/* Pointer 4: Data OUT */
1356 	error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
1357 	    crp->crp_payload_length);
1358 	if (error)
1359 		return (error);
1360 
1361 	/* Pointer 5: IV OUT (Not used: NULL) */
1362 	error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1363 	if (error)
1364 		return (error);
1365 
1366 	/* Pointer 6: NULL */
1367 	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1368 
1369 	return (error);
1370 }
1371 
1372 static int
1373 sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1374     const struct crypto_session_params *csp, struct cryptop *crp)
1375 {
1376 	struct sec_hw_desc *hd = desc->sd_desc;
1377 	u_int eu, mode, hashlen;
1378 	int error;
1379 
1380 	error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
1381 	if (error)
1382 		return (error);
1383 
1384 	hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1385 	hd->shd_eu_sel1 = eu;
1386 	hd->shd_mode1 = mode;
1387 
1388 	/* Pointer 0: HMAC Key */
1389 	error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1390 	    offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen);
1391 	if (error)
1392 		return (error);
1393 
1394 	/* Pointer 1: HMAC-Only Data IN */
1395 	error = sec_make_pointer(sc, desc, 1, crp, crp->crp_aad_start,
1396 	    crp->crp_aad_length);
1397 	if (error)
1398 		return (error);
1399 
1400 	/* Pointer 2: Cipher Key */
1401 	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1402 	    offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
1403  	if (error)
1404 		return (error);
1405 
1406 	/* Pointer 3: IV IN */
1407 	error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1408 	    offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
1409 	if (error)
1410 		return (error);
1411 
1412 	/* Pointer 4: Data IN */
1413 	error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
1414 	    crp->crp_payload_length);
1415 	if (error)
1416 		return (error);
1417 
1418 	/* Pointer 5: Data OUT */
1419 	error = sec_make_pointer(sc, desc, 5, crp, crp->crp_payload_start,
1420 	    crp->crp_payload_length);
1421 	if (error)
1422 		return (error);
1423 
1424 	/* Pointer 6: HMAC OUT */
1425 	error = sec_make_pointer_direct(sc, desc, 6, desc->sd_desc_paddr +
1426 	    offsetof(struct sec_hw_desc, shd_digest), hashlen);
1427 
1428 	return (error);
1429 }
1430 
1431 /* AESU */
1432 
1433 static bool
1434 sec_aesu_newsession(const struct crypto_session_params *csp)
1435 {
1436 
1437 	return (csp->csp_cipher_alg == CRYPTO_AES_CBC);
1438 }
1439 
1440 static int
1441 sec_aesu_make_desc(struct sec_softc *sc,
1442     const struct crypto_session_params *csp, struct sec_desc *desc,
1443     struct cryptop *crp)
1444 {
1445 	struct sec_hw_desc *hd = desc->sd_desc;
1446 	int error;
1447 
1448 	hd->shd_eu_sel0 = SEC_EU_AESU;
1449 	hd->shd_mode0 = SEC_AESU_MODE_CBC;
1450 
1451 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
1452 		hd->shd_mode0 |= SEC_AESU_MODE_ED;
1453 		hd->shd_dir = 0;
1454 	} else
1455 		hd->shd_dir = 1;
1456 
1457 	if (csp->csp_mode == CSP_MODE_ETA)
1458 		error = sec_build_common_s_desc(sc, desc, csp, crp);
1459 	else
1460 		error = sec_build_common_ns_desc(sc, desc, csp, crp);
1461 
1462 	return (error);
1463 }
1464 
1465 /* MDEU */
1466 
1467 static bool
1468 sec_mdeu_can_handle(u_int alg)
1469 {
1470 	switch (alg) {
1471 	case CRYPTO_SHA1:
1472 	case CRYPTO_SHA1_HMAC:
1473 	case CRYPTO_SHA2_256_HMAC:
1474 	case CRYPTO_SHA2_384_HMAC:
1475 	case CRYPTO_SHA2_512_HMAC:
1476 		return (true);
1477 	default:
1478 		return (false);
1479 	}
1480 }
1481 
1482 static int
1483 sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode,
1484     u_int *hashlen)
1485 {
1486 
1487 	*mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1488 	*eu = SEC_EU_NONE;
1489 
1490 	switch (csp->csp_auth_alg) {
1491 	case CRYPTO_SHA1_HMAC:
1492 		*mode |= SEC_MDEU_MODE_HMAC;
1493 		/* FALLTHROUGH */
1494 	case CRYPTO_SHA1:
1495 		*eu = SEC_EU_MDEU_A;
1496 		*mode |= SEC_MDEU_MODE_SHA1;
1497 		*hashlen = SHA1_HASH_LEN;
1498 		break;
1499 	case CRYPTO_SHA2_256_HMAC:
1500 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1501 		*eu = SEC_EU_MDEU_A;
1502 		break;
1503 	case CRYPTO_SHA2_384_HMAC:
1504 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1505 		*eu = SEC_EU_MDEU_B;
1506 		break;
1507 	case CRYPTO_SHA2_512_HMAC:
1508 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1509 		*eu = SEC_EU_MDEU_B;
1510 		break;
1511 	default:
1512 		return (EINVAL);
1513 	}
1514 
1515 	if (*mode & SEC_MDEU_MODE_HMAC)
1516 		*hashlen = SEC_HMAC_HASH_LEN;
1517 
1518 	return (0);
1519 }
1520 
1521 static bool
1522 sec_mdeu_newsession(const struct crypto_session_params *csp)
1523 {
1524 
1525 	return (sec_mdeu_can_handle(csp->csp_auth_alg));
1526 }
1527 
1528 static int
1529 sec_mdeu_make_desc(struct sec_softc *sc,
1530     const struct crypto_session_params *csp,
1531     struct sec_desc *desc, struct cryptop *crp)
1532 {
1533 	struct sec_hw_desc *hd = desc->sd_desc;
1534 	u_int eu, mode, hashlen;
1535 	int error;
1536 
1537 	error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
1538 	if (error)
1539 		return (error);
1540 
1541 	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1542 	hd->shd_eu_sel0 = eu;
1543 	hd->shd_mode0 = mode;
1544 	hd->shd_eu_sel1 = SEC_EU_NONE;
1545 	hd->shd_mode1 = 0;
1546 
1547 	/* Pointer 0: NULL */
1548 	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1549 	if (error)
1550 		return (error);
1551 
1552 	/* Pointer 1: Context In (Not used: NULL) */
1553 	error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1554 	if (error)
1555 		return (error);
1556 
1557 	/* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1558 	if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1559 		error = sec_make_pointer_direct(sc, desc, 2,
1560 		    desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1561 		    shd_mkey), csp->csp_auth_klen);
1562 	else
1563 		error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1564 
1565 	if (error)
1566 		return (error);
1567 
1568 	/* Pointer 3: Input Data */
1569 	error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
1570 	    crp->crp_payload_length);
1571 	if (error)
1572 		return (error);
1573 
1574 	/* Pointer 4: NULL */
1575 	error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1576 	if (error)
1577 		return (error);
1578 
1579 	/* Pointer 5: Hash out */
1580 	error = sec_make_pointer_direct(sc, desc, 5, desc->sd_desc_paddr +
1581 	    offsetof(struct sec_hw_desc, shd_digest), hashlen);
1582 	if (error)
1583 		return (error);
1584 
1585 	/* Pointer 6: NULL */
1586 	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1587 
1588 	return (0);
1589 }
1590