xref: /freebsd/sys/dev/sec/sec.c (revision eb69d1f144a6fcc765d1b9d44a5ae8082353e70b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
19  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and
30  * 3.0 are supported.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mbuf.h>
44 #include <sys/module.h>
45 #include <sys/mutex.h>
46 #include <sys/random.h>
47 #include <sys/rman.h>
48 
49 #include <machine/_inttypes.h>
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 
53 #include <opencrypto/cryptodev.h>
54 #include "cryptodev_if.h"
55 
56 #include <dev/ofw/ofw_bus_subr.h>
57 #include <dev/sec/sec.h>
58 
59 static int	sec_probe(device_t dev);
60 static int	sec_attach(device_t dev);
61 static int	sec_detach(device_t dev);
62 static int	sec_suspend(device_t dev);
63 static int	sec_resume(device_t dev);
64 static int	sec_shutdown(device_t dev);
65 static void	sec_primary_intr(void *arg);
66 static void	sec_secondary_intr(void *arg);
67 static int	sec_setup_intr(struct sec_softc *sc, struct resource **ires,
68     void **ihand, int *irid, driver_intr_t handler, const char *iname);
69 static void	sec_release_intr(struct sec_softc *sc, struct resource *ires,
70     void *ihand, int irid, const char *iname);
71 static int	sec_controller_reset(struct sec_softc *sc);
72 static int	sec_channel_reset(struct sec_softc *sc, int channel, int full);
73 static int	sec_init(struct sec_softc *sc);
74 static int	sec_alloc_dma_mem(struct sec_softc *sc,
75     struct sec_dma_mem *dma_mem, bus_size_t size);
76 static int	sec_desc_map_dma(struct sec_softc *sc,
77     struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type,
78     struct sec_desc_map_info *sdmi);
79 static void	sec_free_dma_mem(struct sec_dma_mem *dma_mem);
80 static void	sec_enqueue(struct sec_softc *sc);
81 static int	sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
82     int channel);
83 static int	sec_eu_channel(struct sec_softc *sc, int eu);
84 static int	sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
85     u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype);
86 static int	sec_make_pointer_direct(struct sec_softc *sc,
87     struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
88 static int	sec_alloc_session(struct sec_softc *sc);
89 static int	sec_newsession(device_t dev, u_int32_t *sidp,
90     struct cryptoini *cri);
91 static int	sec_freesession(device_t dev, uint64_t tid);
92 static int	sec_process(device_t dev, struct cryptop *crp, int hint);
93 static int	sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
94     struct cryptoini **mac);
95 static int	sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
96     struct cryptodesc **mac);
97 static int	sec_build_common_ns_desc(struct sec_softc *sc,
98     struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
99     struct cryptodesc *enc, int buftype);
100 static int	sec_build_common_s_desc(struct sec_softc *sc,
101     struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
102     struct cryptodesc *enc, struct cryptodesc *mac, int buftype);
103 
104 static struct sec_session *sec_get_session(struct sec_softc *sc, u_int sid);
105 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
106 
107 /* AESU */
108 static int	sec_aesu_newsession(struct sec_softc *sc,
109     struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
110 static int	sec_aesu_make_desc(struct sec_softc *sc,
111     struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
112     int buftype);
113 
114 /* DEU */
115 static int	sec_deu_newsession(struct sec_softc *sc,
116     struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
117 static int	sec_deu_make_desc(struct sec_softc *sc,
118     struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
119     int buftype);
120 
121 /* MDEU */
122 static int	sec_mdeu_can_handle(u_int alg);
123 static int	sec_mdeu_config(struct cryptodesc *crd,
124     u_int *eu, u_int *mode, u_int *hashlen);
125 static int	sec_mdeu_newsession(struct sec_softc *sc,
126     struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
127 static int	sec_mdeu_make_desc(struct sec_softc *sc,
128     struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
129     int buftype);
130 
/*
 * Kernel interface table: newbus device lifecycle entry points plus the
 * opencrypto (OCF) session and request-processing hooks.
 */
static device_method_t sec_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sec_probe),
	DEVMETHOD(device_attach,	sec_attach),
	DEVMETHOD(device_detach,	sec_detach),

	DEVMETHOD(device_suspend,	sec_suspend),
	DEVMETHOD(device_resume,	sec_resume),
	DEVMETHOD(device_shutdown,	sec_shutdown),

	/* Crypto methods */
	DEVMETHOD(cryptodev_newsession,	sec_newsession),
	DEVMETHOD(cryptodev_freesession,sec_freesession),
	DEVMETHOD(cryptodev_process,	sec_process),

	DEVMETHOD_END
};
/*
 * newbus glue: the SEC is instantiated from the FDT via simplebus and
 * requires the opencrypto framework to be loaded.
 */
static driver_t sec_driver = {
	"sec",
	sec_methods,
	sizeof(struct sec_softc),
};

static devclass_t sec_devclass;
DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
MODULE_DEPEND(sec, crypto, 1, 1, 1);
157 
/*
 * NULL-terminated table of execution-unit drivers.  Each entry pairs a
 * session-setup routine with the matching descriptor builder for one EU
 * (AESU, DEU, MDEU).
 */
static struct sec_eu_methods sec_eus[] = {
	{
		sec_aesu_newsession,
		sec_aesu_make_desc,
	},
	{
		sec_deu_newsession,
		sec_deu_make_desc,
	},
	{
		sec_mdeu_newsession,
		sec_mdeu_make_desc,
	},
	{ NULL, NULL }
};
173 
174 static inline void
175 sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
176 {
177 
178 	/* Sync only if dma memory is valid */
179 	if (dma_mem->dma_vaddr != NULL)
180 		bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
181 }
182 
/*
 * Mark a session slot as unused (under the sessions lock) so that it can
 * be handed out again by the session allocator.
 */
static inline void
sec_free_session(struct sec_softc *sc, struct sec_session *ses)
{

	SEC_LOCK(sc, sessions);
	ses->ss_used = 0;
	SEC_UNLOCK(sc, sessions);
}
191 
/* Return the virtual address that descriptor pointer slot n was mapped from. */
static inline void *
sec_get_pointer_data(struct sec_desc *desc, u_int n)
{

	return (desc->sd_ptr_dmem[n].dma_vaddr);
}
198 
/*
 * Probe: verify the FDT node is a supported SEC block and identify the
 * hardware revision.  The register window is mapped only long enough to
 * read SEC_ID and is released again before returning; attach maps it
 * anew.  sc_version set here is later consulted by attach.
 */
static int
sec_probe(device_t dev)
{
	struct sec_softc *sc;
	uint64_t id;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
		return (ENXIO);

	sc = device_get_softc(dev);

	/* Temporarily map registers to read the identification register */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL)
		return (ENXIO);

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	id = SEC_READ(sc, SEC_ID);

	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);

	switch (id) {
	case SEC_20_ID:
		device_set_desc(dev, "Freescale Security Engine 2.0");
		sc->sc_version = 2;
		break;
	case SEC_30_ID:
		device_set_desc(dev, "Freescale Security Engine 3.0");
		sc->sc_version = 3;
		break;
	case SEC_31_ID:
		device_set_desc(dev, "Freescale Security Engine 3.1");
		/* 3.1 is driven exactly like 3.0 */
		sc->sc_version = 3;
		break;
	default:
		device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
		return (ENXIO);
	}

	return (0);
}
247 
/*
 * Attach: register with OCF, initialize locks, map the register window,
 * hook up interrupts, carve the DMA areas into the descriptor array and
 * the circular link table, reset/initialize the hardware and finally
 * advertise the supported algorithms.  Resources are unwound in reverse
 * order through the fail* labels on error.
 *
 * NOTE(review): the crypto driver ID obtained from crypto_get_driverid()
 * is not released on the failure paths below — confirm whether a
 * crypto_unregister_all() belongs there.
 */
static int
sec_attach(device_t dev)
{
	struct sec_softc *sc;
	struct sec_hw_lt *lt;
	int error = 0;
	int i;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_blocked = 0;
	sc->sc_shutdown = 0;

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver ID!\n");
		return (ENXIO);
	}

	/* Init locks */
	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
	    "SEC Controller lock", MTX_DEF);
	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
	    "SEC Descriptors lock", MTX_DEF);
	mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
	    "SEC Sessions lock", MTX_DEF);

	/* Allocate I/O memory for SEC registers */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate I/O memory!\n");
		goto fail1;
	}

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* Setup interrupts */
	sc->sc_pri_irid = 0;
	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
	    &sc->sc_pri_irid, sec_primary_intr, "primary");

	if (error)
		goto fail2;


	/* SEC 3.x additionally routes a secondary interrupt line */
	if (sc->sc_version == 3) {
		sc->sc_sec_irid = 1;
		error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
		    &sc->sc_sec_irid, sec_secondary_intr, "secondary");

		if (error)
			goto fail3;
	}

	/* Alloc DMA memory for descriptors and link tables */
	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));

	if (error)
		goto fail4;

	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));

	if (error)
		goto fail5;

	/* Fill in descriptors and link tables */
	for (i = 0; i < SEC_DESCRIPTORS; i++) {
		sc->sc_desc[i].sd_desc =
		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_desc));
	}

	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
		sc->sc_lt[i].sl_lt =
		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_lt));
	}

	/* Last entry in link table is used to create a circle */
	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
	lt->shl_length = 0;
	lt->shl_r = 0;
	lt->shl_n = 1;
	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;

	/* Init descriptor and link table queues pointers */
	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);

	/* Create masks for fast checks */
	sc->sc_int_error_mask = 0;
	for (i = 0; i < SEC_CHANNELS; i++)
		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));

	switch (sc->sc_version) {
	case 2:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
		break;
	case 3:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
		break;
	}

	/* Init hardware */
	error = sec_init(sc);

	if (error)
		goto fail6;

	/* Register in OCF (AESU) */
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);

	/* Register in OCF (DEU) */
	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);

	/* Register in OCF (MDEU) */
	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
	if (sc->sc_version >= 3) {
		crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0);
	}

	return (0);

fail6:
	sec_free_dma_mem(&(sc->sc_lt_dmem));
fail5:
	sec_free_dma_mem(&(sc->sc_desc_dmem));
fail4:
	/* Harmless if never set up: sec_release_intr() ignores a NULL ires */
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");
fail3:
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
fail2:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
fail1:
	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);
	mtx_destroy(&sc->sc_sessions_lock);

	return (ENXIO);
}
418 
/*
 * Detach: stop accepting new work, drain the hardware queues (or forcibly
 * reset the channels on timeout), then unregister from OCF and release
 * interrupts, DMA memory, the register window and the locks.
 */
static int
sec_detach(device_t dev)
{
	struct sec_softc *sc = device_get_softc(dev);
	int i, error, timeout = SEC_TIMEOUT;

	/* Prepare driver to shutdown */
	SEC_LOCK(sc, descriptors);
	sc->sc_shutdown = 1;
	SEC_UNLOCK(sc, descriptors);

	/* Wait until all queued processing finishes */
	while (1) {
		SEC_LOCK(sc, descriptors);
		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
		SEC_UNLOCK(sc, descriptors);

		if (i == 0)
			break;

		if (timeout < 0) {
			device_printf(dev, "queue flush timeout!\n");

			/* DMA can be still active - stop it */
			for (i = 0; i < SEC_CHANNELS; i++)
				sec_channel_reset(sc, i, 1);

			break;
		}

		timeout -= 1000;
		DELAY(1000);
	}

	/* Disable interrupts */
	SEC_WRITE(sc, SEC_IER, 0);

	/* Unregister from OCF */
	crypto_unregister_all(sc->sc_cid);

	/* Free DMA memory */
	for (i = 0; i < SEC_DESCRIPTORS; i++)
		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));

	sec_free_dma_mem(&(sc->sc_lt_dmem));
	sec_free_dma_mem(&(sc->sc_desc_dmem));

	/* Release interrupts */
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");

	/* Release memory */
	if (sc->sc_rres) {
		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
		    sc->sc_rres);
		if (error)
			device_printf(dev, "bus_release_resource() failed for"
			    " I/O memory, error %d\n", error);

		sc->sc_rres = NULL;
	}

	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);
	mtx_destroy(&sc->sc_sessions_lock);

	return (0);
}
489 
/* Suspend stub - nothing to do. */
static int
sec_suspend(device_t dev)
{

	return (0);
}
496 
/* Resume stub - nothing to do. */
static int
sec_resume(device_t dev)
{

	return (0);
}
503 
/* Shutdown stub - nothing to do. */
static int
sec_shutdown(device_t dev)
{

	return (0);
}
510 
511 static int
512 sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
513     int *irid, driver_intr_t handler, const char *iname)
514 {
515 	int error;
516 
517 	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
518 	    RF_ACTIVE);
519 
520 	if ((*ires) == NULL) {
521 		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
522 		return (ENXIO);
523 	}
524 
525 	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
526 	    NULL, handler, sc, ihand);
527 
528 	if (error) {
529 		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
530 		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
531 			device_printf(sc->sc_dev, "could not release %s IRQ\n",
532 			    iname);
533 
534 		(*ires) = NULL;
535 		return (error);
536 	}
537 
538 	return (0);
539 }
540 
541 static void
542 sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
543     int irid, const char *iname)
544 {
545 	int error;
546 
547 	if (ires == NULL)
548 		return;
549 
550 	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
551 	if (error)
552 		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
553 		    " IRQ, error %d\n", iname, error);
554 
555 	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
556 	if (error)
557 		device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
558 		    " IRQ, error %d\n", iname, error);
559 }
560 
/*
 * Primary interrupt handler.  First, channel errors are handled under the
 * controller lock: the failing descriptor (located via the channel's
 * current descriptor pointer register) is marked EIO and the channel gets
 * a partial reset.  Then, under the descriptors lock, completed
 * descriptors are retired in queue order - processing stops at the first
 * descriptor that is neither done (shd_done == 0xFF) nor failed.
 * Finally ready descriptors are pushed to the hardware and OCF is
 * unblocked if the driver had previously reported itself busy.
 */
static void
sec_primary_intr(void *arg)
{
	struct sec_softc *sc = arg;
	struct sec_desc *desc;
	uint64_t isr;
	int i, wakeup = 0;

	SEC_LOCK(sc, controller);

	/* Check for errors */
	isr = SEC_READ(sc, SEC_ISR);
	if (isr & sc->sc_int_error_mask) {
		/* Check each channel for error */
		for (i = 0; i < SEC_CHANNELS; i++) {
			if ((isr & SEC_INT_CH_ERR(i)) == 0)
				continue;

			device_printf(sc->sc_dev,
			    "I/O error on channel %i!\n", i);

			/* Find and mark problematic descriptor */
			desc = sec_find_desc(sc, SEC_READ(sc,
			    SEC_CHAN_CDPR(i)));

			if (desc != NULL)
				desc->sd_error = EIO;

			/* Do partial channel reset */
			sec_channel_reset(sc, i, 0);
		}
	}

	/* ACK interrupt */
	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);

	SEC_UNLOCK(sc, controller);
	SEC_LOCK(sc, descriptors);

	/* Handle processed descriptors */
	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
		desc = SEC_GET_QUEUED_DESC(sc);

		/* Stop at the first descriptor still owned by the hardware */
		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
			SEC_PUT_BACK_QUEUED_DESC(sc);
			break;
		}

		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		/* Hand the finished request (or its error) back to OCF */
		desc->sd_crp->crp_etype = desc->sd_error;
		crypto_done(desc->sd_crp);

		SEC_DESC_FREE_POINTERS(desc);
		SEC_DESC_FREE_LT(sc, desc);
		SEC_DESC_QUEUED2FREE(sc);
	}

	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (!sc->sc_shutdown) {
		wakeup = sc->sc_blocked;
		sc->sc_blocked = 0;
	}

	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	/* Notify OCF outside of the locks */
	if (wakeup)
		crypto_unblock(sc->sc_cid, wakeup);
}
637 
/*
 * Secondary interrupt handler (set up on SEC 3.x only).  No work is
 * expected here, so log the event and delegate to the primary handler to
 * process and ACK whatever is pending.
 */
static void
sec_secondary_intr(void *arg)
{
	struct sec_softc *sc = arg;

	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
	sec_primary_intr(arg);
}
646 
647 static int
648 sec_controller_reset(struct sec_softc *sc)
649 {
650 	int timeout = SEC_TIMEOUT;
651 
652 	/* Reset Controller */
653 	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
654 
655 	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
656 		DELAY(1000);
657 		timeout -= 1000;
658 
659 		if (timeout < 0) {
660 			device_printf(sc->sc_dev, "timeout while waiting for "
661 			    "device reset!\n");
662 			return (ETIMEDOUT);
663 		}
664 	}
665 
666 	return (0);
667 }
668 
/*
 * Reset a single channel.  With full != 0 a complete reset (CCR_R) is
 * requested and the channel configuration is re-programmed afterwards;
 * otherwise only a "continue" recovery (CCR_CON) is issued, as done after
 * an I/O error.  Returns ETIMEDOUT if the hardware never clears the
 * requested reset bit.
 */
static int
sec_channel_reset(struct sec_softc *sc, int channel, int full)
{
	int timeout = SEC_TIMEOUT;
	uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
	uint64_t reg;

	/* Reset Channel */
	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);

	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
		DELAY(1000);
		timeout -= 1000;

		if (timeout < 0) {
			device_printf(sc->sc_dev, "timeout while waiting for "
			    "channel reset!\n");
			return (ETIMEDOUT);
		}
	}

	if (full) {
		/* Re-program version-specific channel configuration bits */
		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;

		switch(sc->sc_version) {
		case 2:
			reg |= SEC_CHAN_CCR_CDWE;
			break;
		case 3:
			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
			break;
		}

		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
	}

	return (0);
}
708 
/*
 * Bring the engine to a known state: reset the controller twice (to clear
 * all pending interrupts), fully reset every channel, then enable the
 * time-out interrupt plus per-channel done and error interrupts.
 */
static int
sec_init(struct sec_softc *sc)
{
	uint64_t reg;
	int error, i;

	/* Reset controller twice to clear all pending interrupts */
	error = sec_controller_reset(sc);
	if (error)
		return (error);

	error = sec_controller_reset(sc);
	if (error)
		return (error);

	/* Reset channels */
	for (i = 0; i < SEC_CHANNELS; i++) {
		error = sec_channel_reset(sc, i, 1);
		if (error)
			return (error);
	}

	/* Enable Interrupts */
	reg = SEC_INT_ITO;
	for (i = 0; i < SEC_CHANNELS; i++)
		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);

	SEC_WRITE(sc, SEC_IER, reg);

	/* error is 0 here - all resets succeeded */
	return (error);
}
740 
741 static void
742 sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
743 {
744 	struct sec_dma_mem *dma_mem = arg;
745 
746 	if (error)
747 		return;
748 
749 	KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
750 	dma_mem->dma_paddr = segs->ds_addr;
751 }
752 
/*
 * busdma load callback: translate the DMA segment list into hardware
 * link-table entries, honoring the starting offset and total size
 * requested in the sec_desc_map_info.  Records the first/last entries
 * and the number of entries consumed; the caller (sec_make_pointer())
 * later sets the return bit on the last entry to terminate the chain.
 */
static void
sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct sec_desc_map_info *sdmi = arg;
	struct sec_softc *sc = sdmi->sdmi_sc;
	struct sec_lt *lt = NULL;
	bus_addr_t addr;
	bus_size_t size;
	int i;

	SEC_LOCK_ASSERT(sc, descriptors);

	if (error)
		return;

	for (i = 0; i < nseg; i++) {
		addr = segs[i].ds_addr;
		size = segs[i].ds_len;

		/* Skip requested offset */
		if (sdmi->sdmi_offset >= size) {
			sdmi->sdmi_offset -= size;
			continue;
		}

		/* Segment partially covered by the offset: trim its head */
		addr += sdmi->sdmi_offset;
		size -= sdmi->sdmi_offset;
		sdmi->sdmi_offset = 0;

		/* Do not link more than requested */
		if (sdmi->sdmi_size < size)
			size = sdmi->sdmi_size;

		lt = SEC_ALLOC_LT_ENTRY(sc);
		lt->sl_lt->shl_length = size;
		lt->sl_lt->shl_r = 0;
		lt->sl_lt->shl_n = 0;
		lt->sl_lt->shl_ptr = addr;

		if (sdmi->sdmi_lt_first == NULL)
			sdmi->sdmi_lt_first = lt;

		sdmi->sdmi_lt_used += 1;

		/* Stop once the requested number of bytes has been linked */
		if ((sdmi->sdmi_size -= size) == 0)
			break;
	}

	sdmi->sdmi_lt_last = lt;
}
804 
/*
 * Callback variant with the extra mapped-size argument required by
 * bus_dmamap_load_mbuf()/load_uio(); the size is unused, so simply
 * delegate to sec_dma_map_desc_cb().
 */
static void
sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t size, int error)
{

	sec_dma_map_desc_cb(arg, segs, nseg, error);
}
812 
/*
 * Allocate a contiguous, 32-bit-addressable, zeroed DMA buffer of the
 * given size and record tag/map/vaddr/paddr in *dma_mem.  Returns EBUSY
 * if the slot is already in use; on other failures every busdma object
 * created so far is rolled back via the err* labels.  dma_is_map == 0
 * marks the slot as bus_dmamem_alloc()ed memory (as opposed to a mapping
 * of caller memory), which sec_free_dma_mem() relies on.
 */
static int
sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
    bus_size_t size)
{
	int error;

	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	error = bus_dma_tag_create(NULL,	/* parent */
		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		size, 1,			/* maxsize, nsegments */
		size, 0,			/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		goto err1;
	}

	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate DMA safe"
		    " memory, error %i!\n", error);
		goto err2;
	}

	/* Single-segment load; the callback stores dma_paddr */
	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
		    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
		    BUS_DMA_NOWAIT);

	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i\n", error);
		goto err3;
	}

	dma_mem->dma_is_map = 0;
	return (0);

err3:
	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
err2:
	bus_dma_tag_destroy(dma_mem->dma_tag);
err1:
	dma_mem->dma_vaddr = NULL;
	return(error);
}
868 
/*
 * Map caller-provided memory (flat buffer, uio or mbuf chain) for device
 * access and build link-table entries for every DMA segment through the
 * sec_dma_map_desc_cb callbacks.  The mapping is recorded in *dma_mem
 * with dma_is_map == 1 so sec_free_dma_mem() later destroys the map
 * instead of freeing the memory.  Returns EBUSY if the slot is in use,
 * EINVAL for an unknown buffer type.
 */
static int
sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem,
    bus_size_t size, int type, struct sec_desc_map_info *sdmi)
{
	int error;

	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	/* Determine the maximum mapping size for the tag */
	switch (type) {
	case SEC_MEMORY:
		break;
	case SEC_UIO:
		/* Total length unknown here; use an upper bound */
		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
		break;
	case SEC_MBUF:
		size = m_length((struct mbuf*)mem, NULL);
		break;
	default:
		return (EINVAL);
	}

	/* One segment per free link-table entry at most */
	error = bus_dma_tag_create(NULL,	/* parent */
		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		size,				/* maxsize */
		SEC_FREE_LT_CNT(sc),		/* nsegments */
		SEC_MAX_DMA_BLOCK_SIZE, 0,	/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		dma_mem->dma_vaddr = NULL;
		return (error);
	}

	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
		    "\n", error);
		bus_dma_tag_destroy(dma_mem->dma_tag);
		return (error);
	}

	/* Load with the buffer-type-specific busdma routine */
	switch (type) {
	case SEC_MEMORY:
		error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
		    mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
		break;
	case SEC_UIO:
		error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map,
		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
		break;
	case SEC_MBUF:
		error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map,
		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
		break;
	}

	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i!\n", error);
		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
		bus_dma_tag_destroy(dma_mem->dma_tag);
		return (error);
	}

	dma_mem->dma_is_map = 1;
	dma_mem->dma_vaddr = mem;

	return (0);
}
946 
/*
 * Release a DMA memory slot: unload the map, then either destroy the map
 * (slot mapped from caller memory, dma_is_map != 0) or free the memory
 * itself (slot allocated by sec_alloc_dma_mem()), and destroy the tag.
 * Safe to call on an empty slot.
 */
static void
sec_free_dma_mem(struct sec_dma_mem *dma_mem)
{

	/* Check for double free */
	if (dma_mem->dma_vaddr == NULL)
		return;

	bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);

	if (dma_mem->dma_is_map)
		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
	else
		bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
		    dma_mem->dma_map);

	bus_dma_tag_destroy(dma_mem->dma_tag);
	dma_mem->dma_vaddr = NULL;
}
966 
967 static int
968 sec_eu_channel(struct sec_softc *sc, int eu)
969 {
970 	uint64_t reg;
971 	int channel = 0;
972 
973 	SEC_LOCK_ASSERT(sc, controller);
974 
975 	reg = SEC_READ(sc, SEC_EUASR);
976 
977 	switch (eu) {
978 	case SEC_EU_AFEU:
979 		channel = SEC_EUASR_AFEU(reg);
980 		break;
981 	case SEC_EU_DEU:
982 		channel = SEC_EUASR_DEU(reg);
983 		break;
984 	case SEC_EU_MDEU_A:
985 	case SEC_EU_MDEU_B:
986 		channel = SEC_EUASR_MDEU(reg);
987 		break;
988 	case SEC_EU_RNGU:
989 		channel = SEC_EUASR_RNGU(reg);
990 		break;
991 	case SEC_EU_PKEU:
992 		channel = SEC_EUASR_PKEU(reg);
993 		break;
994 	case SEC_EU_AESU:
995 		channel = SEC_EUASR_AESU(reg);
996 		break;
997 	case SEC_EU_KEU:
998 		channel = SEC_EUASR_KEU(reg);
999 		break;
1000 	case SEC_EU_CRCU:
1001 		channel = SEC_EUASR_CRCU(reg);
1002 		break;
1003 	}
1004 
1005 	return (channel - 1);
1006 }
1007 
1008 static int
1009 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
1010 {
1011 	u_int fflvl = SEC_MAX_FIFO_LEVEL;
1012 	uint64_t reg;
1013 	int i;
1014 
1015 	SEC_LOCK_ASSERT(sc, controller);
1016 
1017 	/* Find free channel if have not got one */
1018 	if (channel < 0) {
1019 		for (i = 0; i < SEC_CHANNELS; i++) {
1020 			reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1021 
1022 			if ((reg & sc->sc_channel_idle_mask) == 0) {
1023 				channel = i;
1024 				break;
1025 			}
1026 		}
1027 	}
1028 
1029 	/* There is no free channel */
1030 	if (channel < 0)
1031 		return (-1);
1032 
1033 	/* Check FIFO level on selected channel */
1034 	reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1035 
1036 	switch(sc->sc_version) {
1037 	case 2:
1038 		fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
1039 		break;
1040 	case 3:
1041 		fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1042 		break;
1043 	}
1044 
1045 	if (fflvl >= SEC_MAX_FIFO_LEVEL)
1046 		return (-1);
1047 
1048 	/* Enqueue descriptor in channel */
1049 	SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1050 
1051 	return (channel);
1052 }
1053 
/*
 * Move ready descriptors into the hardware.  For each descriptor, prefer
 * the channel already holding the EU(s) it needs (the engine binds EUs to
 * channels); fall back to any free channel when both EUs are unassigned.
 * Stops at the first descriptor that cannot be queued, preserving order.
 */
static void
sec_enqueue(struct sec_softc *sc)
{
	struct sec_desc *desc;
	int ch0, ch1;

	SEC_LOCK(sc, descriptors);
	SEC_LOCK(sc, controller);

	while (SEC_READY_DESC_CNT(sc) > 0) {
		desc = SEC_GET_READY_DESC(sc);

		ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
		ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);

		/*
		 * Both EU are used by the same channel.
		 * Enqueue descriptor in channel used by busy EUs.
		 */
		if (ch0 >= 0 && ch0 == ch1) {
			if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/*
		 * Only one EU is free.
		 * Enqueue descriptor in channel used by busy EU.
		 */
		if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
			if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
			    >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/*
		 * Both EU are free.
		 * Enqueue descriptor in first free channel.
		 */
		if (ch0 < 0 && ch1 < 0) {
			if (sec_enqueue_desc(sc, desc, -1) >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/* Current descriptor can not be queued at the moment */
		SEC_PUT_BACK_READY_DESC(sc);
		break;
	}

	SEC_UNLOCK(sc, controller);
	SEC_UNLOCK(sc, descriptors);
}
1111 
1112 static struct sec_desc *
1113 sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1114 {
1115 	struct sec_desc *desc = NULL;
1116 	int i;
1117 
1118 	SEC_LOCK_ASSERT(sc, descriptors);
1119 
1120 	for (i = 0; i < SEC_CHANNELS; i++) {
1121 		if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1122 			desc = &(sc->sc_desc[i]);
1123 			break;
1124 		}
1125 	}
1126 
1127 	return (desc);
1128 }
1129 
1130 static int
1131 sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1132     bus_addr_t data, bus_size_t dsize)
1133 {
1134 	struct sec_hw_desc_ptr *ptr;
1135 
1136 	SEC_LOCK_ASSERT(sc, descriptors);
1137 
1138 	ptr = &(desc->sd_desc->shd_pointer[n]);
1139 	ptr->shdp_length = dsize;
1140 	ptr->shdp_extent = 0;
1141 	ptr->shdp_j = 0;
1142 	ptr->shdp_ptr = data;
1143 
1144 	return (0);
1145 }
1146 
1147 static int
1148 sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1149     u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype)
1150 {
1151 	struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1152 	struct sec_hw_desc_ptr *ptr;
1153 	int error;
1154 
1155 	SEC_LOCK_ASSERT(sc, descriptors);
1156 
1157 	/* For flat memory map only requested region */
1158 	if (dtype == SEC_MEMORY) {
1159 		 data = (uint8_t*)(data) + doffset;
1160 		 sdmi.sdmi_offset = 0;
1161 	}
1162 
1163 	error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize,
1164 	    dtype, &sdmi);
1165 
1166 	if (error)
1167 		return (error);
1168 
1169 	sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1170 	desc->sd_lt_used += sdmi.sdmi_lt_used;
1171 
1172 	ptr = &(desc->sd_desc->shd_pointer[n]);
1173 	ptr->shdp_length = dsize;
1174 	ptr->shdp_extent = 0;
1175 	ptr->shdp_j = 1;
1176 	ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
1177 
1178 	return (0);
1179 }
1180 
1181 static int
1182 sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
1183     struct cryptoini **mac)
1184 {
1185 	struct cryptoini *e, *m;
1186 
1187 	e = cri;
1188 	m = cri->cri_next;
1189 
1190 	/* We can haldle only two operations */
1191 	if (m && m->cri_next)
1192 		return (EINVAL);
1193 
1194 	if (sec_mdeu_can_handle(e->cri_alg)) {
1195 		cri = m;
1196 		m = e;
1197 		e = cri;
1198 	}
1199 
1200 	if (m && !sec_mdeu_can_handle(m->cri_alg))
1201 		return (EINVAL);
1202 
1203 	*enc = e;
1204 	*mac = m;
1205 
1206 	return (0);
1207 }
1208 
1209 static int
1210 sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
1211     struct cryptodesc **mac)
1212 {
1213 	struct cryptodesc *e, *m, *t;
1214 
1215 	e = crp->crp_desc;
1216 	m = e->crd_next;
1217 
1218 	/* We can haldle only two operations */
1219 	if (m && m->crd_next)
1220 		return (EINVAL);
1221 
1222 	if (sec_mdeu_can_handle(e->crd_alg)) {
1223 		t = m;
1224 		m = e;
1225 		e = t;
1226 	}
1227 
1228 	if (m && !sec_mdeu_can_handle(m->crd_alg))
1229 		return (EINVAL);
1230 
1231 	*enc = e;
1232 	*mac = m;
1233 
1234 	return (0);
1235 }
1236 
1237 static int
1238 sec_alloc_session(struct sec_softc *sc)
1239 {
1240 	struct sec_session *ses = NULL;
1241 	int sid = -1;
1242 	u_int i;
1243 
1244 	SEC_LOCK(sc, sessions);
1245 
1246 	for (i = 0; i < SEC_MAX_SESSIONS; i++) {
1247 		if (sc->sc_sessions[i].ss_used == 0) {
1248 			ses = &(sc->sc_sessions[i]);
1249 			ses->ss_used = 1;
1250 			ses->ss_ivlen = 0;
1251 			ses->ss_klen = 0;
1252 			ses->ss_mklen = 0;
1253 			sid = i;
1254 			break;
1255 		}
1256 	}
1257 
1258 	SEC_UNLOCK(sc, sessions);
1259 
1260 	return (sid);
1261 }
1262 
1263 static struct sec_session *
1264 sec_get_session(struct sec_softc *sc, u_int sid)
1265 {
1266 	struct sec_session *ses;
1267 
1268 	if (sid >= SEC_MAX_SESSIONS)
1269 		return (NULL);
1270 
1271 	SEC_LOCK(sc, sessions);
1272 
1273 	ses = &(sc->sc_sessions[sid]);
1274 
1275 	if (ses->ss_used == 0)
1276 		ses = NULL;
1277 
1278 	SEC_UNLOCK(sc, sessions);
1279 
1280 	return (ses);
1281 }
1282 
/*
 * OCF newsession entry point: allocate a session slot, find an execution
 * unit (EU) that can handle the requested cipher/digest pair, and stash
 * the keys for later descriptor building.  Returns 0 and writes the new
 * session ID to *sidp on success, or an errno value on failure.
 */
static int
sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct sec_softc *sc = device_get_softc(dev);
	struct sec_eu_methods *eu = sec_eus;
	struct cryptoini *enc = NULL;
	struct cryptoini *mac = NULL;
	struct sec_session *ses;
	int error = -1;
	int sid;

	/* Split request into cipher (enc) and digest (mac) operations */
	error = sec_split_cri(cri, &enc, &mac);
	if (error)
		return (error);

	/* Check key lengths */
	if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN)
		return (E2BIG);

	if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN)
		return (E2BIG);

	/*
	 * Only SEC 3.0 supports digests larger than 256 bits
	 * NOTE(review): the check below compares cri_klen (key length in
	 * bits), not the digest length the comment mentions - confirm this
	 * proxy is intentional.
	 */
	if (sc->sc_version < 3 && mac && mac->cri_klen > 256)
		return (E2BIG);

	sid = sec_alloc_session(sc);
	if (sid < 0)
		return (ENOMEM);

	ses = sec_get_session(sc, sid);

	/* Find EU for this session: first EU that accepts the algs wins */
	while (eu->sem_make_desc != NULL) {
		error = eu->sem_newsession(sc, ses, enc, mac);
		if (error >= 0)
			break;

		eu++;
	}

	/* If not found, release the slot and return EINVAL */
	if (error < 0) {
		sec_free_session(sc, ses);
		return (EINVAL);
	}

	/* Save cipher key */
	if (enc && enc->cri_key) {
		ses->ss_klen = enc->cri_klen / 8;
		memcpy(ses->ss_key, enc->cri_key, ses->ss_klen);
	}

	/* Save digest key */
	if (mac && mac->cri_key) {
		ses->ss_mklen = mac->cri_klen / 8;
		memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen);
	}

	/* Remember which EU services this session */
	ses->ss_eu = eu;
	*sidp = sid;

	return (0);
}
1347 
1348 static int
1349 sec_freesession(device_t dev, uint64_t tid)
1350 {
1351 	struct sec_softc *sc = device_get_softc(dev);
1352 	struct sec_session *ses;
1353 	int error = 0;
1354 
1355 	ses = sec_get_session(sc, CRYPTO_SESID2LID(tid));
1356 	if (ses == NULL)
1357 		return (EINVAL);
1358 
1359 	sec_free_session(sc, ses);
1360 
1361 	return (error);
1362 }
1363 
/*
 * OCF process entry point: validate the request, build a hardware
 * descriptor for it, and hand the descriptor to the engine.  Per OCF
 * convention, per-request failures are reported by setting crp_etype and
 * calling crypto_done() while still returning 0; ERESTART is returned
 * only when the driver is temporarily out of descriptors (or shutting
 * down) and the framework should requeue the request.
 */
static int
sec_process(device_t dev, struct cryptop *crp, int hint)
{
	struct sec_softc *sc = device_get_softc(dev);
	struct sec_desc *desc = NULL;
	struct cryptodesc *mac, *enc;
	struct sec_session *ses;
	int buftype, error = 0;

	/* Check Session ID */
	ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
	if (ses == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return (0);
	}

	/* Check for input length */
	if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
		crp->crp_etype = E2BIG;
		crypto_done(crp);
		return (0);
	}

	/* Get descriptors */
	if (sec_split_crp(crp, &enc, &mac)) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return (0);
	}

	SEC_LOCK(sc, descriptors);
	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Block driver if there is no free descriptors or we are going down */
	if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
		sc->sc_blocked |= CRYPTO_SYMQ;
		SEC_UNLOCK(sc, descriptors);
		return (ERESTART);
	}

	/* Prepare descriptor */
	desc = SEC_GET_FREE_DESC(sc);
	desc->sd_lt_used = 0;
	desc->sd_error = 0;
	desc->sd_crp = crp;

	/* Classify the request buffer so DMA mapping uses the right path */
	if (crp->crp_flags & CRYPTO_F_IOV)
		buftype = SEC_UIO;
	else if (crp->crp_flags & CRYPTO_F_IMBUF)
		buftype = SEC_MBUF;
	else
		buftype = SEC_MEMORY;

	/*
	 * IV handling: on encryption, use the caller's explicit IV or
	 * generate a random one, and write it back to the buffer unless
	 * the caller says it is already there; on decryption, take the
	 * explicit IV or read it out of the buffer.
	 */
	if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
			    ses->ss_ivlen);
		else
			arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0);

		if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enc->crd_inject, ses->ss_ivlen,
			    desc->sd_desc->shd_iv);
	} else if (enc) {
		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
			    ses->ss_ivlen);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enc->crd_inject, ses->ss_ivlen,
			    desc->sd_desc->shd_iv);
	}

	/* Per-request cipher key override (updates the session key) */
	if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
		if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
			ses->ss_klen = enc->crd_klen / 8;
			memcpy(ses->ss_key, enc->crd_key, ses->ss_klen);
		} else
			error = E2BIG;
	}

	/* Per-request digest key override (updates the session key) */
	if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
		if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
			ses->ss_mklen = mac->crd_klen / 8;
			memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen);
		} else
			error = E2BIG;
	}

	/* Copy keys into the descriptor and let the EU build it */
	if (!error) {
		memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen);
		memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen);

		error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype);
	}

	/* Unwind: release DMA mappings, link tables, and the descriptor */
	if (error) {
		SEC_DESC_FREE_POINTERS(desc);
		SEC_DESC_PUT_BACK_LT(sc, desc);
		SEC_PUT_BACK_FREE_DESC(sc);
		SEC_UNLOCK(sc, descriptors);
		crp->crp_etype = error;
		crypto_done(crp);
		return (0);
	}

	/*
	 * Skip DONE interrupt if this is not last request in burst, but only
	 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE
	 * signaling on each descriptor.
	 */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
		desc->sd_desc->shd_dn = 0;
	else
		desc->sd_desc->shd_dn = 1;

	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	SEC_DESC_FREE2READY(sc);
	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	return (0);
}
1493 
1494 static int
1495 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1496     struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1497     int buftype)
1498 {
1499 	struct sec_hw_desc *hd = desc->sd_desc;
1500 	int error;
1501 
1502 	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1503 	hd->shd_eu_sel1 = SEC_EU_NONE;
1504 	hd->shd_mode1 = 0;
1505 
1506 	/* Pointer 0: NULL */
1507 	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1508 	if (error)
1509 		return (error);
1510 
1511 	/* Pointer 1: IV IN */
1512 	error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1513 	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1514 	if (error)
1515 		return (error);
1516 
1517 	/* Pointer 2: Cipher Key */
1518 	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1519 	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1520  	if (error)
1521 		return (error);
1522 
1523 	/* Pointer 3: Data IN */
1524 	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
1525 	    enc->crd_len, buftype);
1526 	if (error)
1527 		return (error);
1528 
1529 	/* Pointer 4: Data OUT */
1530 	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1531 	    enc->crd_len, buftype);
1532 	if (error)
1533 		return (error);
1534 
1535 	/* Pointer 5: IV OUT (Not used: NULL) */
1536 	error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1537 	if (error)
1538 		return (error);
1539 
1540 	/* Pointer 6: NULL */
1541 	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1542 
1543 	return (error);
1544 }
1545 
1546 static int
1547 sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1548     struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1549     struct cryptodesc *mac, int buftype)
1550 {
1551 	struct sec_hw_desc *hd = desc->sd_desc;
1552 	u_int eu, mode, hashlen;
1553 	int error;
1554 
1555 	if (mac->crd_len < enc->crd_len)
1556 		return (EINVAL);
1557 
1558 	if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len)
1559 		return (EINVAL);
1560 
1561 	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1562 	if (error)
1563 		return (error);
1564 
1565 	hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1566 	hd->shd_eu_sel1 = eu;
1567 	hd->shd_mode1 = mode;
1568 
1569 	/* Pointer 0: HMAC Key */
1570 	error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1571 	    offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen);
1572 	if (error)
1573 		return (error);
1574 
1575 	/* Pointer 1: HMAC-Only Data IN */
1576 	error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip,
1577 	    mac->crd_len - enc->crd_len, buftype);
1578 	if (error)
1579 		return (error);
1580 
1581 	/* Pointer 2: Cipher Key */
1582 	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1583 	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1584  	if (error)
1585 		return (error);
1586 
1587 	/* Pointer 3: IV IN */
1588 	error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1589 	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1590 	if (error)
1591 		return (error);
1592 
1593 	/* Pointer 4: Data IN */
1594 	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1595 	    enc->crd_len, buftype);
1596 	if (error)
1597 		return (error);
1598 
1599 	/* Pointer 5: Data OUT */
1600 	error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip,
1601 	    enc->crd_len, buftype);
1602 	if (error)
1603 		return (error);
1604 
1605 	/* Pointer 6: HMAC OUT */
1606 	error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject,
1607 	    hashlen, buftype);
1608 
1609 	return (error);
1610 }
1611 
1612 /* AESU */
1613 
1614 static int
1615 sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses,
1616     struct cryptoini *enc, struct cryptoini *mac)
1617 {
1618 
1619 	if (enc == NULL)
1620 		return (-1);
1621 
1622 	if (enc->cri_alg != CRYPTO_AES_CBC)
1623 		return (-1);
1624 
1625 	ses->ss_ivlen = AES_BLOCK_LEN;
1626 
1627 	return (0);
1628 }
1629 
1630 static int
1631 sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1632     struct sec_desc *desc, struct cryptop *crp, int buftype)
1633 {
1634 	struct sec_hw_desc *hd = desc->sd_desc;
1635 	struct cryptodesc *enc, *mac;
1636 	int error;
1637 
1638 	error = sec_split_crp(crp, &enc, &mac);
1639 	if (error)
1640 		return (error);
1641 
1642 	if (!enc)
1643 		return (EINVAL);
1644 
1645 	hd->shd_eu_sel0 = SEC_EU_AESU;
1646 	hd->shd_mode0 = SEC_AESU_MODE_CBC;
1647 
1648 	if (enc->crd_alg != CRYPTO_AES_CBC)
1649 		return (EINVAL);
1650 
1651 	if (enc->crd_flags & CRD_F_ENCRYPT) {
1652 		hd->shd_mode0 |= SEC_AESU_MODE_ED;
1653 		hd->shd_dir = 0;
1654 	} else
1655 		hd->shd_dir = 1;
1656 
1657 	if (mac)
1658 		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1659 		    buftype);
1660 	else
1661 		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1662 		    buftype);
1663 
1664 	return (error);
1665 }
1666 
1667 /* DEU */
1668 
1669 static int
1670 sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses,
1671     struct cryptoini *enc, struct cryptoini *mac)
1672 {
1673 
1674 	if (enc == NULL)
1675 		return (-1);
1676 
1677 	switch (enc->cri_alg) {
1678 	case CRYPTO_DES_CBC:
1679 	case CRYPTO_3DES_CBC:
1680 		break;
1681 	default:
1682 		return (-1);
1683 	}
1684 
1685 	ses->ss_ivlen = DES_BLOCK_LEN;
1686 
1687 	return (0);
1688 }
1689 
1690 static int
1691 sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1692     struct sec_desc *desc, struct cryptop *crp, int buftype)
1693 {
1694 	struct sec_hw_desc *hd = desc->sd_desc;
1695 	struct cryptodesc *enc, *mac;
1696 	int error;
1697 
1698 	error = sec_split_crp(crp, &enc, &mac);
1699 	if (error)
1700 		return (error);
1701 
1702 	if (!enc)
1703 		return (EINVAL);
1704 
1705 	hd->shd_eu_sel0 = SEC_EU_DEU;
1706 	hd->shd_mode0 = SEC_DEU_MODE_CBC;
1707 
1708 	switch (enc->crd_alg) {
1709 	case CRYPTO_3DES_CBC:
1710 		hd->shd_mode0 |= SEC_DEU_MODE_TS;
1711 		break;
1712 	case CRYPTO_DES_CBC:
1713 		break;
1714 	default:
1715 		return (EINVAL);
1716 	}
1717 
1718 	if (enc->crd_flags & CRD_F_ENCRYPT) {
1719 		hd->shd_mode0 |= SEC_DEU_MODE_ED;
1720 		hd->shd_dir = 0;
1721 	} else
1722 		hd->shd_dir = 1;
1723 
1724 	if (mac)
1725 		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1726 		    buftype);
1727 	else
1728 		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1729 		    buftype);
1730 
1731 	return (error);
1732 }
1733 
1734 /* MDEU */
1735 
1736 static int
1737 sec_mdeu_can_handle(u_int alg)
1738 {
1739 	switch (alg) {
1740 	case CRYPTO_MD5:
1741 	case CRYPTO_SHA1:
1742 	case CRYPTO_MD5_HMAC:
1743 	case CRYPTO_SHA1_HMAC:
1744 	case CRYPTO_SHA2_256_HMAC:
1745 	case CRYPTO_SHA2_384_HMAC:
1746 	case CRYPTO_SHA2_512_HMAC:
1747 		return (1);
1748 	default:
1749 		return (0);
1750 	}
1751 }
1752 
1753 static int
1754 sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen)
1755 {
1756 
1757 	*mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1758 	*eu = SEC_EU_NONE;
1759 
1760 	switch (crd->crd_alg) {
1761 	case CRYPTO_MD5_HMAC:
1762 		*mode |= SEC_MDEU_MODE_HMAC;
1763 		/* FALLTHROUGH */
1764 	case CRYPTO_MD5:
1765 		*eu = SEC_EU_MDEU_A;
1766 		*mode |= SEC_MDEU_MODE_MD5;
1767 		*hashlen = MD5_HASH_LEN;
1768 		break;
1769 	case CRYPTO_SHA1_HMAC:
1770 		*mode |= SEC_MDEU_MODE_HMAC;
1771 		/* FALLTHROUGH */
1772 	case CRYPTO_SHA1:
1773 		*eu = SEC_EU_MDEU_A;
1774 		*mode |= SEC_MDEU_MODE_SHA1;
1775 		*hashlen = SHA1_HASH_LEN;
1776 		break;
1777 	case CRYPTO_SHA2_256_HMAC:
1778 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1779 		*eu = SEC_EU_MDEU_A;
1780 		break;
1781 	case CRYPTO_SHA2_384_HMAC:
1782 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1783 		*eu = SEC_EU_MDEU_B;
1784 		break;
1785 	case CRYPTO_SHA2_512_HMAC:
1786 		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1787 		*eu = SEC_EU_MDEU_B;
1788 		break;
1789 	default:
1790 		return (EINVAL);
1791 	}
1792 
1793 	if (*mode & SEC_MDEU_MODE_HMAC)
1794 		*hashlen = SEC_HMAC_HASH_LEN;
1795 
1796 	return (0);
1797 }
1798 
1799 static int
1800 sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses,
1801     struct cryptoini *enc, struct cryptoini *mac)
1802 {
1803 
1804 	if (mac && sec_mdeu_can_handle(mac->cri_alg))
1805 		return (0);
1806 
1807 	return (-1);
1808 }
1809 
1810 static int
1811 sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1812     struct sec_desc *desc, struct cryptop *crp, int buftype)
1813 {
1814 	struct cryptodesc *enc, *mac;
1815 	struct sec_hw_desc *hd = desc->sd_desc;
1816 	u_int eu, mode, hashlen;
1817 	int error;
1818 
1819 	error = sec_split_crp(crp, &enc, &mac);
1820 	if (error)
1821 		return (error);
1822 
1823 	if (enc)
1824 		return (EINVAL);
1825 
1826 	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1827 	if (error)
1828 		return (error);
1829 
1830 	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1831 	hd->shd_eu_sel0 = eu;
1832 	hd->shd_mode0 = mode;
1833 	hd->shd_eu_sel1 = SEC_EU_NONE;
1834 	hd->shd_mode1 = 0;
1835 
1836 	/* Pointer 0: NULL */
1837 	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1838 	if (error)
1839 		return (error);
1840 
1841 	/* Pointer 1: Context In (Not used: NULL) */
1842 	error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1843 	if (error)
1844 		return (error);
1845 
1846 	/* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1847 	if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1848 		error = sec_make_pointer_direct(sc, desc, 2,
1849 		    desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1850 		    shd_mkey), ses->ss_mklen);
1851 	else
1852 		error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1853 
1854 	if (error)
1855 		return (error);
1856 
1857 	/* Pointer 3: Input Data */
1858 	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip,
1859 	    mac->crd_len, buftype);
1860 	if (error)
1861 		return (error);
1862 
1863 	/* Pointer 4: NULL */
1864 	error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1865 	if (error)
1866 		return (error);
1867 
1868 	/* Pointer 5: Hash out */
1869 	error = sec_make_pointer(sc, desc, 5, crp->crp_buf,
1870 	    mac->crd_inject, hashlen, buftype);
1871 	if (error)
1872 		return (error);
1873 
1874 	/* Pointer 6: NULL */
1875 	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1876 
1877 	return (0);
1878 }
1879