/* xref: /freebsd/sys/crypto/ccp/ccp_hardware.c (revision 685dc743dc3b5645e34836464128e1c0558b404b) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 * Largely borrowed from ccr(4), Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/vmparam.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include "cryptodev_if.h"

#include "ccp.h"
#include "ccp_hardware.h"
#include "ccp_lsb.h"

CTASSERT(sizeof(struct ccp_desc) == 32);

static struct ccp_xts_unitsize_map_entry {
	enum ccp_xts_unitsize cxu_id;
	unsigned cxu_size;
} ccp_xts_unitsize_map[] = {
	{ CCP_XTS_AES_UNIT_SIZE_16, 16 },
	{ CCP_XTS_AES_UNIT_SIZE_512, 512 },
	{ CCP_XTS_AES_UNIT_SIZE_1024, 1024 },
	{ CCP_XTS_AES_UNIT_SIZE_2048, 2048 },
	{ CCP_XTS_AES_UNIT_SIZE_4096, 4096 },
};

SYSCTL_NODE(_hw, OID_AUTO, ccp, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ccp node");

unsigned g_ccp_ring_order = 11;
SYSCTL_UINT(_hw_ccp, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ccp_ring_order,
    0, "Set CCP ring order.  (1 << this) == ring size.  Min: 6, Max: 16");

/*
 * Zero buffer, sufficient for padding LSB entries, that does not span a page
 * boundary
 */
static const char g_zeroes[32] __aligned(32);

static inline uint32_t
ccp_read_4(struct ccp_softc *sc, uint32_t offset)
{
	return (bus_space_read_4(sc->pci_bus_tag, sc->pci_bus_handle, offset));
}

static inline void
ccp_write_4(struct ccp_softc *sc, uint32_t offset, uint32_t value)
{
	bus_space_write_4(sc->pci_bus_tag, sc->pci_bus_handle, offset, value);
}

static inline uint32_t
ccp_read_queue_4(struct ccp_softc *sc, unsigned queue, uint32_t offset)
{
	/*
	 * Each queue gets its own 4kB register space.  Queue 0 is at 0x1000.
	 */
	return (ccp_read_4(sc, (CMD_Q_STATUS_INCR * (1 + queue)) + offset));
}

static inline void
ccp_write_queue_4(struct ccp_softc *sc, unsigned queue, uint32_t offset,
    uint32_t value)
{
	ccp_write_4(sc, (CMD_Q_STATUS_INCR * (1 + queue)) + offset, value);
}
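
/*
 * Worked example of the per-queue register math above (illustrative only):
 * assuming CMD_Q_STATUS_INCR is 0x1000, consistent with the 4kB-per-queue
 * spacing and queue 0 at 0x1000 noted in ccp_read_queue_4(), a read of queue
 * 2's CMD_Q_HEAD_LO_BASE decodes to MMIO offset
 * 0x1000 * (1 + 2) + CMD_Q_HEAD_LO_BASE = 0x3000 + CMD_Q_HEAD_LO_BASE.
 */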

void
ccp_queue_write_tail(struct ccp_queue *qp)
{
	ccp_write_queue_4(qp->cq_softc, qp->cq_qindex, CMD_Q_TAIL_LO_BASE,
	    ((uint32_t)qp->desc_ring_bus_addr) + (Q_DESC_SIZE * qp->cq_tail));
}

/*
 * Given a queue and a reserved LSB entry index, compute the LSB *entry id* of
 * that entry for the queue's private LSB region.
 */
static inline uint8_t
ccp_queue_lsb_entry(struct ccp_queue *qp, unsigned lsb_entry)
{
	return ((qp->private_lsb * LSB_REGION_LENGTH + lsb_entry));
}

/*
 * Given a queue and a reserved LSB entry index, compute the LSB *address* of
 * that entry for the queue's private LSB region.
 */
static inline uint32_t
ccp_queue_lsb_address(struct ccp_queue *qp, unsigned lsb_entry)
{
	return (ccp_queue_lsb_entry(qp, lsb_entry) * LSB_ENTRY_SIZE);
}

/*
 * Some terminology:
 *
 * LSB - Local Storage Block
 * =========================
 *
 * 8 segments/regions, each containing 16 entries.
 *
 * Each entry contains 256 bits (32 bytes).
 *
 * Segments are virtually addressed in commands, but accesses cannot cross
 * segment boundaries.  Virtual map uses an identity mapping by default
 * (virtual segment N corresponds to physical segment N).
 *
 * Access to a physical region can be restricted to any subset of all five
 * queues.
 *
 * "Pass-through" mode
 * ===================
 *
 * Pass-through is a generic DMA engine, much like ioat(4).  Some nice
 * features:
 *
 * - Supports byte-swapping for endian conversion (32- or 256-bit words)
 * - AND, OR, XOR with fixed 256-bit mask
 * - CRC32 of data (may be used in tandem with bswap, but not bit operations)
 * - Read/write of LSB
 * - Memset
 *
 * If bit manipulation mode is enabled, input must be a multiple of 256 bits
 * (32 bytes).
 *
 * If byte-swapping is enabled, input must be a multiple of the word size.
 *
 * Zlib mode -- only usable from one queue at a time, single job at a time.
 * ========================================================================
 *
 * Only usable from private host, aka PSP?  Not host processor?
 *
 * RNG.
 * ====
 *
 * Raw bits are conditioned with AES and fed through CTR_DRBG.  Output goes in
 * a ring buffer readable by software.
 *
 * NIST SP 800-90B Repetition Count and Adaptive Proportion health checks are
 * implemented on the raw input stream and may be enabled to verify min-entropy
 * of 0.5 bits per bit.
 */
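
/*
 * Worked example of the LSB addressing helpers above (illustrative only),
 * assuming LSB_REGION_LENGTH == 16 and LSB_ENTRY_SIZE == 32, matching the
 * "16 entries of 32 bytes per region" geometry described in the terminology
 * comment above: for a queue whose private region is private_lsb == 2,
 * reserved entry index 3 maps to entry id 2 * 16 + 3 == 35 and to LSB byte
 * address 35 * 32 == 1120 (0x460).
 */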

static void
ccp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	baddr = arg;
	*baddr = segs->ds_addr;
}

static int
ccp_hw_attach_queue(device_t dev, uint64_t lsbmask, unsigned queue)
{
	struct ccp_softc *sc;
	struct ccp_queue *qp;
	void *desc;
	size_t ringsz, num_descriptors;
	int error;

	desc = NULL;
	sc = device_get_softc(dev);
	qp = &sc->queues[queue];

	/*
	 * Don't bother allocating a ring for queues the host isn't allowed to
	 * drive.
	 */
	if ((sc->valid_queues & (1 << queue)) == 0)
		return (0);

	ccp_queue_decode_lsb_regions(sc, lsbmask, queue);

	/* Ignore queues that do not have any LSB access. */
	if (qp->lsb_mask == 0) {
		device_printf(dev, "Ignoring queue %u with no LSB access\n",
		    queue);
		sc->valid_queues &= ~(1 << queue);
		return (0);
	}

	num_descriptors = 1 << sc->ring_size_order;
	ringsz = sizeof(struct ccp_desc) * num_descriptors;

	/*
	 * "Queue_Size" is order - 1.
	 *
	 * Queue must be aligned to 5+Queue_Size+1 == 5 + order bits.
	 */
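	/*
	 * Worked example (illustrative only): with the default
	 * g_ccp_ring_order of 11, the ring holds 1 << 11 == 2048 descriptors
	 * of 32 bytes each (sizeof(struct ccp_desc)), i.e. 64 kB, and the
	 * alignment requested below is 1 << (5 + 11) == 64 kB, so the ring is
	 * naturally aligned to its own size.
	 */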
	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1 << (5 + sc->ring_size_order),
#if defined(__i386__) && !defined(PAE)
	    0, BUS_SPACE_MAXADDR,
#else
	    (bus_addr_t)1 << 32, BUS_SPACE_MAXADDR_48BIT,
#endif
	    BUS_SPACE_MAXADDR, NULL, NULL, ringsz, 1,
	    ringsz, 0, NULL, NULL, &qp->ring_desc_tag);
	if (error != 0)
		goto out;

	error = bus_dmamem_alloc(qp->ring_desc_tag, &desc,
	    BUS_DMA_ZERO | BUS_DMA_WAITOK, &qp->ring_desc_map);
	if (error != 0)
		goto out;

	error = bus_dmamap_load(qp->ring_desc_tag, qp->ring_desc_map, desc,
	    ringsz, ccp_dmamap_cb, &qp->desc_ring_bus_addr, BUS_DMA_WAITOK);
	if (error != 0)
		goto out;

	qp->desc_ring = desc;
	qp->completions_ring = malloc(num_descriptors *
	    sizeof(*qp->completions_ring), M_CCP, M_ZERO | M_WAITOK);

	/* Zero control register; among other things, clears the RUN flag. */
	qp->qcontrol = 0;
	ccp_write_queue_4(sc, queue, CMD_Q_CONTROL_BASE, qp->qcontrol);
	ccp_write_queue_4(sc, queue, CMD_Q_INT_ENABLE_BASE, 0);

	/* Clear any leftover interrupt status flags */
	ccp_write_queue_4(sc, queue, CMD_Q_INTERRUPT_STATUS_BASE,
	    ALL_INTERRUPTS);

	qp->qcontrol |= (sc->ring_size_order - 1) << CMD_Q_SIZE_SHIFT;

	ccp_write_queue_4(sc, queue, CMD_Q_TAIL_LO_BASE,
	    (uint32_t)qp->desc_ring_bus_addr);
	ccp_write_queue_4(sc, queue, CMD_Q_HEAD_LO_BASE,
	    (uint32_t)qp->desc_ring_bus_addr);

	/*
	 * Enable completion interrupts, as well as error or administrative
	 * halt interrupts.  We don't use administrative halts, but they
	 * shouldn't trip unless we do, so it ought to be harmless.
	 */
	ccp_write_queue_4(sc, queue, CMD_Q_INT_ENABLE_BASE,
	    INT_COMPLETION | INT_ERROR | INT_QUEUE_STOPPED);

	qp->qcontrol |= (qp->desc_ring_bus_addr >> 32) << CMD_Q_PTR_HI_SHIFT;
	qp->qcontrol |= CMD_Q_RUN;
	ccp_write_queue_4(sc, queue, CMD_Q_CONTROL_BASE, qp->qcontrol);

out:
	if (error != 0) {
		if (qp->desc_ring != NULL)
			bus_dmamap_unload(qp->ring_desc_tag,
			    qp->ring_desc_map);
		if (desc != NULL)
			bus_dmamem_free(qp->ring_desc_tag, desc,
			    qp->ring_desc_map);
		if (qp->ring_desc_tag != NULL)
			bus_dma_tag_destroy(qp->ring_desc_tag);
	}
	return (error);
}

static void
ccp_hw_detach_queue(device_t dev, unsigned queue)
{
	struct ccp_softc *sc;
	struct ccp_queue *qp;

	sc = device_get_softc(dev);
	qp = &sc->queues[queue];

	/*
	 * No ring was allocated for queues the host isn't allowed to drive,
	 * so there is nothing to tear down for them.
	 */
	if ((sc->valid_queues & (1 << queue)) == 0)
		return;

	free(qp->completions_ring, M_CCP);
	bus_dmamap_unload(qp->ring_desc_tag, qp->ring_desc_map);
	bus_dmamem_free(qp->ring_desc_tag, qp->desc_ring, qp->ring_desc_map);
	bus_dma_tag_destroy(qp->ring_desc_tag);
}

static int
ccp_map_pci_bar(device_t dev)
{
	struct ccp_softc *sc;

	sc = device_get_softc(dev);

	sc->pci_resource_id = PCIR_BAR(2);
	sc->pci_resource = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->pci_resource_id, RF_ACTIVE);
	if (sc->pci_resource == NULL) {
		device_printf(dev, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	sc->pci_resource_id_msix = PCIR_BAR(5);
	sc->pci_resource_msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->pci_resource_id_msix, RF_ACTIVE);
	if (sc->pci_resource_msix == NULL) {
		device_printf(dev, "unable to allocate pci resource msix\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id,
		    sc->pci_resource);
		return (ENODEV);
	}

	sc->pci_bus_tag = rman_get_bustag(sc->pci_resource);
	sc->pci_bus_handle = rman_get_bushandle(sc->pci_resource);
	return (0);
}

static void
ccp_unmap_pci_bar(device_t dev)
{
	struct ccp_softc *sc;

	sc = device_get_softc(dev);

	bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id_msix,
	    sc->pci_resource_msix);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id,
	    sc->pci_resource);
}

const static struct ccp_error_code {
	uint8_t		ce_code;
	const char	*ce_name;
	int		ce_errno;
	const char	*ce_desc;
} ccp_error_codes[] = {
	{ 0x01, "ILLEGAL_ENGINE", EIO, "Requested engine was invalid" },
	{ 0x03, "ILLEGAL_FUNCTION_TYPE", EIO,
	    "A non-supported function type was specified" },
	{ 0x04, "ILLEGAL_FUNCTION_MODE", EIO,
	    "A non-supported function mode was specified" },
	{ 0x05, "ILLEGAL_FUNCTION_ENCRYPT", EIO,
	    "A CMAC type was specified when ENCRYPT was not specified" },
	{ 0x06, "ILLEGAL_FUNCTION_SIZE", EIO,
	    "A non-supported function size was specified.\n"
	    "AES-CFB: Size was not 127 or 7;\n"
	    "3DES-CFB: Size was not 7;\n"
	    "RSA: See supported size table (7.4.2);\n"
	    "ECC: Size was greater than 576 bits." },
	{ 0x07, "Zlib_MISSING_INIT_EOM", EIO,
	    "Zlib command does not have INIT and EOM set" },
	{ 0x08, "ILLEGAL_FUNCTION_RSVD", EIO,
	    "Reserved bits in a function specification were not 0" },
	{ 0x09, "ILLEGAL_BUFFER_LENGTH", EIO,
	    "The buffer length specified was not correct for the selected engine"
	},
	{ 0x0A, "VLSB_FAULT", EIO, "Illegal VLSB segment mapping:\n"
	    "Undefined VLSB segment mapping or\n"
	    "mapping to unsupported LSB segment id" },
	{ 0x0B, "ILLEGAL_MEM_ADDR", EFAULT,
	    "The specified source/destination buffer access was illegal:\n"
	    "Data buffer located in an LSB location disallowed by the LSB protection masks; or\n"
	    "Data buffer not completely contained within a single segment; or\n"
	    "Pointer with Fixed=1 is not 32-bit aligned; or\n"
	    "Pointer with Fixed=1 attempted to reference non-AXI1 (local) memory."
	},
	{ 0x0C, "ILLEGAL_MEM_SEL", EIO,
	    "A src_mem, dst_mem, or key_mem field was illegal:\n"
	    "A field was set to a reserved value; or\n"
	    "A public command attempted to reference AXI1 (local) or GART memory; or\n"
	    "A Zlib command attempted to use the LSB." },
	{ 0x0D, "ILLEGAL_CONTEXT_ADDR", EIO,
	    "The specified context location was illegal:\n"
	    "Context located in an LSB location disallowed by the LSB protection masks; or\n"
	    "Context not completely contained within a single segment." },
	{ 0x0E, "ILLEGAL_KEY_ADDR", EIO,
	    "The specified key location was illegal:\n"
	    "Key located in an LSB location disallowed by the LSB protection masks; or\n"
	    "Key not completely contained within a single segment." },
	{ 0x12, "CMD_TIMEOUT", EIO, "A command timeout violation occurred" },
	/* XXX Could fill out these descriptions too */
	{ 0x13, "IDMA0_AXI_SLVERR", EIO, "" },
	{ 0x14, "IDMA0_AXI_DECERR", EIO, "" },
	{ 0x16, "IDMA1_AXI_SLVERR", EIO, "" },
	{ 0x17, "IDMA1_AXI_DECERR", EIO, "" },
	{ 0x19, "ZLIBVHB_AXI_SLVERR", EIO, "" },
	{ 0x1A, "ZLIBVHB_AXI_DECERR", EIO, "" },
	{ 0x1C, "ZLIB_UNEXPECTED_EOM", EIO, "" },
	{ 0x1D, "ZLIB_EXTRA_DATA", EIO, "" },
	{ 0x1E, "ZLIB_BTYPE", EIO, "" },
	{ 0x20, "ZLIB_UNDEFINED_DISTANCE_SYMBOL", EIO, "" },
	{ 0x21, "ZLIB_CODE_LENGTH_SYMBOL", EIO, "" },
	{ 0x22, "ZLIB_VHB_ILLEGAL_FETCH", EIO, "" },
	{ 0x23, "ZLIB_UNCOMPRESSED_LEN", EIO, "" },
	{ 0x24, "ZLIB_LIMIT_REACHED", EIO, "" },
	{ 0x25, "ZLIB_CHECKSUM_MISMATCH", EIO, "" },
	{ 0x26, "ODMA0_AXI_SLVERR", EIO, "" },
	{ 0x27, "ODMA0_AXI_DECERR", EIO, "" },
	{ 0x29, "ODMA1_AXI_SLVERR", EIO, "" },
	{ 0x2A, "ODMA1_AXI_DECERR", EIO, "" },
	{ 0x2B, "LSB_PARITY_ERR", EIO,
	    "A read from the LSB encountered a parity error" },
};

static void
ccp_intr_handle_error(struct ccp_queue *qp, const struct ccp_desc *desc)
{
	struct ccp_completion_ctx *cctx;
	const struct ccp_error_code *ec;
	struct ccp_softc *sc;
	uint32_t status, error, esource, faultblock;
	unsigned q, idx;
	int errno;

	sc = qp->cq_softc;
	q = qp->cq_qindex;

	status = ccp_read_queue_4(sc, q, CMD_Q_STATUS_BASE);

	error = status & STATUS_ERROR_MASK;

	/* Decode error status */
	ec = NULL;
	for (idx = 0; idx < nitems(ccp_error_codes); idx++)
		if (ccp_error_codes[idx].ce_code == error) {
			ec = &ccp_error_codes[idx];
			break;
		}

	esource = (status >> STATUS_ERRORSOURCE_SHIFT) &
	    STATUS_ERRORSOURCE_MASK;
	faultblock = (status >> STATUS_VLSB_FAULTBLOCK_SHIFT) &
	    STATUS_VLSB_FAULTBLOCK_MASK;
	device_printf(sc->dev, "Error: %s (%u) Source: %u Faulting LSB block: %u\n",
	    (ec != NULL) ? ec->ce_name : "(reserved)", error, esource,
	    faultblock);
	if (ec != NULL)
		device_printf(sc->dev, "Error description: %s\n", ec->ce_desc);

	/* TODO Could format the desc nicely here */
	idx = desc - qp->desc_ring;
	DPRINTF(sc->dev, "Bad descriptor index: %u contents: %32D\n", idx,
	    (const void *)desc, " ");

	/*
	 * TODO Per § 14.4 "Error Handling," DMA_Status, DMA_Read/Write_Status,
	 * Zlib Decompress status may be interesting.
	 */

	while (true) {
		/* Keep unused descriptors zero for next use. */
		memset(&qp->desc_ring[idx], 0, sizeof(qp->desc_ring[idx]));

		cctx = &qp->completions_ring[idx];

		/*
		 * Restart procedure described in § 14.2.5.  Could be used by HoC if we
		 * used that.
		 *
		 * Advance HEAD_LO past bad descriptor + any remaining in
		 * transaction manually, then restart queue.
		 */
		idx = (idx + 1) % (1 << sc->ring_size_order);

		/* Callback function signals end of transaction */
		if (cctx->callback_fn != NULL) {
			if (ec == NULL)
				errno = EIO;
			else
				errno = ec->ce_errno;
			/* TODO More specific error code */
			cctx->callback_fn(qp, cctx->session, cctx->callback_arg, errno);
			cctx->callback_fn = NULL;
			break;
		}
	}

	qp->cq_head = idx;
	qp->cq_waiting = false;
	wakeup(&qp->cq_tail);
	DPRINTF(sc->dev, "%s: wrote sw head:%u\n", __func__, qp->cq_head);
	ccp_write_queue_4(sc, q, CMD_Q_HEAD_LO_BASE,
	    (uint32_t)qp->desc_ring_bus_addr + (idx * Q_DESC_SIZE));
	ccp_write_queue_4(sc, q, CMD_Q_CONTROL_BASE, qp->qcontrol);
	DPRINTF(sc->dev, "%s: Restarted queue\n", __func__);
}

static void
ccp_intr_run_completions(struct ccp_queue *qp, uint32_t ints)
{
	struct ccp_completion_ctx *cctx;
	struct ccp_softc *sc;
	const struct ccp_desc *desc;
	uint32_t headlo, idx;
	unsigned q, completed;

	sc = qp->cq_softc;
	q = qp->cq_qindex;

	mtx_lock(&qp->cq_lock);

	/*
	 * Hardware HEAD_LO points to the first incomplete descriptor.  Process
	 * any submitted and completed descriptors, up to but not including
	 * HEAD_LO.
	 */
	headlo = ccp_read_queue_4(sc, q, CMD_Q_HEAD_LO_BASE);
	idx = (headlo - (uint32_t)qp->desc_ring_bus_addr) / Q_DESC_SIZE;

	DPRINTF(sc->dev, "%s: hw head:%u sw head:%u\n", __func__, idx,
	    qp->cq_head);
	completed = 0;
	while (qp->cq_head != idx) {
		DPRINTF(sc->dev, "%s: completing:%u\n", __func__, qp->cq_head);

		cctx = &qp->completions_ring[qp->cq_head];
		if (cctx->callback_fn != NULL) {
			cctx->callback_fn(qp, cctx->session,
			    cctx->callback_arg, 0);
			cctx->callback_fn = NULL;
		}

		/* Keep unused descriptors zero for next use. */
		memset(&qp->desc_ring[qp->cq_head], 0,
		    sizeof(qp->desc_ring[qp->cq_head]));

		qp->cq_head = (qp->cq_head + 1) % (1 << sc->ring_size_order);
		completed++;
	}
	if (completed > 0) {
		qp->cq_waiting = false;
		wakeup(&qp->cq_tail);
	}

	DPRINTF(sc->dev, "%s: wrote sw head:%u\n", __func__, qp->cq_head);

	/*
	 * Desc points to the first incomplete descriptor, at the time we read
	 * HEAD_LO.  If there was an error flagged in interrupt status, the HW
	 * will not proceed past the erroneous descriptor by itself.
	 */
	desc = &qp->desc_ring[idx];
	if ((ints & INT_ERROR) != 0)
		ccp_intr_handle_error(qp, desc);

	mtx_unlock(&qp->cq_lock);
}

static void
ccp_intr_handler(void *arg)
{
	struct ccp_softc *sc = arg;
	size_t i;
	uint32_t ints;

	DPRINTF(sc->dev, "%s: interrupt\n", __func__);

	/*
	 * We get one global interrupt per PCI device, shared over all of
	 * its queues.  Scan each valid queue on interrupt for flags indicating
	 * activity.
	 */
	for (i = 0; i < nitems(sc->queues); i++) {
		if ((sc->valid_queues & (1 << i)) == 0)
			continue;

		ints = ccp_read_queue_4(sc, i, CMD_Q_INTERRUPT_STATUS_BASE);
		if (ints == 0)
			continue;

#if 0
		DPRINTF(sc->dev, "%s: %x interrupts on queue %zu\n", __func__,
		    (unsigned)ints, i);
#endif
		/* Write back 1s to clear interrupt status bits. */
		ccp_write_queue_4(sc, i, CMD_Q_INTERRUPT_STATUS_BASE, ints);

		/*
		 * If there was an error, we still need to run completions on
		 * any descriptors prior to the error.  The completions handler
		 * invoked below will also handle the error descriptor.
		 */
		if ((ints & (INT_COMPLETION | INT_ERROR)) != 0)
			ccp_intr_run_completions(&sc->queues[i], ints);

		if ((ints & INT_QUEUE_STOPPED) != 0)
			device_printf(sc->dev, "%s: queue %zu stopped\n",
			    __func__, i);
	}

	/* Re-enable interrupts after processing */
	for (i = 0; i < nitems(sc->queues); i++) {
		if ((sc->valid_queues & (1 << i)) == 0)
			continue;
		ccp_write_queue_4(sc, i, CMD_Q_INT_ENABLE_BASE,
		    INT_COMPLETION | INT_ERROR | INT_QUEUE_STOPPED);
	}
}

static int
ccp_intr_filter(void *arg)
{
	struct ccp_softc *sc = arg;
	size_t i;

	/* TODO: Split individual queues into separate taskqueues? */
	for (i = 0; i < nitems(sc->queues); i++) {
		if ((sc->valid_queues & (1 << i)) == 0)
			continue;

		/* Mask interrupt until task completes */
		ccp_write_queue_4(sc, i, CMD_Q_INT_ENABLE_BASE, 0);
	}

	return (FILTER_SCHEDULE_THREAD);
}

static int
ccp_setup_interrupts(struct ccp_softc *sc)
{
	uint32_t nvec;
	int rid, error, n, ridcopy;

	n = pci_msix_count(sc->dev);
	if (n < 1) {
		device_printf(sc->dev, "%s: msix_count: %d\n", __func__, n);
		return (ENXIO);
	}

	nvec = n;
	error = pci_alloc_msix(sc->dev, &nvec);
	if (error != 0) {
		device_printf(sc->dev, "%s: alloc_msix error: %d\n", __func__,
		    error);
		return (error);
	}
	if (nvec < 1) {
		device_printf(sc->dev, "%s: alloc_msix: 0 vectors\n",
		    __func__);
		return (ENXIO);
	}
	if (nvec > nitems(sc->intr_res)) {
		device_printf(sc->dev, "%s: too many vectors: %u\n", __func__,
		    nvec);
		nvec = nitems(sc->intr_res);
	}

	for (rid = 1; rid < 1 + nvec; rid++) {
		ridcopy = rid;
		sc->intr_res[rid - 1] = bus_alloc_resource_any(sc->dev,
		    SYS_RES_IRQ, &ridcopy, RF_ACTIVE);
		if (sc->intr_res[rid - 1] == NULL) {
			device_printf(sc->dev, "%s: Failed to alloc IRQ resource\n",
			    __func__);
			return (ENXIO);
		}

		sc->intr_tag[rid - 1] = NULL;
		error = bus_setup_intr(sc->dev, sc->intr_res[rid - 1],
		    INTR_MPSAFE | INTR_TYPE_MISC, ccp_intr_filter,
		    ccp_intr_handler, sc, &sc->intr_tag[rid - 1]);
		if (error != 0)
			device_printf(sc->dev, "%s: setup_intr: %d\n",
			    __func__, error);
	}
	sc->intr_count = nvec;

	return (error);
}

static void
ccp_release_interrupts(struct ccp_softc *sc)
{
	unsigned i;

	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intr_tag[i] != NULL)
			bus_teardown_intr(sc->dev, sc->intr_res[i],
			    sc->intr_tag[i]);
		if (sc->intr_res[i] != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    rman_get_rid(sc->intr_res[i]), sc->intr_res[i]);
	}

	pci_release_msi(sc->dev);
}

int
ccp_hw_attach(device_t dev)
{
	struct ccp_softc *sc;
	uint64_t lsbmask;
	uint32_t version, lsbmasklo, lsbmaskhi;
	unsigned queue_idx, j;
	int error;
	bool bars_mapped, interrupts_setup;

	queue_idx = 0;
	bars_mapped = interrupts_setup = false;
	sc = device_get_softc(dev);

	error = ccp_map_pci_bar(dev);
	if (error != 0) {
		device_printf(dev, "%s: couldn't map BAR(s)\n", __func__);
		goto out;
	}
	bars_mapped = true;

	error = pci_enable_busmaster(dev);
	if (error != 0) {
		device_printf(dev, "%s: couldn't enable busmaster\n",
		    __func__);
		goto out;
	}

	sc->ring_size_order = g_ccp_ring_order;
	if (sc->ring_size_order < 6 || sc->ring_size_order > 16) {
		device_printf(dev, "bogus hw.ccp.ring_order\n");
		error = EINVAL;
		goto out;
	}
	sc->valid_queues = ccp_read_4(sc, CMD_QUEUE_MASK_OFFSET);

	version = ccp_read_4(sc, VERSION_REG);
	if ((version & VERSION_NUM_MASK) < 5) {
		device_printf(dev,
		    "driver supports version 5 and later hardware\n");
		error = ENXIO;
		goto out;
	}

	error = ccp_setup_interrupts(sc);
	if (error != 0)
		goto out;
	interrupts_setup = true;

	sc->hw_version = version & VERSION_NUM_MASK;
	sc->num_queues = (version >> VERSION_NUMVQM_SHIFT) &
	    VERSION_NUMVQM_MASK;
	sc->num_lsb_entries = (version >> VERSION_LSBSIZE_SHIFT) &
	    VERSION_LSBSIZE_MASK;
	sc->hw_features = version & VERSION_CAP_MASK;

	/*
	 * Copy private LSB mask to public registers to enable access to LSB
	 * from all queues allowed by BIOS.
	 */
	lsbmasklo = ccp_read_4(sc, LSB_PRIVATE_MASK_LO_OFFSET);
	lsbmaskhi = ccp_read_4(sc, LSB_PRIVATE_MASK_HI_OFFSET);
	ccp_write_4(sc, LSB_PUBLIC_MASK_LO_OFFSET, lsbmasklo);
	ccp_write_4(sc, LSB_PUBLIC_MASK_HI_OFFSET, lsbmaskhi);

	lsbmask = ((uint64_t)lsbmaskhi << 30) | lsbmasklo;

	for (; queue_idx < nitems(sc->queues); queue_idx++) {
		error = ccp_hw_attach_queue(dev, lsbmask, queue_idx);
		if (error != 0) {
			device_printf(dev, "%s: couldn't attach queue %u\n",
			    __func__, queue_idx);
			goto out;
		}
	}
	ccp_assign_lsb_regions(sc, lsbmask);

out:
	if (error != 0) {
		if (interrupts_setup)
			ccp_release_interrupts(sc);
		for (j = 0; j < queue_idx; j++)
			ccp_hw_detach_queue(dev, j);
		if (sc->ring_size_order != 0)
			pci_disable_busmaster(dev);
		if (bars_mapped)
			ccp_unmap_pci_bar(dev);
	}
	return (error);
}

void
ccp_hw_detach(device_t dev)
{
	struct ccp_softc *sc;
	unsigned i;

	sc = device_get_softc(dev);

	for (i = 0; i < nitems(sc->queues); i++)
		ccp_hw_detach_queue(dev, i);

	ccp_release_interrupts(sc);
	pci_disable_busmaster(dev);
	ccp_unmap_pci_bar(dev);
}

static int __must_check
ccp_passthrough(struct ccp_queue *qp, bus_addr_t dst,
    enum ccp_memtype dst_type, bus_addr_t src, enum ccp_memtype src_type,
    bus_size_t len, enum ccp_passthru_byteswap swapmode,
    enum ccp_passthru_bitwise bitmode, bool interrupt,
    const struct ccp_completion_ctx *cctx)
{
	struct ccp_desc *desc;

	if (ccp_queue_get_ring_space(qp) == 0)
		return (EAGAIN);

	desc = &qp->desc_ring[qp->cq_tail];

	memset(desc, 0, sizeof(*desc));
	desc->engine = CCP_ENGINE_PASSTHRU;

	desc->pt.ioc = interrupt;
	desc->pt.byteswap = swapmode;
	desc->pt.bitwise = bitmode;
	desc->length = len;

	desc->src_lo = (uint32_t)src;
	desc->src_hi = src >> 32;
	desc->src_mem = src_type;

	desc->dst_lo = (uint32_t)dst;
	desc->dst_hi = dst >> 32;
	desc->dst_mem = dst_type;

	if (bitmode != CCP_PASSTHRU_BITWISE_NOOP)
		desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_KEY);

	if (cctx != NULL)
		memcpy(&qp->completions_ring[qp->cq_tail], cctx, sizeof(*cctx));

	qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order);
	return (0);
}
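
/*
 * Illustrative sketch (not a call made by this file as-is): a caller could
 * stage a copy of one 32-byte buffer "buf" (hypothetical) from system memory
 * into this queue's reserved LSB_ENTRY_KEY slot, with no byte-swap or bitwise
 * transform and without requesting an interrupt, roughly as ccp_sha() below
 * loads its H vectors:
 *
 *	error = ccp_passthrough(qp,
 *	    ccp_queue_lsb_address(qp, LSB_ENTRY_KEY), CCP_MEMTYPE_SB,
 *	    pmap_kextract((vm_offset_t)buf), CCP_MEMTYPE_SYSTEM, 32,
 *	    CCP_PASSTHRU_BYTESWAP_NOOP, CCP_PASSTHRU_BITWISE_NOOP, false,
 *	    NULL);
 *
 * Note the descriptor is only staged in the software ring here; the hardware
 * tail register is updated separately (see ccp_queue_write_tail()).
 */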

static int __must_check
ccp_passthrough_sgl(struct ccp_queue *qp, bus_addr_t lsb_addr, bool tolsb,
    struct sglist *sgl, bus_size_t len, bool interrupt,
    const struct ccp_completion_ctx *cctx)
{
	struct sglist_seg *seg;
	size_t i, remain, nb;
	int error;

	remain = len;
	for (i = 0; i < sgl->sg_nseg && remain != 0; i++) {
		seg = &sgl->sg_segs[i];
		/* crp lengths are int, so 32-bit min() is ok. */
		nb = min(remain, seg->ss_len);

		if (tolsb)
			error = ccp_passthrough(qp, lsb_addr, CCP_MEMTYPE_SB,
			    seg->ss_paddr, CCP_MEMTYPE_SYSTEM, nb,
			    CCP_PASSTHRU_BYTESWAP_NOOP,
			    CCP_PASSTHRU_BITWISE_NOOP,
			    (nb == remain) && interrupt, cctx);
		else
			error = ccp_passthrough(qp, seg->ss_paddr,
			    CCP_MEMTYPE_SYSTEM, lsb_addr, CCP_MEMTYPE_SB, nb,
			    CCP_PASSTHRU_BYTESWAP_NOOP,
			    CCP_PASSTHRU_BITWISE_NOOP,
			    (nb == remain) && interrupt, cctx);
		if (error != 0)
			return (error);

		remain -= nb;
	}
	return (0);
}

/*
 * Note that these vectors are in reverse of the usual order.
 */
const struct SHA_vectors {
	uint32_t SHA1[8];
	uint32_t SHA224[8];
	uint32_t SHA256[8];
	uint64_t SHA384[8];
	uint64_t SHA512[8];
} SHA_H __aligned(PAGE_SIZE) = {
	.SHA1 = {
		0xc3d2e1f0ul,
		0x10325476ul,
		0x98badcfeul,
		0xefcdab89ul,
		0x67452301ul,
		0,
		0,
		0,
	},
	.SHA224 = {
		0xbefa4fa4ul,
		0x64f98fa7ul,
		0x68581511ul,
		0xffc00b31ul,
		0xf70e5939ul,
		0x3070dd17ul,
		0x367cd507ul,
		0xc1059ed8ul,
	},
	.SHA256 = {
		0x5be0cd19ul,
		0x1f83d9abul,
		0x9b05688cul,
		0x510e527ful,
		0xa54ff53aul,
		0x3c6ef372ul,
		0xbb67ae85ul,
		0x6a09e667ul,
	},
	.SHA384 = {
		0x47b5481dbefa4fa4ull,
		0xdb0c2e0d64f98fa7ull,
		0x8eb44a8768581511ull,
		0x67332667ffc00b31ull,
		0x152fecd8f70e5939ull,
		0x9159015a3070dd17ull,
		0x629a292a367cd507ull,
		0xcbbb9d5dc1059ed8ull,
	},
	.SHA512 = {
		0x5be0cd19137e2179ull,
		0x1f83d9abfb41bd6bull,
		0x9b05688c2b3e6c1full,
		0x510e527fade682d1ull,
		0xa54ff53a5f1d36f1ull,
		0x3c6ef372fe94f82bull,
		0xbb67ae8584caa73bull,
		0x6a09e667f3bcc908ull,
	},
};
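
/*
 * For example, the usual (FIPS 180-4) SHA-256 initial state starts with
 * H0 = 0x6a09e667 and ends with H7 = 0x5be0cd19; in SHA_H.SHA256 above those
 * words appear last and first, respectively, matching the reversed ordering
 * noted in the comment preceding the table.
 */
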
/*
 * Ensure vectors do not cross a page boundary.
 *
 * Disabled due to a new Clang error:  "expression is not an integral constant
 * expression."  GCC (cross toolchain) seems to handle this assertion with
 * _Static_assert just fine.
 */
#if 0
CTASSERT(PAGE_SIZE - ((uintptr_t)&SHA_H % PAGE_SIZE) >= sizeof(SHA_H));
#endif

const struct SHA_Defn {
	enum sha_version version;
	const void *H_vectors;
	size_t H_size;
	const struct auth_hash *axf;
	enum ccp_sha_type engine_type;
} SHA_definitions[] = {
	{
		.version = SHA1,
		.H_vectors = SHA_H.SHA1,
		.H_size = sizeof(SHA_H.SHA1),
		.axf = &auth_hash_hmac_sha1,
		.engine_type = CCP_SHA_TYPE_1,
	},
#if 0
	{
		.version = SHA2_224,
		.H_vectors = SHA_H.SHA224,
		.H_size = sizeof(SHA_H.SHA224),
		.axf = &auth_hash_hmac_sha2_224,
		.engine_type = CCP_SHA_TYPE_224,
	},
#endif
	{
		.version = SHA2_256,
		.H_vectors = SHA_H.SHA256,
		.H_size = sizeof(SHA_H.SHA256),
		.axf = &auth_hash_hmac_sha2_256,
		.engine_type = CCP_SHA_TYPE_256,
	},
	{
		.version = SHA2_384,
		.H_vectors = SHA_H.SHA384,
		.H_size = sizeof(SHA_H.SHA384),
		.axf = &auth_hash_hmac_sha2_384,
		.engine_type = CCP_SHA_TYPE_384,
	},
	{
		.version = SHA2_512,
		.H_vectors = SHA_H.SHA512,
		.H_size = sizeof(SHA_H.SHA512),
		.axf = &auth_hash_hmac_sha2_512,
		.engine_type = CCP_SHA_TYPE_512,
	},
};

static int __must_check
ccp_sha_single_desc(struct ccp_queue *qp, const struct SHA_Defn *defn,
    vm_paddr_t addr, size_t len, bool start, bool end, uint64_t msgbits)
{
	struct ccp_desc *desc;

	if (ccp_queue_get_ring_space(qp) == 0)
		return (EAGAIN);

	desc = &qp->desc_ring[qp->cq_tail];

	memset(desc, 0, sizeof(*desc));
	desc->engine = CCP_ENGINE_SHA;
	desc->som = start;
	desc->eom = end;

	desc->sha.type = defn->engine_type;
	desc->length = len;

	if (end) {
		desc->sha_len_lo = (uint32_t)msgbits;
		desc->sha_len_hi = msgbits >> 32;
	}

	desc->src_lo = (uint32_t)addr;
	desc->src_hi = addr >> 32;
	desc->src_mem = CCP_MEMTYPE_SYSTEM;

	desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_SHA);

	qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order);
	return (0);
}

1071844d9543SConrad Meyer static int __must_check
1072844d9543SConrad Meyer ccp_sha(struct ccp_queue *qp, enum sha_version version, struct sglist *sgl_src,
1073844d9543SConrad Meyer     struct sglist *sgl_dst, const struct ccp_completion_ctx *cctx)
1074844d9543SConrad Meyer {
1075844d9543SConrad Meyer 	const struct SHA_Defn *defn;
1076844d9543SConrad Meyer 	struct sglist_seg *seg;
1077844d9543SConrad Meyer 	size_t i, msgsize, remaining, nb;
1078844d9543SConrad Meyer 	uint32_t lsbaddr;
1079844d9543SConrad Meyer 	int error;
1080844d9543SConrad Meyer 
1081844d9543SConrad Meyer 	for (i = 0; i < nitems(SHA_definitions); i++)
1082844d9543SConrad Meyer 		if (SHA_definitions[i].version == version)
1083844d9543SConrad Meyer 			break;
1084844d9543SConrad Meyer 	if (i == nitems(SHA_definitions))
1085844d9543SConrad Meyer 		return (EINVAL);
1086844d9543SConrad Meyer 	defn = &SHA_definitions[i];
1087844d9543SConrad Meyer 
1088844d9543SConrad Meyer 	/* XXX validate input ??? */
1089844d9543SConrad Meyer 
1090844d9543SConrad Meyer 	/* Load initial SHA state into LSB */
1091844d9543SConrad Meyer 	/* XXX ensure H_vectors don't span page boundaries */
1092844d9543SConrad Meyer 	error = ccp_passthrough(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_SHA),
1093844d9543SConrad Meyer 	    CCP_MEMTYPE_SB, pmap_kextract((vm_offset_t)defn->H_vectors),
1094844d9543SConrad Meyer 	    CCP_MEMTYPE_SYSTEM, roundup2(defn->H_size, LSB_ENTRY_SIZE),
1095844d9543SConrad Meyer 	    CCP_PASSTHRU_BYTESWAP_NOOP, CCP_PASSTHRU_BITWISE_NOOP, false,
1096844d9543SConrad Meyer 	    NULL);
1097844d9543SConrad Meyer 	if (error != 0)
1098844d9543SConrad Meyer 		return (error);
1099844d9543SConrad Meyer 
1100844d9543SConrad Meyer 	/* Execute series of SHA updates on correctly sized buffers */
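	/*
	 * The first descriptor gets SOM and the last gets EOM along with the
	 * total message length in bits; e.g. segments of 64 and 20 bytes
	 * produce two descriptors, the second carrying msgbits = 84 * 8 = 672.
	 */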
1101844d9543SConrad Meyer 	msgsize = 0;
1102844d9543SConrad Meyer 	for (i = 0; i < sgl_src->sg_nseg; i++) {
1103844d9543SConrad Meyer 		seg = &sgl_src->sg_segs[i];
1104844d9543SConrad Meyer 		msgsize += seg->ss_len;
1105844d9543SConrad Meyer 		error = ccp_sha_single_desc(qp, defn, seg->ss_paddr,
1106844d9543SConrad Meyer 		    seg->ss_len, i == 0, i == sgl_src->sg_nseg - 1,
1107844d9543SConrad Meyer 		    msgsize << 3);
1108844d9543SConrad Meyer 		if (error != 0)
1109844d9543SConrad Meyer 			return (error);
1110844d9543SConrad Meyer 	}
1111844d9543SConrad Meyer 
1112844d9543SConrad Meyer 	/* Copy result out to sgl_dst */
1113844d9543SConrad Meyer 	remaining = roundup2(defn->H_size, LSB_ENTRY_SIZE);
1114844d9543SConrad Meyer 	lsbaddr = ccp_queue_lsb_address(qp, LSB_ENTRY_SHA);
1115844d9543SConrad Meyer 	for (i = 0; i < sgl_dst->sg_nseg; i++) {
1116844d9543SConrad Meyer 		seg = &sgl_dst->sg_segs[i];
1117c0341432SJohn Baldwin 		/* crp lengths are int, so 32-bit min() is ok. */
1118844d9543SConrad Meyer 		nb = min(remaining, seg->ss_len);
1119844d9543SConrad Meyer 
1120844d9543SConrad Meyer 		error = ccp_passthrough(qp, seg->ss_paddr, CCP_MEMTYPE_SYSTEM,
1121844d9543SConrad Meyer 		    lsbaddr, CCP_MEMTYPE_SB, nb, CCP_PASSTHRU_BYTESWAP_NOOP,
1122844d9543SConrad Meyer 		    CCP_PASSTHRU_BITWISE_NOOP,
1123844d9543SConrad Meyer 		    (cctx != NULL) ? (nb == remaining) : false,
1124844d9543SConrad Meyer 		    (nb == remaining) ? cctx : NULL);
1125844d9543SConrad Meyer 		if (error != 0)
1126844d9543SConrad Meyer 			return (error);
1127844d9543SConrad Meyer 
1128844d9543SConrad Meyer 		remaining -= nb;
1129844d9543SConrad Meyer 		lsbaddr += nb;
1130844d9543SConrad Meyer 		if (remaining == 0)
1131844d9543SConrad Meyer 			break;
1132844d9543SConrad Meyer 	}
1133844d9543SConrad Meyer 
1134844d9543SConrad Meyer 	return (0);
1135844d9543SConrad Meyer }
1136844d9543SConrad Meyer 
1137844d9543SConrad Meyer static void
1138844d9543SConrad Meyer byteswap256(uint64_t *buffer)
1139844d9543SConrad Meyer {
1140844d9543SConrad Meyer 	uint64_t t;
1141844d9543SConrad Meyer 
1142844d9543SConrad Meyer 	t = bswap64(buffer[3]);
1143844d9543SConrad Meyer 	buffer[3] = bswap64(buffer[0]);
1144844d9543SConrad Meyer 	buffer[0] = t;
1145844d9543SConrad Meyer 
1146844d9543SConrad Meyer 	t = bswap64(buffer[2]);
1147844d9543SConrad Meyer 	buffer[2] = bswap64(buffer[1]);
1148844d9543SConrad Meyer 	buffer[1] = t;
1149844d9543SConrad Meyer }
1150844d9543SConrad Meyer 
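/*
 * Hypothetical, disabled illustration (not part of the driver): byteswap256()
 * mirrors all 32 bytes of a 256-bit LSB word -- each 64-bit lane is
 * bswap64()ed and the lane order is reversed, so byte i ends up at byte
 * 31 - i.  The function name below is made up for this sketch.
 */
#if 0
static void
byteswap256_example(void)
{
	uint64_t words[4];
	uint8_t *bytes;
	unsigned i;

	bytes = (uint8_t *)words;
	for (i = 0; i < 32; i++)
		bytes[i] = i;
	byteswap256(words);
	for (i = 0; i < 32; i++)
		KASSERT(bytes[i] == 31 - i, ("byte %u not mirrored", i));
}
#endif
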
1151844d9543SConrad Meyer /*
1152844d9543SConrad Meyer  * Translate CCP internal LSB hash format into a standard hash output.
1153844d9543SConrad Meyer  *
1154844d9543SConrad Meyer  * Manipulates input buffer with byteswap256 operation.
1155844d9543SConrad Meyer  */
1156844d9543SConrad Meyer static void
1157844d9543SConrad Meyer ccp_sha_copy_result(char *output, char *buffer, enum sha_version version)
1158844d9543SConrad Meyer {
1159844d9543SConrad Meyer 	const struct SHA_Defn *defn;
1160844d9543SConrad Meyer 	size_t i;
1161844d9543SConrad Meyer 
1162844d9543SConrad Meyer 	for (i = 0; i < nitems(SHA_definitions); i++)
1163844d9543SConrad Meyer 		if (SHA_definitions[i].version == version)
1164844d9543SConrad Meyer 			break;
1165844d9543SConrad Meyer 	if (i == nitems(SHA_definitions))
1166844d9543SConrad Meyer 		panic("bogus sha version auth_mode %u\n", (unsigned)version);
1167844d9543SConrad Meyer 
1168844d9543SConrad Meyer 	defn = &SHA_definitions[i];
1169844d9543SConrad Meyer 
1170844d9543SConrad Meyer 	/* Swap 256bit manually -- DMA engine can, but with limitations */
1171844d9543SConrad Meyer 	byteswap256((void *)buffer);
1172844d9543SConrad Meyer 	if (defn->axf->hashsize > LSB_ENTRY_SIZE)
1173844d9543SConrad Meyer 		byteswap256((void *)(buffer + LSB_ENTRY_SIZE));
1174844d9543SConrad Meyer 
1175844d9543SConrad Meyer 	switch (defn->version) {
1176844d9543SConrad Meyer 	case SHA1:
1177844d9543SConrad Meyer 		memcpy(output, buffer + 12, defn->axf->hashsize);
1178844d9543SConrad Meyer 		break;
1179844d9543SConrad Meyer #if 0
1180844d9543SConrad Meyer 	case SHA2_224:
1181844d9543SConrad Meyer 		memcpy(output, buffer + XXX, defn->axf->hashsize);
1182844d9543SConrad Meyer 		break;
1183844d9543SConrad Meyer #endif
1184844d9543SConrad Meyer 	case SHA2_256:
1185844d9543SConrad Meyer 		memcpy(output, buffer, defn->axf->hashsize);
1186844d9543SConrad Meyer 		break;
1187844d9543SConrad Meyer 	case SHA2_384:
1188844d9543SConrad Meyer 		memcpy(output,
1189844d9543SConrad Meyer 		    buffer + LSB_ENTRY_SIZE * 3 - defn->axf->hashsize,
1190844d9543SConrad Meyer 		    defn->axf->hashsize - LSB_ENTRY_SIZE);
1191844d9543SConrad Meyer 		memcpy(output + defn->axf->hashsize - LSB_ENTRY_SIZE, buffer,
1192844d9543SConrad Meyer 		    LSB_ENTRY_SIZE);
1193844d9543SConrad Meyer 		break;
1194844d9543SConrad Meyer 	case SHA2_512:
1195844d9543SConrad Meyer 		memcpy(output, buffer + LSB_ENTRY_SIZE, LSB_ENTRY_SIZE);
1196844d9543SConrad Meyer 		memcpy(output + LSB_ENTRY_SIZE, buffer, LSB_ENTRY_SIZE);
1197844d9543SConrad Meyer 		break;
1198844d9543SConrad Meyer 	}
1199844d9543SConrad Meyer }
1200844d9543SConrad Meyer 
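/*
 * Complete an HMAC request: the hardware has produced the inner hash
 * H((key ^ ipad) || data) into hmac.res; finish the outer hash
 * H((key ^ opad) || inner) in software, then verify or copy out the digest.
 */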
1201844d9543SConrad Meyer static void
1202844d9543SConrad Meyer ccp_do_hmac_done(struct ccp_queue *qp, struct ccp_session *s,
1203c0341432SJohn Baldwin     struct cryptop *crp, int error)
1204844d9543SConrad Meyer {
1205844d9543SConrad Meyer 	char ihash[SHA2_512_HASH_LEN /* max hash len */];
1206844d9543SConrad Meyer 	union authctx auth_ctx;
1207d8787d4fSMark Johnston 	const struct auth_hash *axf;
1208844d9543SConrad Meyer 
1209844d9543SConrad Meyer 	axf = s->hmac.auth_hash;
1210844d9543SConrad Meyer 
1211844d9543SConrad Meyer 	s->pending--;
1212844d9543SConrad Meyer 
1213844d9543SConrad Meyer 	if (error != 0) {
1214844d9543SConrad Meyer 		crp->crp_etype = error;
1215844d9543SConrad Meyer 		goto out;
1216844d9543SConrad Meyer 	}
1217844d9543SConrad Meyer 
1218844d9543SConrad Meyer 	/* Do remaining outer hash over small inner hash in software */
1219844d9543SConrad Meyer 	axf->Init(&auth_ctx);
1220844d9543SConrad Meyer 	axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
1221c0341432SJohn Baldwin 	ccp_sha_copy_result(ihash, s->hmac.res, s->hmac.auth_mode);
1222844d9543SConrad Meyer #if 0
1223844d9543SConrad Meyer 	INSECURE_DEBUG(dev, "%s sha intermediate=%64D\n", __func__,
1224844d9543SConrad Meyer 	    (u_char *)ihash, " ");
1225844d9543SConrad Meyer #endif
1226844d9543SConrad Meyer 	axf->Update(&auth_ctx, ihash, axf->hashsize);
1227c0341432SJohn Baldwin 	axf->Final(s->hmac.res, &auth_ctx);
1228844d9543SConrad Meyer 
1229c0341432SJohn Baldwin 	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
1230c0341432SJohn Baldwin 		crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
1231c0341432SJohn Baldwin 		    ihash);
1232c0341432SJohn Baldwin 		if (timingsafe_bcmp(s->hmac.res, ihash, s->hmac.hash_len) != 0)
1233c0341432SJohn Baldwin 			crp->crp_etype = EBADMSG;
1234c0341432SJohn Baldwin 	} else
1235c0341432SJohn Baldwin 		crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
1236c0341432SJohn Baldwin 		    s->hmac.res);
1237844d9543SConrad Meyer 
1238844d9543SConrad Meyer 	/* Avoid leaking key material */
1239844d9543SConrad Meyer 	explicit_bzero(&auth_ctx, sizeof(auth_ctx));
1240c0341432SJohn Baldwin 	explicit_bzero(s->hmac.res, sizeof(s->hmac.res));
1241844d9543SConrad Meyer 
1242844d9543SConrad Meyer out:
1243844d9543SConrad Meyer 	crypto_done(crp);
1244844d9543SConrad Meyer }
1245844d9543SConrad Meyer 
1246844d9543SConrad Meyer static void
1247844d9543SConrad Meyer ccp_hmac_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp,
1248844d9543SConrad Meyer     int error)
1249844d9543SConrad Meyer {
1250844d9543SConrad Meyer 	struct cryptop *crp;
1251844d9543SConrad Meyer 
1252844d9543SConrad Meyer 	crp = vcrp;
1253c0341432SJohn Baldwin 	ccp_do_hmac_done(qp, s, crp, error);
1254844d9543SConrad Meyer }
1255844d9543SConrad Meyer 
1256844d9543SConrad Meyer static int __must_check
1257844d9543SConrad Meyer ccp_do_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp,
1258c0341432SJohn Baldwin     const struct ccp_completion_ctx *cctx)
1259844d9543SConrad Meyer {
1260844d9543SConrad Meyer 	device_t dev;
1261d8787d4fSMark Johnston 	const struct auth_hash *axf;
1262844d9543SConrad Meyer 	int error;
1263844d9543SConrad Meyer 
1264844d9543SConrad Meyer 	dev = qp->cq_softc->dev;
1265844d9543SConrad Meyer 	axf = s->hmac.auth_hash;
1266844d9543SConrad Meyer 
1267844d9543SConrad Meyer 	/*
1268844d9543SConrad Meyer 	 * Populate the SGL describing inside hash contents.  We want to hash
1269844d9543SConrad Meyer 	 * the ipad (key XOR fixed bit pattern) concatenated with the user
1270844d9543SConrad Meyer 	 * data.
1271844d9543SConrad Meyer 	 */
1272844d9543SConrad Meyer 	sglist_reset(qp->cq_sg_ulptx);
1273844d9543SConrad Meyer 	error = sglist_append(qp->cq_sg_ulptx, s->hmac.ipad, axf->blocksize);
1274844d9543SConrad Meyer 	if (error != 0)
1275844d9543SConrad Meyer 		return (error);
1276c0341432SJohn Baldwin 	if (crp->crp_aad_length != 0) {
1277844d9543SConrad Meyer 		error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
1278c0341432SJohn Baldwin 		    crp->crp_aad_start, crp->crp_aad_length);
1279c0341432SJohn Baldwin 		if (error != 0)
1280c0341432SJohn Baldwin 			return (error);
1281c0341432SJohn Baldwin 	}
1282c0341432SJohn Baldwin 	error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
1283c0341432SJohn Baldwin 	    crp->crp_payload_start, crp->crp_payload_length);
1284844d9543SConrad Meyer 	if (error != 0) {
1285844d9543SConrad Meyer 		DPRINTF(dev, "%s: sglist too short\n", __func__);
1286844d9543SConrad Meyer 		return (error);
1287844d9543SConrad Meyer 	}
1288c0341432SJohn Baldwin 	/* Populate SGL for output -- use hmac.res buffer. */
1289844d9543SConrad Meyer 	sglist_reset(qp->cq_sg_dst);
1290c0341432SJohn Baldwin 	error = sglist_append(qp->cq_sg_dst, s->hmac.res,
1291844d9543SConrad Meyer 	    roundup2(axf->hashsize, LSB_ENTRY_SIZE));
1292844d9543SConrad Meyer 	if (error != 0)
1293844d9543SConrad Meyer 		return (error);
1294844d9543SConrad Meyer 
1295844d9543SConrad Meyer 	error = ccp_sha(qp, s->hmac.auth_mode, qp->cq_sg_ulptx, qp->cq_sg_dst,
1296844d9543SConrad Meyer 	    cctx);
1297844d9543SConrad Meyer 	if (error != 0) {
1298844d9543SConrad Meyer 		DPRINTF(dev, "%s: ccp_sha error\n", __func__);
1299844d9543SConrad Meyer 		return (error);
1300844d9543SConrad Meyer 	}
1301844d9543SConrad Meyer 	return (0);
1302844d9543SConrad Meyer }
1303844d9543SConrad Meyer 
1304844d9543SConrad Meyer int __must_check
1305844d9543SConrad Meyer ccp_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
1306844d9543SConrad Meyer {
1307844d9543SConrad Meyer 	struct ccp_completion_ctx ctx;
1308844d9543SConrad Meyer 
1309844d9543SConrad Meyer 	ctx.callback_fn = ccp_hmac_done;
1310844d9543SConrad Meyer 	ctx.callback_arg = crp;
1311844d9543SConrad Meyer 	ctx.session = s;
1312844d9543SConrad Meyer 
1313c0341432SJohn Baldwin 	return (ccp_do_hmac(qp, s, crp, &ctx));
1314844d9543SConrad Meyer }
1315844d9543SConrad Meyer 
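/*
 * Reverse a buffer in place.  Used to present key and IV material to the
 * hardware in the byte order it expects.
 */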
1316844d9543SConrad Meyer static void
1317844d9543SConrad Meyer ccp_byteswap(char *data, size_t len)
1318844d9543SConrad Meyer {
1319844d9543SConrad Meyer 	size_t i;
1320844d9543SConrad Meyer 	char t;
1321844d9543SConrad Meyer 
1322844d9543SConrad Meyer 	len--;
1323844d9543SConrad Meyer 	for (i = 0; i < len; i++, len--) {
1324844d9543SConrad Meyer 		t = data[i];
1325844d9543SConrad Meyer 		data[i] = data[len];
1326844d9543SConrad Meyer 		data[len] = t;
1327844d9543SConrad Meyer 	}
1328844d9543SConrad Meyer }
1329844d9543SConrad Meyer 
1330844d9543SConrad Meyer static void
1331844d9543SConrad Meyer ccp_blkcipher_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp,
1332844d9543SConrad Meyer     int error)
1333844d9543SConrad Meyer {
1334844d9543SConrad Meyer 	struct cryptop *crp;
1335844d9543SConrad Meyer 
1336c0341432SJohn Baldwin 	explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv));
1337844d9543SConrad Meyer 
1338844d9543SConrad Meyer 	crp = vcrp;
1339844d9543SConrad Meyer 
1340844d9543SConrad Meyer 	s->pending--;
1341844d9543SConrad Meyer 
1342844d9543SConrad Meyer 	if (error != 0)
1343844d9543SConrad Meyer 		crp->crp_etype = error;
1344844d9543SConrad Meyer 
1345844d9543SConrad Meyer 	DPRINTF(qp->cq_softc->dev, "%s: qp=%p crp=%p\n", __func__, qp, crp);
1346844d9543SConrad Meyer 	crypto_done(crp);
1347844d9543SConrad Meyer }
1348844d9543SConrad Meyer 
1349844d9543SConrad Meyer static void
1350c0341432SJohn Baldwin ccp_collect_iv(struct cryptop *crp, const struct crypto_session_params *csp,
1351c0341432SJohn Baldwin     char *iv)
1352844d9543SConrad Meyer {
1353844d9543SConrad Meyer 
135429fe41ddSJohn Baldwin 	crypto_read_iv(crp, iv);
1355844d9543SConrad Meyer 
1356844d9543SConrad Meyer 	/*
1357cb128893SJohn Baldwin 	 * Append an explicit counter of 1 for GCM.
1358844d9543SConrad Meyer 	 */
1359cb128893SJohn Baldwin 	if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16)
1360c0341432SJohn Baldwin 		*(uint32_t *)&iv[12] = htobe32(1);
1361844d9543SConrad Meyer 
1362c0341432SJohn Baldwin 	if (csp->csp_cipher_alg == CRYPTO_AES_XTS &&
1363c0341432SJohn Baldwin 	    csp->csp_ivlen < AES_BLOCK_LEN)
1364c0341432SJohn Baldwin 		memset(&iv[csp->csp_ivlen], 0, AES_BLOCK_LEN - csp->csp_ivlen);
1365844d9543SConrad Meyer 
1366844d9543SConrad Meyer 	/* Reverse order of IV material for HW */
1367c0341432SJohn Baldwin 	INSECURE_DEBUG(NULL, "%s: IV: %16D len: %u\n", __func__, iv, " ",
1368c0341432SJohn Baldwin 	    csp->csp_ivlen);
1369844d9543SConrad Meyer 
1370844d9543SConrad Meyer 	/*
1371844d9543SConrad Meyer 	 * For unknown reasons, XTS mode expects the IV in the reverse byte
1372844d9543SConrad Meyer 	 * order to every other AES mode.
1373844d9543SConrad Meyer 	 */
1374c0341432SJohn Baldwin 	if (csp->csp_cipher_alg != CRYPTO_AES_XTS)
1375c0341432SJohn Baldwin 		ccp_byteswap(iv, AES_BLOCK_LEN);
1376844d9543SConrad Meyer }
1377844d9543SConrad Meyer 
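/*
 * Copy a small host buffer into the queue's LSB at the given LSB address via
 * a passthrough operation.
 */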
1378844d9543SConrad Meyer static int __must_check
1379844d9543SConrad Meyer ccp_do_pst_to_lsb(struct ccp_queue *qp, uint32_t lsbaddr, const void *src,
1380844d9543SConrad Meyer     size_t len)
1381844d9543SConrad Meyer {
1382844d9543SConrad Meyer 	int error;
1383844d9543SConrad Meyer 
1384844d9543SConrad Meyer 	sglist_reset(qp->cq_sg_ulptx);
1385844d9543SConrad Meyer 	error = sglist_append(qp->cq_sg_ulptx, __DECONST(void *, src), len);
1386844d9543SConrad Meyer 	if (error != 0)
1387844d9543SConrad Meyer 		return (error);
1388844d9543SConrad Meyer 
1389844d9543SConrad Meyer 	error = ccp_passthrough_sgl(qp, lsbaddr, true, qp->cq_sg_ulptx, len,
1390844d9543SConrad Meyer 	    false, NULL);
1391844d9543SConrad Meyer 	return (error);
1392844d9543SConrad Meyer }
1393844d9543SConrad Meyer 
1394844d9543SConrad Meyer static int __must_check
1395844d9543SConrad Meyer ccp_do_xts(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp,
1396c0341432SJohn Baldwin     enum ccp_cipher_dir dir, const struct ccp_completion_ctx *cctx)
1397844d9543SConrad Meyer {
1398844d9543SConrad Meyer 	struct ccp_desc *desc;
1399844d9543SConrad Meyer 	device_t dev;
1400844d9543SConrad Meyer 	unsigned i;
1401844d9543SConrad Meyer 	enum ccp_xts_unitsize usize;
1402844d9543SConrad Meyer 
1403844d9543SConrad Meyer 	/* IV and Key data are already loaded */
1404844d9543SConrad Meyer 
1405844d9543SConrad Meyer 	dev = qp->cq_softc->dev;
1406844d9543SConrad Meyer 
1407844d9543SConrad Meyer 	for (i = 0; i < nitems(ccp_xts_unitsize_map); i++)
1408c0341432SJohn Baldwin 		if (ccp_xts_unitsize_map[i].cxu_size ==
1409c0341432SJohn Baldwin 		    crp->crp_payload_length) {
1410844d9543SConrad Meyer 			usize = ccp_xts_unitsize_map[i].cxu_id;
1411844d9543SConrad Meyer 			break;
1412844d9543SConrad Meyer 		}
1413844d9543SConrad Meyer 	if (i >= nitems(ccp_xts_unitsize_map))
1414844d9543SConrad Meyer 		return (EINVAL);
1415844d9543SConrad Meyer 
1416844d9543SConrad Meyer 	for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) {
1417844d9543SConrad Meyer 		struct sglist_seg *seg;
1418844d9543SConrad Meyer 
1419844d9543SConrad Meyer 		seg = &qp->cq_sg_ulptx->sg_segs[i];
1420844d9543SConrad Meyer 
1421844d9543SConrad Meyer 		desc = &qp->desc_ring[qp->cq_tail];
1422844d9543SConrad Meyer 		desc->engine = CCP_ENGINE_XTS_AES;
1423844d9543SConrad Meyer 		desc->som = (i == 0);
1424844d9543SConrad Meyer 		desc->eom = (i == qp->cq_sg_ulptx->sg_nseg - 1);
1425844d9543SConrad Meyer 		desc->ioc = (desc->eom && cctx != NULL);
1426844d9543SConrad Meyer 		DPRINTF(dev, "%s: XTS %u: som:%d eom:%d ioc:%d dir:%d\n",
1427844d9543SConrad Meyer 		    __func__, qp->cq_tail, (int)desc->som, (int)desc->eom,
1428844d9543SConrad Meyer 		    (int)desc->ioc, (int)dir);
1429844d9543SConrad Meyer 
1430844d9543SConrad Meyer 		if (desc->ioc)
1431844d9543SConrad Meyer 			memcpy(&qp->completions_ring[qp->cq_tail], cctx,
1432844d9543SConrad Meyer 			    sizeof(*cctx));
1433844d9543SConrad Meyer 
1434844d9543SConrad Meyer 		desc->aes_xts.encrypt = dir;
1435844d9543SConrad Meyer 		desc->aes_xts.type = s->blkcipher.cipher_type;
1436844d9543SConrad Meyer 		desc->aes_xts.size = usize;
1437844d9543SConrad Meyer 
1438844d9543SConrad Meyer 		DPRINTF(dev, "XXX %s: XTS %u: type:%u size:%u\n", __func__,
1439844d9543SConrad Meyer 		    qp->cq_tail, (unsigned)desc->aes_xts.type,
1440844d9543SConrad Meyer 		    (unsigned)desc->aes_xts.size);
1441844d9543SConrad Meyer 
1442844d9543SConrad Meyer 		desc->length = seg->ss_len;
1443844d9543SConrad Meyer 		desc->src_lo = (uint32_t)seg->ss_paddr;
1444844d9543SConrad Meyer 		desc->src_hi = (seg->ss_paddr >> 32);
1445844d9543SConrad Meyer 		desc->src_mem = CCP_MEMTYPE_SYSTEM;
1446844d9543SConrad Meyer 
1447844d9543SConrad Meyer 		/* Crypt in-place */
1448844d9543SConrad Meyer 		desc->dst_lo = desc->src_lo;
1449844d9543SConrad Meyer 		desc->dst_hi = desc->src_hi;
1450844d9543SConrad Meyer 		desc->dst_mem = desc->src_mem;
1451844d9543SConrad Meyer 
1452844d9543SConrad Meyer 		desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY);
1453844d9543SConrad Meyer 		desc->key_hi = 0;
1454844d9543SConrad Meyer 		desc->key_mem = CCP_MEMTYPE_SB;
1455844d9543SConrad Meyer 
1456844d9543SConrad Meyer 		desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV);
1457844d9543SConrad Meyer 
1458844d9543SConrad Meyer 		qp->cq_tail = (qp->cq_tail + 1) %
1459844d9543SConrad Meyer 		    (1 << qp->cq_softc->ring_size_order);
1460844d9543SConrad Meyer 	}
1461844d9543SConrad Meyer 	return (0);
1462844d9543SConrad Meyer }
1463844d9543SConrad Meyer 
1464844d9543SConrad Meyer static int __must_check
1465844d9543SConrad Meyer ccp_do_blkcipher(struct ccp_queue *qp, struct ccp_session *s,
1466c0341432SJohn Baldwin     struct cryptop *crp, const struct ccp_completion_ctx *cctx)
1467844d9543SConrad Meyer {
1468c0341432SJohn Baldwin 	const struct crypto_session_params *csp;
1469844d9543SConrad Meyer 	struct ccp_desc *desc;
1470844d9543SConrad Meyer 	char *keydata;
1471844d9543SConrad Meyer 	device_t dev;
1472844d9543SConrad Meyer 	enum ccp_cipher_dir dir;
1473c0341432SJohn Baldwin 	int error, iv_len;
1474844d9543SConrad Meyer 	size_t keydata_len;
1475844d9543SConrad Meyer 	unsigned i, j;
1476844d9543SConrad Meyer 
1477844d9543SConrad Meyer 	dev = qp->cq_softc->dev;
1478844d9543SConrad Meyer 
1479c0341432SJohn Baldwin 	if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) {
1480844d9543SConrad Meyer 		DPRINTF(dev, "%s: empty\n", __func__);
1481844d9543SConrad Meyer 		return (EINVAL);
1482844d9543SConrad Meyer 	}
1483c0341432SJohn Baldwin 	if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) {
1484c0341432SJohn Baldwin 		DPRINTF(dev, "%s: len modulo: %d\n", __func__,
1485c0341432SJohn Baldwin 		    crp->crp_payload_length);
1486844d9543SConrad Meyer 		return (EINVAL);
1487844d9543SConrad Meyer 	}
1488844d9543SConrad Meyer 
1489844d9543SConrad Meyer 	/*
1490844d9543SConrad Meyer 	 * Individual segments must be multiples of AES block size for the HW
1491844d9543SConrad Meyer 	 * to process it.  Non-compliant inputs aren't bogus, just not doable
1492844d9543SConrad Meyer 	 * on this hardware.
1493844d9543SConrad Meyer 	 */
1494844d9543SConrad Meyer 	for (i = 0; i < qp->cq_sg_crp->sg_nseg; i++)
1495844d9543SConrad Meyer 		if ((qp->cq_sg_crp->sg_segs[i].ss_len % AES_BLOCK_LEN) != 0) {
1496844d9543SConrad Meyer 			DPRINTF(dev, "%s: seg modulo: %zu\n", __func__,
1497844d9543SConrad Meyer 			    qp->cq_sg_crp->sg_segs[i].ss_len);
1498844d9543SConrad Meyer 			return (EINVAL);
1499844d9543SConrad Meyer 		}
1500844d9543SConrad Meyer 
1501844d9543SConrad Meyer 	/* Gather IV/nonce data */
1502c0341432SJohn Baldwin 	csp = crypto_get_params(crp->crp_session);
1503c0341432SJohn Baldwin 	ccp_collect_iv(crp, csp, s->blkcipher.iv);
1504c0341432SJohn Baldwin 	iv_len = csp->csp_ivlen;
1505c0341432SJohn Baldwin 	if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
1506c0341432SJohn Baldwin 		iv_len = AES_BLOCK_LEN;
1507844d9543SConrad Meyer 
1508c0341432SJohn Baldwin 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1509844d9543SConrad Meyer 		dir = CCP_CIPHER_DIR_ENCRYPT;
1510844d9543SConrad Meyer 	else
1511844d9543SConrad Meyer 		dir = CCP_CIPHER_DIR_DECRYPT;
1512844d9543SConrad Meyer 
1513844d9543SConrad Meyer 	/* Set up passthrough op(s) to copy IV into LSB */
1514844d9543SConrad Meyer 	error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_IV),
1515c0341432SJohn Baldwin 	    s->blkcipher.iv, iv_len);
1516844d9543SConrad Meyer 	if (error != 0)
1517844d9543SConrad Meyer 		return (error);
1518844d9543SConrad Meyer 
1519844d9543SConrad Meyer 	/*
1520844d9543SConrad Meyer 	 * Initialize keydata and keydata_len for GCC.  The default case of the
1521844d9543SConrad Meyer 	 * following switch is impossible to reach, but GCC doesn't know that.
1522844d9543SConrad Meyer 	 */
1523844d9543SConrad Meyer 	keydata_len = 0;
1524844d9543SConrad Meyer 	keydata = NULL;
1525844d9543SConrad Meyer 
1526c0341432SJohn Baldwin 	switch (csp->csp_cipher_alg) {
1527844d9543SConrad Meyer 	case CRYPTO_AES_XTS:
1528844d9543SConrad Meyer 		for (j = 0; j < nitems(ccp_xts_unitsize_map); j++)
1529c0341432SJohn Baldwin 			if (ccp_xts_unitsize_map[j].cxu_size ==
1530c0341432SJohn Baldwin 			    crp->crp_payload_length)
1531844d9543SConrad Meyer 				break;
1532844d9543SConrad Meyer 		/* Input buffer must be a supported UnitSize */
1533844d9543SConrad Meyer 		if (j >= nitems(ccp_xts_unitsize_map)) {
1534844d9543SConrad Meyer 			device_printf(dev, "%s: rejected block size: %u\n",
1535c0341432SJohn Baldwin 			    __func__, crp->crp_payload_length);
1536844d9543SConrad Meyer 			return (EOPNOTSUPP);
1537844d9543SConrad Meyer 		}
1538844d9543SConrad Meyer 		/* FALLTHROUGH */
1539844d9543SConrad Meyer 	case CRYPTO_AES_CBC:
1540844d9543SConrad Meyer 	case CRYPTO_AES_ICM:
1541844d9543SConrad Meyer 		keydata = s->blkcipher.enckey;
1542844d9543SConrad Meyer 		keydata_len = s->blkcipher.key_len;
1543844d9543SConrad Meyer 		break;
1544844d9543SConrad Meyer 	}
1545844d9543SConrad Meyer 
1546844d9543SConrad Meyer 	INSECURE_DEBUG(dev, "%s: KEY(%zu): %16D\n", __func__, keydata_len,
1547844d9543SConrad Meyer 	    keydata, " ");
1548c0341432SJohn Baldwin 	if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
1549844d9543SConrad Meyer 		INSECURE_DEBUG(dev, "%s: KEY(XTS): %64D\n", __func__, keydata, " ");
1550844d9543SConrad Meyer 
1551844d9543SConrad Meyer 	/* Reverse order of key material for HW */
1552844d9543SConrad Meyer 	ccp_byteswap(keydata, keydata_len);
1553844d9543SConrad Meyer 
1554844d9543SConrad Meyer 	/* Store key material into LSB to avoid page boundaries */
1555c0341432SJohn Baldwin 	if (csp->csp_cipher_alg == CRYPTO_AES_XTS) {
1556844d9543SConrad Meyer 		/*
1557844d9543SConrad Meyer 		 * XTS mode uses 2 256-bit vectors for the primary key and the
1558844d9543SConrad Meyer 		 * tweak key.  For 128-bit keys, the vectors are zero-padded.
1559844d9543SConrad Meyer 		 *
1560844d9543SConrad Meyer 		 * After byteswapping the combined OCF-provided K1:K2 vector
1561844d9543SConrad Meyer 		 * above, we need to reverse the order again so the hardware
1562844d9543SConrad Meyer 		 * gets the swapped keys in the order K1':K2'.
1563844d9543SConrad Meyer 		 */
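		/*
		 * E.g. for a combined key K1:K2, ccp_byteswap() above leaves
		 * the buffer as K2':K1' (each half byte-reversed), so the
		 * first half written below lands in LSB_ENTRY_KEY + 1 (K2')
		 * and the second half in LSB_ENTRY_KEY (K1'); 128-bit keys
		 * are then zero-padded to fill each 256-bit entry.
		 */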
1564844d9543SConrad Meyer 		error = ccp_do_pst_to_lsb(qp,
1565844d9543SConrad Meyer 		    ccp_queue_lsb_address(qp, LSB_ENTRY_KEY + 1), keydata,
1566844d9543SConrad Meyer 		    keydata_len / 2);
1567844d9543SConrad Meyer 		if (error != 0)
1568844d9543SConrad Meyer 			return (error);
1569844d9543SConrad Meyer 		error = ccp_do_pst_to_lsb(qp,
1570844d9543SConrad Meyer 		    ccp_queue_lsb_address(qp, LSB_ENTRY_KEY),
1571844d9543SConrad Meyer 		    keydata + (keydata_len / 2), keydata_len / 2);
1572844d9543SConrad Meyer 
1573844d9543SConrad Meyer 		/* Zero-pad 128 bit keys */
1574844d9543SConrad Meyer 		if (keydata_len == 32) {
1575844d9543SConrad Meyer 			if (error != 0)
1576844d9543SConrad Meyer 				return (error);
1577844d9543SConrad Meyer 			error = ccp_do_pst_to_lsb(qp,
1578844d9543SConrad Meyer 			    ccp_queue_lsb_address(qp, LSB_ENTRY_KEY) +
1579844d9543SConrad Meyer 			    keydata_len / 2, g_zeroes, keydata_len / 2);
1580844d9543SConrad Meyer 			if (error != 0)
1581844d9543SConrad Meyer 				return (error);
1582844d9543SConrad Meyer 			error = ccp_do_pst_to_lsb(qp,
1583844d9543SConrad Meyer 			    ccp_queue_lsb_address(qp, LSB_ENTRY_KEY + 1) +
1584844d9543SConrad Meyer 			    keydata_len / 2, g_zeroes, keydata_len / 2);
1585844d9543SConrad Meyer 		}
1586844d9543SConrad Meyer 	} else
1587844d9543SConrad Meyer 		error = ccp_do_pst_to_lsb(qp,
1588844d9543SConrad Meyer 		    ccp_queue_lsb_address(qp, LSB_ENTRY_KEY), keydata,
1589844d9543SConrad Meyer 		    keydata_len);
1590844d9543SConrad Meyer 	if (error != 0)
1591844d9543SConrad Meyer 		return (error);
1592844d9543SConrad Meyer 
1593844d9543SConrad Meyer 	/*
1594844d9543SConrad Meyer 	 * Point SGLs at the subset of cryptop buffer contents representing the
1595844d9543SConrad Meyer 	 * data.
1596844d9543SConrad Meyer 	 */
1597844d9543SConrad Meyer 	sglist_reset(qp->cq_sg_ulptx);
1598844d9543SConrad Meyer 	error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
1599c0341432SJohn Baldwin 	    crp->crp_payload_start, crp->crp_payload_length);
1600844d9543SConrad Meyer 	if (error != 0)
1601844d9543SConrad Meyer 		return (error);
1602844d9543SConrad Meyer 
1603844d9543SConrad Meyer 	INSECURE_DEBUG(dev, "%s: Contents: %16D\n", __func__,
1604844d9543SConrad Meyer 	    (void *)PHYS_TO_DMAP(qp->cq_sg_ulptx->sg_segs[0].ss_paddr), " ");
1605844d9543SConrad Meyer 
1606844d9543SConrad Meyer 	DPRINTF(dev, "%s: starting AES ops @ %u\n", __func__, qp->cq_tail);
1607844d9543SConrad Meyer 
1608844d9543SConrad Meyer 	if (ccp_queue_get_ring_space(qp) < qp->cq_sg_ulptx->sg_nseg)
1609844d9543SConrad Meyer 		return (EAGAIN);
1610844d9543SConrad Meyer 
1611c0341432SJohn Baldwin 	if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
1612c0341432SJohn Baldwin 		return (ccp_do_xts(qp, s, crp, dir, cctx));
1613844d9543SConrad Meyer 
1614844d9543SConrad Meyer 	for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) {
1615844d9543SConrad Meyer 		struct sglist_seg *seg;
1616844d9543SConrad Meyer 
1617844d9543SConrad Meyer 		seg = &qp->cq_sg_ulptx->sg_segs[i];
1618844d9543SConrad Meyer 
1619844d9543SConrad Meyer 		desc = &qp->desc_ring[qp->cq_tail];
1620844d9543SConrad Meyer 		desc->engine = CCP_ENGINE_AES;
1621844d9543SConrad Meyer 		desc->som = (i == 0);
1622844d9543SConrad Meyer 		desc->eom = (i == qp->cq_sg_ulptx->sg_nseg - 1);
1623844d9543SConrad Meyer 		desc->ioc = (desc->eom && cctx != NULL);
1624844d9543SConrad Meyer 		DPRINTF(dev, "%s: AES %u: som:%d eom:%d ioc:%d dir:%d\n",
1625844d9543SConrad Meyer 		    __func__, qp->cq_tail, (int)desc->som, (int)desc->eom,
1626844d9543SConrad Meyer 		    (int)desc->ioc, (int)dir);
1627844d9543SConrad Meyer 
1628844d9543SConrad Meyer 		if (desc->ioc)
1629844d9543SConrad Meyer 			memcpy(&qp->completions_ring[qp->cq_tail], cctx,
1630844d9543SConrad Meyer 			    sizeof(*cctx));
1631844d9543SConrad Meyer 
1632844d9543SConrad Meyer 		desc->aes.encrypt = dir;
1633844d9543SConrad Meyer 		desc->aes.mode = s->blkcipher.cipher_mode;
1634844d9543SConrad Meyer 		desc->aes.type = s->blkcipher.cipher_type;
1635c0341432SJohn Baldwin 		if (csp->csp_cipher_alg == CRYPTO_AES_ICM)
1636844d9543SConrad Meyer 			/*
1637844d9543SConrad Meyer 			 * Size of CTR value in bits, - 1.  ICM mode uses all
1638844d9543SConrad Meyer 			 * 128 bits as counter.
1639844d9543SConrad Meyer 			 */
1640844d9543SConrad Meyer 			desc->aes.size = 127;
1641844d9543SConrad Meyer 
1642844d9543SConrad Meyer 		DPRINTF(dev, "%s: AES %u: mode:%u type:%u size:%u\n", __func__,
1643844d9543SConrad Meyer 		    qp->cq_tail, (unsigned)desc->aes.mode,
1644844d9543SConrad Meyer 		    (unsigned)desc->aes.type, (unsigned)desc->aes.size);
1645844d9543SConrad Meyer 
1646844d9543SConrad Meyer 		desc->length = seg->ss_len;
1647844d9543SConrad Meyer 		desc->src_lo = (uint32_t)seg->ss_paddr;
1648844d9543SConrad Meyer 		desc->src_hi = (seg->ss_paddr >> 32);
1649844d9543SConrad Meyer 		desc->src_mem = CCP_MEMTYPE_SYSTEM;
1650844d9543SConrad Meyer 
1651844d9543SConrad Meyer 		/* Crypt in-place */
1652844d9543SConrad Meyer 		desc->dst_lo = desc->src_lo;
1653844d9543SConrad Meyer 		desc->dst_hi = desc->src_hi;
1654844d9543SConrad Meyer 		desc->dst_mem = desc->src_mem;
1655844d9543SConrad Meyer 
1656844d9543SConrad Meyer 		desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY);
1657844d9543SConrad Meyer 		desc->key_hi = 0;
1658844d9543SConrad Meyer 		desc->key_mem = CCP_MEMTYPE_SB;
1659844d9543SConrad Meyer 
1660844d9543SConrad Meyer 		desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV);
1661844d9543SConrad Meyer 
1662844d9543SConrad Meyer 		qp->cq_tail = (qp->cq_tail + 1) %
1663844d9543SConrad Meyer 		    (1 << qp->cq_softc->ring_size_order);
1664844d9543SConrad Meyer 	}
1665844d9543SConrad Meyer 	return (0);
1666844d9543SConrad Meyer }
1667844d9543SConrad Meyer 
1668844d9543SConrad Meyer int __must_check
1669844d9543SConrad Meyer ccp_blkcipher(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
1670844d9543SConrad Meyer {
1671844d9543SConrad Meyer 	struct ccp_completion_ctx ctx;
1672844d9543SConrad Meyer 
1673844d9543SConrad Meyer 	ctx.callback_fn = ccp_blkcipher_done;
1674844d9543SConrad Meyer 	ctx.session = s;
1675844d9543SConrad Meyer 	ctx.callback_arg = crp;
1676844d9543SConrad Meyer 
1677c0341432SJohn Baldwin 	return (ccp_do_blkcipher(qp, s, crp, &ctx));
1678844d9543SConrad Meyer }
1679844d9543SConrad Meyer 
1680844d9543SConrad Meyer static void
1681844d9543SConrad Meyer ccp_authenc_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp,
1682844d9543SConrad Meyer     int error)
1683844d9543SConrad Meyer {
1684844d9543SConrad Meyer 	struct cryptop *crp;
1685844d9543SConrad Meyer 
1686c0341432SJohn Baldwin 	explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv));
1687844d9543SConrad Meyer 
1688844d9543SConrad Meyer 	crp = vcrp;
1689844d9543SConrad Meyer 
1690c0341432SJohn Baldwin 	ccp_do_hmac_done(qp, s, crp, error);
1691844d9543SConrad Meyer }
1692844d9543SConrad Meyer 
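/*
 * Combined cipher + HMAC request: for encryption, run the cipher first and
 * then HMAC the result; for decryption, HMAC the ciphertext first and then
 * decrypt.  Only the second operation carries the completion context.
 */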
1693844d9543SConrad Meyer int __must_check
1694c0341432SJohn Baldwin ccp_authenc(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
1695844d9543SConrad Meyer {
1696844d9543SConrad Meyer 	struct ccp_completion_ctx ctx;
1697844d9543SConrad Meyer 	int error;
1698844d9543SConrad Meyer 
1699844d9543SConrad Meyer 	ctx.callback_fn = ccp_authenc_done;
1700844d9543SConrad Meyer 	ctx.session = s;
1701844d9543SConrad Meyer 	ctx.callback_arg = crp;
1702844d9543SConrad Meyer 
1703844d9543SConrad Meyer 	/* Perform first operation */
1704c0341432SJohn Baldwin 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1705c0341432SJohn Baldwin 		error = ccp_do_blkcipher(qp, s, crp, NULL);
1706844d9543SConrad Meyer 	else
1707c0341432SJohn Baldwin 		error = ccp_do_hmac(qp, s, crp, NULL);
1708844d9543SConrad Meyer 	if (error != 0)
1709844d9543SConrad Meyer 		return (error);
1710844d9543SConrad Meyer 
1711844d9543SConrad Meyer 	/* Perform second operation */
1712c0341432SJohn Baldwin 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1713c0341432SJohn Baldwin 		error = ccp_do_hmac(qp, s, crp, &ctx);
1714844d9543SConrad Meyer 	else
1715c0341432SJohn Baldwin 		error = ccp_do_blkcipher(qp, s, crp, &ctx);
1716844d9543SConrad Meyer 	return (error);
1717844d9543SConrad Meyer }
1718844d9543SConrad Meyer 
1719844d9543SConrad Meyer static int __must_check
1720844d9543SConrad Meyer ccp_do_ghash_aad(struct ccp_queue *qp, struct ccp_session *s)
1721844d9543SConrad Meyer {
1722844d9543SConrad Meyer 	struct ccp_desc *desc;
1723844d9543SConrad Meyer 	struct sglist_seg *seg;
1724844d9543SConrad Meyer 	unsigned i;
1725844d9543SConrad Meyer 
1726844d9543SConrad Meyer 	if (ccp_queue_get_ring_space(qp) < qp->cq_sg_ulptx->sg_nseg)
1727844d9543SConrad Meyer 		return (EAGAIN);
1728844d9543SConrad Meyer 
1729844d9543SConrad Meyer 	for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) {
1730844d9543SConrad Meyer 		seg = &qp->cq_sg_ulptx->sg_segs[i];
1731844d9543SConrad Meyer 
1732844d9543SConrad Meyer 		desc = &qp->desc_ring[qp->cq_tail];
1733844d9543SConrad Meyer 
1734844d9543SConrad Meyer 		desc->engine = CCP_ENGINE_AES;
1735844d9543SConrad Meyer 		desc->aes.mode = CCP_AES_MODE_GHASH;
1736844d9543SConrad Meyer 		desc->aes.type = s->blkcipher.cipher_type;
1737844d9543SConrad Meyer 		desc->aes.encrypt = CCP_AES_MODE_GHASH_AAD;
1738844d9543SConrad Meyer 
1739844d9543SConrad Meyer 		desc->som = (i == 0);
1740844d9543SConrad Meyer 		desc->length = seg->ss_len;
1741844d9543SConrad Meyer 
1742844d9543SConrad Meyer 		desc->src_lo = (uint32_t)seg->ss_paddr;
1743844d9543SConrad Meyer 		desc->src_hi = (seg->ss_paddr >> 32);
1744844d9543SConrad Meyer 		desc->src_mem = CCP_MEMTYPE_SYSTEM;
1745844d9543SConrad Meyer 
1746844d9543SConrad Meyer 		desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV);
1747844d9543SConrad Meyer 
1748844d9543SConrad Meyer 		desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY);
1749844d9543SConrad Meyer 		desc->key_mem = CCP_MEMTYPE_SB;
1750844d9543SConrad Meyer 
1751844d9543SConrad Meyer 		qp->cq_tail = (qp->cq_tail + 1) %
1752844d9543SConrad Meyer 		    (1 << qp->cq_softc->ring_size_order);
1753844d9543SConrad Meyer 	}
1754844d9543SConrad Meyer 	return (0);
1755844d9543SConrad Meyer }
1756844d9543SConrad Meyer 
1757844d9543SConrad Meyer static int __must_check
1758844d9543SConrad Meyer ccp_do_gctr(struct ccp_queue *qp, struct ccp_session *s,
1759844d9543SConrad Meyer     enum ccp_cipher_dir dir, struct sglist_seg *seg, bool som, bool eom)
1760844d9543SConrad Meyer {
1761844d9543SConrad Meyer 	struct ccp_desc *desc;
1762844d9543SConrad Meyer 
1763844d9543SConrad Meyer 	if (ccp_queue_get_ring_space(qp) == 0)
1764844d9543SConrad Meyer 		return (EAGAIN);
1765844d9543SConrad Meyer 
1766844d9543SConrad Meyer 	desc = &qp->desc_ring[qp->cq_tail];
1767844d9543SConrad Meyer 
1768844d9543SConrad Meyer 	desc->engine = CCP_ENGINE_AES;
1769844d9543SConrad Meyer 	desc->aes.mode = CCP_AES_MODE_GCTR;
1770844d9543SConrad Meyer 	desc->aes.type = s->blkcipher.cipher_type;
1771844d9543SConrad Meyer 	desc->aes.encrypt = dir;
1772844d9543SConrad Meyer 	desc->aes.size = 8 * (seg->ss_len % GMAC_BLOCK_LEN) - 1;
1773844d9543SConrad Meyer 
1774844d9543SConrad Meyer 	desc->som = som;
1775844d9543SConrad Meyer 	desc->eom = eom;
1776844d9543SConrad Meyer 
1777844d9543SConrad Meyer 	/* Trailing bytes will be masked off by aes.size above. */
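	/*
	 * E.g. a 20-byte segment: 20 % 16 = 4, so aes.size = 8 * 4 - 1 = 31
	 * and length below is rounded up to 32, with the last 12 bytes of
	 * the final block masked off.
	 */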
1778844d9543SConrad Meyer 	desc->length = roundup2(seg->ss_len, GMAC_BLOCK_LEN);
1779844d9543SConrad Meyer 
1780844d9543SConrad Meyer 	desc->dst_lo = desc->src_lo = (uint32_t)seg->ss_paddr;
1781844d9543SConrad Meyer 	desc->dst_hi = desc->src_hi = seg->ss_paddr >> 32;
1782844d9543SConrad Meyer 	desc->dst_mem = desc->src_mem = CCP_MEMTYPE_SYSTEM;
1783844d9543SConrad Meyer 
1784844d9543SConrad Meyer 	desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV);
1785844d9543SConrad Meyer 
1786844d9543SConrad Meyer 	desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY);
1787844d9543SConrad Meyer 	desc->key_mem = CCP_MEMTYPE_SB;
1788844d9543SConrad Meyer 
1789844d9543SConrad Meyer 	qp->cq_tail = (qp->cq_tail + 1) %
1790844d9543SConrad Meyer 	    (1 << qp->cq_softc->ring_size_order);
1791844d9543SConrad Meyer 	return (0);
1792844d9543SConrad Meyer }
1793844d9543SConrad Meyer 
1794844d9543SConrad Meyer static int __must_check
1795844d9543SConrad Meyer ccp_do_ghash_final(struct ccp_queue *qp, struct ccp_session *s)
1796844d9543SConrad Meyer {
1797844d9543SConrad Meyer 	struct ccp_desc *desc;
1798844d9543SConrad Meyer 
1799844d9543SConrad Meyer 	if (ccp_queue_get_ring_space(qp) == 0)
1800844d9543SConrad Meyer 		return (EAGAIN);
1801844d9543SConrad Meyer 
1802844d9543SConrad Meyer 	desc = &qp->desc_ring[qp->cq_tail];
1803844d9543SConrad Meyer 
1804844d9543SConrad Meyer 	desc->engine = CCP_ENGINE_AES;
1805844d9543SConrad Meyer 	desc->aes.mode = CCP_AES_MODE_GHASH;
1806844d9543SConrad Meyer 	desc->aes.type = s->blkcipher.cipher_type;
1807844d9543SConrad Meyer 	desc->aes.encrypt = CCP_AES_MODE_GHASH_FINAL;
1808844d9543SConrad Meyer 
1809844d9543SConrad Meyer 	desc->length = GMAC_BLOCK_LEN;
1810844d9543SConrad Meyer 
1811844d9543SConrad Meyer 	desc->src_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH_IN);
1812844d9543SConrad Meyer 	desc->src_mem = CCP_MEMTYPE_SB;
1813844d9543SConrad Meyer 
1814844d9543SConrad Meyer 	desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV);
1815844d9543SConrad Meyer 
1816844d9543SConrad Meyer 	desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY);
1817844d9543SConrad Meyer 	desc->key_mem = CCP_MEMTYPE_SB;
1818844d9543SConrad Meyer 
1819844d9543SConrad Meyer 	desc->dst_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH);
1820844d9543SConrad Meyer 	desc->dst_mem = CCP_MEMTYPE_SB;
1821844d9543SConrad Meyer 
1822844d9543SConrad Meyer 	qp->cq_tail = (qp->cq_tail + 1) %
1823844d9543SConrad Meyer 	    (1 << qp->cq_softc->ring_size_order);
1824844d9543SConrad Meyer 	return (0);
1825844d9543SConrad Meyer }
1826844d9543SConrad Meyer 
1827844d9543SConrad Meyer static void
1828844d9543SConrad Meyer ccp_gcm_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp,
1829844d9543SConrad Meyer     int error)
1830844d9543SConrad Meyer {
1831844d9543SConrad Meyer 	char tag[GMAC_DIGEST_LEN];
1832844d9543SConrad Meyer 	struct cryptop *crp;
1833844d9543SConrad Meyer 
1834844d9543SConrad Meyer 	crp = vcrp;
1835844d9543SConrad Meyer 
1836844d9543SConrad Meyer 	s->pending--;
1837844d9543SConrad Meyer 
1838844d9543SConrad Meyer 	if (error != 0) {
1839844d9543SConrad Meyer 		crp->crp_etype = error;
1840844d9543SConrad Meyer 		goto out;
1841844d9543SConrad Meyer 	}
1842844d9543SConrad Meyer 
1843844d9543SConrad Meyer 	/* Encrypt is done.  Decrypt needs to verify tag. */
1844c0341432SJohn Baldwin 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1845844d9543SConrad Meyer 		goto out;
1846844d9543SConrad Meyer 
1847844d9543SConrad Meyer 	/* Copy in message tag. */
1848c0341432SJohn Baldwin 	crypto_copydata(crp, crp->crp_digest_start, s->gmac.hash_len, tag);
1849844d9543SConrad Meyer 
1850844d9543SConrad Meyer 	/* Verify tag against computed GMAC */
1851844d9543SConrad Meyer 	if (timingsafe_bcmp(tag, s->gmac.final_block, s->gmac.hash_len) != 0)
1852844d9543SConrad Meyer 		crp->crp_etype = EBADMSG;
1853844d9543SConrad Meyer 
1854844d9543SConrad Meyer out:
1855c0341432SJohn Baldwin 	explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv));
1856c0341432SJohn Baldwin 	explicit_bzero(&s->gmac.final_block, sizeof(s->gmac.final_block));
1857844d9543SConrad Meyer 	crypto_done(crp);
1858844d9543SConrad Meyer }
1859844d9543SConrad Meyer 
1860844d9543SConrad Meyer int __must_check
1861c0341432SJohn Baldwin ccp_gcm(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
1862844d9543SConrad Meyer {
1863c0341432SJohn Baldwin 	const struct crypto_session_params *csp;
1864844d9543SConrad Meyer 	struct ccp_completion_ctx ctx;
1865844d9543SConrad Meyer 	enum ccp_cipher_dir dir;
1866844d9543SConrad Meyer 	device_t dev;
1867844d9543SConrad Meyer 	unsigned i;
1868844d9543SConrad Meyer 	int error;
1869844d9543SConrad Meyer 
1870844d9543SConrad Meyer 	if (s->blkcipher.key_len == 0)
1871844d9543SConrad Meyer 		return (EINVAL);
1872844d9543SConrad Meyer 
1873844d9543SConrad Meyer 	dev = qp->cq_softc->dev;
1874844d9543SConrad Meyer 
1875c0341432SJohn Baldwin 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1876844d9543SConrad Meyer 		dir = CCP_CIPHER_DIR_ENCRYPT;
1877844d9543SConrad Meyer 	else
1878844d9543SConrad Meyer 		dir = CCP_CIPHER_DIR_DECRYPT;
1879844d9543SConrad Meyer 
1880844d9543SConrad Meyer 	/* Zero initial GHASH portion of context */
1881844d9543SConrad Meyer 	memset(s->blkcipher.iv, 0, sizeof(s->blkcipher.iv));
1882844d9543SConrad Meyer 
1883844d9543SConrad Meyer 	/* Gather IV data */
1884c0341432SJohn Baldwin 	csp = crypto_get_params(crp->crp_session);
1885c0341432SJohn Baldwin 	ccp_collect_iv(crp, csp, s->blkcipher.iv);
1886844d9543SConrad Meyer 
1887844d9543SConrad Meyer 	/* Reverse order of key material for HW */
1888844d9543SConrad Meyer 	ccp_byteswap(s->blkcipher.enckey, s->blkcipher.key_len);
1889844d9543SConrad Meyer 
1890844d9543SConrad Meyer 	/* Prepare input buffer of concatenated lengths for final GHASH */
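	/*
	 * Per the GCM spec this block is len(AAD) || len(C), each a 64-bit
	 * big-endian count of bits; e.g. 16 AAD bytes and 64 payload bytes
	 * encode as 0x80 and 0x200.
	 */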
1891c0341432SJohn Baldwin 	be64enc(s->gmac.final_block, (uint64_t)crp->crp_aad_length * 8);
1892c0341432SJohn Baldwin 	be64enc(&s->gmac.final_block[8], (uint64_t)crp->crp_payload_length * 8);
1893844d9543SConrad Meyer 
1894844d9543SConrad Meyer 	/* Send IV + initial zero GHASH, key data, and lengths buffer to LSB */
1895844d9543SConrad Meyer 	error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_IV),
1896844d9543SConrad Meyer 	    s->blkcipher.iv, 32);
1897844d9543SConrad Meyer 	if (error != 0)
1898844d9543SConrad Meyer 		return (error);
1899844d9543SConrad Meyer 	error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_KEY),
1900844d9543SConrad Meyer 	    s->blkcipher.enckey, s->blkcipher.key_len);
1901844d9543SConrad Meyer 	if (error != 0)
1902844d9543SConrad Meyer 		return (error);
1903844d9543SConrad Meyer 	error = ccp_do_pst_to_lsb(qp,
1904844d9543SConrad Meyer 	    ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH_IN), s->gmac.final_block,
1905844d9543SConrad Meyer 	    GMAC_BLOCK_LEN);
1906844d9543SConrad Meyer 	if (error != 0)
1907844d9543SConrad Meyer 		return (error);
1908844d9543SConrad Meyer 
1909844d9543SConrad Meyer 	/* First step - compute GHASH over AAD */
1910c0341432SJohn Baldwin 	if (crp->crp_aad_length != 0) {
1911844d9543SConrad Meyer 		sglist_reset(qp->cq_sg_ulptx);
1912844d9543SConrad Meyer 		error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
1913c0341432SJohn Baldwin 		    crp->crp_aad_start, crp->crp_aad_length);
1914844d9543SConrad Meyer 		if (error != 0)
1915844d9543SConrad Meyer 			return (error);
1916844d9543SConrad Meyer 
1917844d9543SConrad Meyer 		/* This engine cannot process non-block multiple AAD data. */
1918844d9543SConrad Meyer 		for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++)
1919844d9543SConrad Meyer 			if ((qp->cq_sg_ulptx->sg_segs[i].ss_len %
1920844d9543SConrad Meyer 			    GMAC_BLOCK_LEN) != 0) {
1921844d9543SConrad Meyer 				DPRINTF(dev, "%s: AD seg modulo: %zu\n",
1922844d9543SConrad Meyer 				    __func__,
1923844d9543SConrad Meyer 				    qp->cq_sg_ulptx->sg_segs[i].ss_len);
1924844d9543SConrad Meyer 				return (EINVAL);
1925844d9543SConrad Meyer 			}
1926844d9543SConrad Meyer 
1927844d9543SConrad Meyer 		error = ccp_do_ghash_aad(qp, s);
1928844d9543SConrad Meyer 		if (error != 0)
1929844d9543SConrad Meyer 			return (error);
1930844d9543SConrad Meyer 	}
1931844d9543SConrad Meyer 
1932844d9543SConrad Meyer 	/* Feed data piece by piece into GCTR */
1933844d9543SConrad Meyer 	sglist_reset(qp->cq_sg_ulptx);
1934844d9543SConrad Meyer 	error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
1935c0341432SJohn Baldwin 	    crp->crp_payload_start, crp->crp_payload_length);
1936844d9543SConrad Meyer 	if (error != 0)
1937844d9543SConrad Meyer 		return (error);
1938844d9543SConrad Meyer 
1939844d9543SConrad Meyer 	/*
1940844d9543SConrad Meyer 	 * All segments except the last must be even multiples of AES block
1941844d9543SConrad Meyer 	 * size for the HW to process it.  Non-compliant inputs aren't bogus,
1942844d9543SConrad Meyer 	 * just not doable on this hardware.
1943844d9543SConrad Meyer 	 *
1944844d9543SConrad Meyer 	 * XXX: Well, the hardware will produce a valid tag for shorter final
1945844d9543SConrad Meyer 	 * segment inputs, but it will still write out a block-sized plaintext
1946844d9543SConrad Meyer 	 * or ciphertext chunk.  For a typical CRP this tramples trailing data,
1947844d9543SConrad Meyer 	 * including the provided message tag.  So, reject such inputs for now.
1948844d9543SConrad Meyer 	 */
1949844d9543SConrad Meyer 	for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++)
1950844d9543SConrad Meyer 		if ((qp->cq_sg_ulptx->sg_segs[i].ss_len % AES_BLOCK_LEN) != 0) {
1951844d9543SConrad Meyer 			DPRINTF(dev, "%s: seg modulo: %zu\n", __func__,
1952844d9543SConrad Meyer 			    qp->cq_sg_ulptx->sg_segs[i].ss_len);
1953844d9543SConrad Meyer 			return (EINVAL);
1954844d9543SConrad Meyer 		}
1955844d9543SConrad Meyer 
1956844d9543SConrad Meyer 	for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) {
1957844d9543SConrad Meyer 		struct sglist_seg *seg;
1958844d9543SConrad Meyer 
1959844d9543SConrad Meyer 		seg = &qp->cq_sg_ulptx->sg_segs[i];
1960844d9543SConrad Meyer 		error = ccp_do_gctr(qp, s, dir, seg,
1961c0341432SJohn Baldwin 		    (i == 0 && crp->crp_aad_length == 0),
1962844d9543SConrad Meyer 		    i == (qp->cq_sg_ulptx->sg_nseg - 1));
1963844d9543SConrad Meyer 		if (error != 0)
1964844d9543SConrad Meyer 			return (error);
1965844d9543SConrad Meyer 	}
1966844d9543SConrad Meyer 
1967844d9543SConrad Meyer 	/* Send just initial IV (not GHASH!) to LSB again */
1968844d9543SConrad Meyer 	error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_IV),
1969c0341432SJohn Baldwin 	    s->blkcipher.iv, AES_BLOCK_LEN);
1970844d9543SConrad Meyer 	if (error != 0)
1971844d9543SConrad Meyer 		return (error);
1972844d9543SConrad Meyer 
1973844d9543SConrad Meyer 	ctx.callback_fn = ccp_gcm_done;
1974844d9543SConrad Meyer 	ctx.session = s;
1975844d9543SConrad Meyer 	ctx.callback_arg = crp;
1976844d9543SConrad Meyer 
1977844d9543SConrad Meyer 	/* Compute final hash and copy result back */
1978844d9543SConrad Meyer 	error = ccp_do_ghash_final(qp, s);
1979844d9543SConrad Meyer 	if (error != 0)
1980844d9543SConrad Meyer 		return (error);
1981844d9543SConrad Meyer 
1982844d9543SConrad Meyer 	/* When encrypting, copy computed tag out to caller buffer. */
1983844d9543SConrad Meyer 	sglist_reset(qp->cq_sg_ulptx);
1984844d9543SConrad Meyer 	if (dir == CCP_CIPHER_DIR_ENCRYPT)
1985844d9543SConrad Meyer 		error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
1986c0341432SJohn Baldwin 		    crp->crp_digest_start, s->gmac.hash_len);
1987844d9543SConrad Meyer 	else
1988844d9543SConrad Meyer 		/*
1989844d9543SConrad Meyer 		 * For decrypting, copy the computed tag out to our session
1990844d9543SConrad Meyer 		 * buffer to verify in our callback.
1991844d9543SConrad Meyer 		 */
1992844d9543SConrad Meyer 		error = sglist_append(qp->cq_sg_ulptx, s->gmac.final_block,
1993844d9543SConrad Meyer 		    s->gmac.hash_len);
1994844d9543SConrad Meyer 	if (error != 0)
1995844d9543SConrad Meyer 		return (error);
1996844d9543SConrad Meyer 	error = ccp_passthrough_sgl(qp,
1997844d9543SConrad Meyer 	    ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH), false, qp->cq_sg_ulptx,
1998844d9543SConrad Meyer 	    s->gmac.hash_len, true, &ctx);
1999844d9543SConrad Meyer 	return (error);
2000844d9543SConrad Meyer }
2001844d9543SConrad Meyer 
2002844d9543SConrad Meyer #define MAX_TRNG_RETRIES	10
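/*
 * Harvest entropy from the CCP TRNG register.  A read of zero means no
 * entropy was available yet; retry a bounded number of times per word and
 * report zero bytes if the TRNG stays dry.
 */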
2003844d9543SConrad Meyer u_int
2004844d9543SConrad Meyer random_ccp_read(void *v, u_int c)
2005844d9543SConrad Meyer {
2006844d9543SConrad Meyer 	uint32_t *buf;
2007844d9543SConrad Meyer 	u_int i, j;
2008844d9543SConrad Meyer 
2009844d9543SConrad Meyer 	KASSERT(c % sizeof(*buf) == 0, ("%u not multiple of uint32_t", c));
2010844d9543SConrad Meyer 
2011844d9543SConrad Meyer 	buf = v;
2012844d9543SConrad Meyer 	for (i = c; i > 0; i -= sizeof(*buf)) {
2013844d9543SConrad Meyer 		for (j = 0; j < MAX_TRNG_RETRIES; j++) {
2014844d9543SConrad Meyer 			*buf = ccp_read_4(g_ccp_softc, TRNG_OUT_OFFSET);
2015844d9543SConrad Meyer 			if (*buf != 0)
2016844d9543SConrad Meyer 				break;
2017844d9543SConrad Meyer 		}
2018844d9543SConrad Meyer 		if (j == MAX_TRNG_RETRIES)
2019844d9543SConrad Meyer 			return (0);
2020844d9543SConrad Meyer 		buf++;
2021844d9543SConrad Meyer 	}
2022844d9543SConrad Meyer 	return (c);
2023844d9543SConrad Meyer 
2024844d9543SConrad Meyer }
2025844d9543SConrad Meyer 
2026844d9543SConrad Meyer #ifdef DDB
2027844d9543SConrad Meyer void
2028844d9543SConrad Meyer db_ccp_show_hw(struct ccp_softc *sc)
2029844d9543SConrad Meyer {
2030844d9543SConrad Meyer 
2031844d9543SConrad Meyer 	db_printf("  queue mask: 0x%x\n",
2032844d9543SConrad Meyer 	    ccp_read_4(sc, CMD_QUEUE_MASK_OFFSET));
2033844d9543SConrad Meyer 	db_printf("  queue prio: 0x%x\n",
2034844d9543SConrad Meyer 	    ccp_read_4(sc, CMD_QUEUE_PRIO_OFFSET));
2035844d9543SConrad Meyer 	db_printf("  reqid: 0x%x\n", ccp_read_4(sc, CMD_REQID_CONFIG_OFFSET));
2036844d9543SConrad Meyer 	db_printf("  trng output: 0x%x\n", ccp_read_4(sc, TRNG_OUT_OFFSET));
2037844d9543SConrad Meyer 	db_printf("  cmd timeout: 0x%x\n",
2038844d9543SConrad Meyer 	    ccp_read_4(sc, CMD_CMD_TIMEOUT_OFFSET));
2039844d9543SConrad Meyer 	db_printf("  lsb public mask lo: 0x%x\n",
2040844d9543SConrad Meyer 	    ccp_read_4(sc, LSB_PUBLIC_MASK_LO_OFFSET));
2041844d9543SConrad Meyer 	db_printf("  lsb public mask hi: 0x%x\n",
2042844d9543SConrad Meyer 	    ccp_read_4(sc, LSB_PUBLIC_MASK_HI_OFFSET));
2043844d9543SConrad Meyer 	db_printf("  lsb private mask lo: 0x%x\n",
2044844d9543SConrad Meyer 	    ccp_read_4(sc, LSB_PRIVATE_MASK_LO_OFFSET));
2045844d9543SConrad Meyer 	db_printf("  lsb private mask hi: 0x%x\n",
2046844d9543SConrad Meyer 	    ccp_read_4(sc, LSB_PRIVATE_MASK_HI_OFFSET));
2047844d9543SConrad Meyer 	db_printf("  version: 0x%x\n", ccp_read_4(sc, VERSION_REG));
2048844d9543SConrad Meyer }
2049844d9543SConrad Meyer 
2050844d9543SConrad Meyer void
2051844d9543SConrad Meyer db_ccp_show_queue_hw(struct ccp_queue *qp)
2052844d9543SConrad Meyer {
2053844d9543SConrad Meyer 	const struct ccp_error_code *ec;
2054844d9543SConrad Meyer 	struct ccp_softc *sc;
2055844d9543SConrad Meyer 	uint32_t status, error, esource, faultblock, headlo, qcontrol;
2056844d9543SConrad Meyer 	unsigned q, i;
2057844d9543SConrad Meyer 
2058844d9543SConrad Meyer 	sc = qp->cq_softc;
2059844d9543SConrad Meyer 	q = qp->cq_qindex;
2060844d9543SConrad Meyer 
2061844d9543SConrad Meyer 	qcontrol = ccp_read_queue_4(sc, q, CMD_Q_CONTROL_BASE);
2062844d9543SConrad Meyer 	db_printf("  qcontrol: 0x%x%s%s\n", qcontrol,
2063844d9543SConrad Meyer 	    (qcontrol & CMD_Q_RUN) ? " RUN" : "",
2064844d9543SConrad Meyer 	    (qcontrol & CMD_Q_HALTED) ? " HALTED" : "");
2065844d9543SConrad Meyer 	db_printf("  tail_lo: 0x%x\n",
2066844d9543SConrad Meyer 	    ccp_read_queue_4(sc, q, CMD_Q_TAIL_LO_BASE));
2067844d9543SConrad Meyer 	headlo = ccp_read_queue_4(sc, q, CMD_Q_HEAD_LO_BASE);
2068844d9543SConrad Meyer 	db_printf("  head_lo: 0x%x\n", headlo);
2069844d9543SConrad Meyer 	db_printf("  int enable: 0x%x\n",
2070844d9543SConrad Meyer 	    ccp_read_queue_4(sc, q, CMD_Q_INT_ENABLE_BASE));
2071844d9543SConrad Meyer 	db_printf("  interrupt status: 0x%x\n",
2072844d9543SConrad Meyer 	    ccp_read_queue_4(sc, q, CMD_Q_INTERRUPT_STATUS_BASE));
2073844d9543SConrad Meyer 	status = ccp_read_queue_4(sc, q, CMD_Q_STATUS_BASE);
2074844d9543SConrad Meyer 	db_printf("  status: 0x%x\n", status);
2075844d9543SConrad Meyer 	db_printf("  int stats: 0x%x\n",
2076844d9543SConrad Meyer 	    ccp_read_queue_4(sc, q, CMD_Q_INT_STATUS_BASE));
2077844d9543SConrad Meyer 
2078844d9543SConrad Meyer 	error = status & STATUS_ERROR_MASK;
2079844d9543SConrad Meyer 	if (error == 0)
2080844d9543SConrad Meyer 		return;
2081844d9543SConrad Meyer 
2082844d9543SConrad Meyer 	esource = (status >> STATUS_ERRORSOURCE_SHIFT) &
2083844d9543SConrad Meyer 	    STATUS_ERRORSOURCE_MASK;
2084844d9543SConrad Meyer 	faultblock = (status >> STATUS_VLSB_FAULTBLOCK_SHIFT) &
2085844d9543SConrad Meyer 	    STATUS_VLSB_FAULTBLOCK_MASK;
2086844d9543SConrad Meyer 
2087844d9543SConrad Meyer 	ec = NULL;
2088844d9543SConrad Meyer 	for (i = 0; i < nitems(ccp_error_codes); i++)
2089844d9543SConrad Meyer 		if (ccp_error_codes[i].ce_code == error)
2090844d9543SConrad Meyer 			break;
2091844d9543SConrad Meyer 	if (i < nitems(ccp_error_codes))
2092844d9543SConrad Meyer 		ec = &ccp_error_codes[i];
2093844d9543SConrad Meyer 
2094844d9543SConrad Meyer 	db_printf("  Error: %s (%u) Source: %u Faulting LSB block: %u\n",
2095844d9543SConrad Meyer 	    (ec != NULL) ? ec->ce_name : "(reserved)", error, esource,
2096844d9543SConrad Meyer 	    faultblock);
2097844d9543SConrad Meyer 	if (ec != NULL)
2098844d9543SConrad Meyer 		db_printf("  Error description: %s\n", ec->ce_desc);
2099844d9543SConrad Meyer 
2100844d9543SConrad Meyer 	i = (headlo - (uint32_t)qp->desc_ring_bus_addr) / Q_DESC_SIZE;
2101844d9543SConrad Meyer 	db_printf("  Bad descriptor idx: %u contents:\n  %32D\n", i,
2102844d9543SConrad Meyer 	    (void *)&qp->desc_ring[i], " ");
2103844d9543SConrad Meyer }
2104844d9543SConrad Meyer #endif
2105