/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#pragma once

/*
 * Keccak SHAKE128 (if supported by the device?) uses a 1344-bit block.
 * SHA3-224 is the next largest block size, at 1152 bits.  However,
 * crypto(4) doesn't support any SHA3 hash, so SHA2 is the constraint:
 */
#define	CCP_HASH_MAX_BLOCK_SIZE	(SHA2_512_BLOCK_LEN)

#define	CCP_AES_MAX_KEY_LEN	(AES_XTS_MAX_KEY)
#define	CCP_MAX_CRYPTO_IV_LEN	32	/* GCM IV + GHASH context */

#define	MAX_HW_QUEUES		5
#define	MAX_LSB_REGIONS		8

#ifndef __must_check
#define	__must_check	__attribute__((__warn_unused_result__))
#endif

/*
 * Internal data structures.
 */
enum sha_version {
	SHA1,
#if 0
	SHA2_224,
#endif
	SHA2_256, SHA2_384, SHA2_512
};

/*
 * XXX: The hmac.res, gmac.final_block, and blkcipher.iv fields are
 * used by individual requests, meaning that a session cannot have more
 * than one request in flight at a time.
 */
struct ccp_session_hmac {
	const struct auth_hash *auth_hash;
	int hash_len;
	unsigned int auth_mode;
	char ipad[CCP_HASH_MAX_BLOCK_SIZE];
	char opad[CCP_HASH_MAX_BLOCK_SIZE];
	char res[CCP_HASH_MAX_BLOCK_SIZE];
};

struct ccp_session_gmac {
	int hash_len;
	char final_block[GMAC_BLOCK_LEN];
};

struct ccp_session_blkcipher {
	unsigned cipher_mode;
	unsigned cipher_type;
	unsigned key_len;
	char enckey[CCP_AES_MAX_KEY_LEN];
	char iv[CCP_MAX_CRYPTO_IV_LEN];
};

struct ccp_session {
	bool active;		/* Session slot is in use */
	int pending;		/* Requests in flight on this session */
	enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
	unsigned queue;
	union {
		struct ccp_session_hmac hmac;
		struct ccp_session_gmac gmac;
	};
	struct ccp_session_blkcipher blkcipher;
};
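
/*
 * Note: 'mode' presumably discriminates the union above; 'blkcipher'
 * sits outside the union because cipher state is needed alongside the
 * 'hmac' (AUTHENC) or 'gmac' (GCM) state.  (Inference from the layout,
 * not from driver documentation.)
 */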

struct ccp_softc;
struct ccp_queue {
	struct mtx cq_lock;
	unsigned cq_qindex;
	struct ccp_softc *cq_softc;

	/* Host memory and tracking structures for descriptor ring. */
	bus_dma_tag_t ring_desc_tag;
	bus_dmamap_t ring_desc_map;
	struct ccp_desc *desc_ring;
	bus_addr_t desc_ring_bus_addr;
	/* Callbacks and arguments ring; indices correspond to above ring. */
	struct ccp_completion_ctx *completions_ring;

	uint32_t qcontrol;	/* Cached register value */
	unsigned lsb_mask;	/* LSBs available to queue */
	int private_lsb;	/* Reserved LSB #, or -1 */

	unsigned cq_head;
	unsigned cq_tail;
	unsigned cq_acq_tail;

	bool cq_waiting;	/* Thread waiting for space */

	struct sglist *cq_sg_crp;
	struct sglist *cq_sg_ulptx;
	struct sglist *cq_sg_dst;
};

struct ccp_completion_ctx {
	void (*callback_fn)(struct ccp_queue *qp, struct ccp_session *s,
	    void *arg, int error);
	void *callback_arg;
	struct ccp_session *session;
};

struct ccp_softc {
	device_t dev;
	int32_t cid;
	struct mtx lock;
	bool detaching;

	unsigned ring_size_order;	/* log2 of descriptor ring size */

	/*
	 * Each command queue is either public or private.  Queues are
	 * private (PSP-only) by default; the PSP grants the host access
	 * to some queues via the QMR (Queue Mask Register).  Set bits
	 * are host-accessible.
	 */
	uint8_t valid_queues;

	uint8_t hw_version;
	uint8_t num_queues;
	uint16_t hw_features;
	uint16_t num_lsb_entries;

	/* Primary BAR (RID 2) used for register access */
	bus_space_tag_t pci_bus_tag;
	bus_space_handle_t pci_bus_handle;
	int pci_resource_id;
	struct resource *pci_resource;

	/* Secondary BAR (RID 5) apparently used for MSI-X */
	int pci_resource_id_msix;
	struct resource *pci_resource_msix;

	/* Interrupt resources */
	void *intr_tag[2];
	struct resource *intr_res[2];
	unsigned intr_count;

	struct ccp_queue queues[MAX_HW_QUEUES];
};

/* Internal globals */
SYSCTL_DECL(_hw_ccp);
MALLOC_DECLARE(M_CCP);
extern bool g_debug_print;
extern struct ccp_softc *g_ccp_softc;

/*
 * Debug macros.
 */
#define	DPRINTF(dev, ...)	do {				\
	if (!g_debug_print)					\
		break;						\
	if ((dev) != NULL)					\
		device_printf((dev), "XXX " __VA_ARGS__);	\
	else							\
		printf("ccpXXX: " __VA_ARGS__);			\
} while (0)
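
/*
 * Example usage (illustrative; 'sc' and 'qindex' are hypothetical
 * locals, not names from this driver):
 *
 *	DPRINTF(sc->dev, "%s: queue %u full\n", __func__, qindex);
 */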

#if 0
#define	INSECURE_DEBUG(dev, ...)	do {			\
	if (!g_debug_print)					\
		break;						\
	if ((dev) != NULL)					\
		device_printf((dev), "XXX " __VA_ARGS__);	\
	else							\
		printf("ccpXXX: " __VA_ARGS__);			\
} while (0)
#else
#define	INSECURE_DEBUG(dev, ...)
#endif

/*
 * Internal hardware manipulation routines.
 */
int ccp_hw_attach(device_t dev);
void ccp_hw_detach(device_t dev);

void ccp_queue_write_tail(struct ccp_queue *qp);

#ifdef DDB
void db_ccp_show_hw(struct ccp_softc *sc);
void db_ccp_show_queue_hw(struct ccp_queue *qp);
#endif

/*
 * Internal hardware crypt-op submission routines.
 */
int ccp_authenc(struct ccp_queue *qp, struct ccp_session *s,
    struct cryptop *crp) __must_check;
int ccp_blkcipher(struct ccp_queue *qp, struct ccp_session *s,
    struct cryptop *crp) __must_check;
int ccp_gcm(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
    __must_check;
int ccp_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
    __must_check;

/*
 * Internal hardware TRNG read routine.
 */
u_int random_ccp_read(void *v, u_int c);

/* XXX */
int ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
    __must_check;
void ccp_queue_abort(struct ccp_queue *qp);
void ccp_queue_release(struct ccp_queue *qp);
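
/*
 * A minimal sketch of how the routines above are intended to compose
 * (an assumption from the names and __must_check annotations, not a
 * copy of the driver's actual submission path; 'ndescs' is a
 * hypothetical descriptor count):
 *
 *	error = ccp_queue_acquire_reserve(qp, ndescs, M_NOWAIT);
 *	if (error != 0)
 *		return (error);
 *	error = ccp_gcm(qp, s, crp);
 *	if (error != 0)
 *		ccp_queue_abort(qp);
 *	else
 *		ccp_queue_release(qp);
 */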

/*
 * Internal inline routines.
 */
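/* Number of ring descriptors currently in use (tail - head, mod ring size). */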
static inline unsigned
ccp_queue_get_active(struct ccp_queue *qp)
{
	struct ccp_softc *sc;

	sc = qp->cq_softc;
	return ((qp->cq_tail - qp->cq_head) & ((1 << sc->ring_size_order) - 1));
}

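/*
 * Free descriptor slots; one slot is always left empty so a full ring
 * can be distinguished from an empty one.
 */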
static inline unsigned
ccp_queue_get_ring_space(struct ccp_queue *qp)
{
	struct ccp_softc *sc;

	sc = qp->cq_softc;
	return ((1 << sc->ring_size_order) - ccp_queue_get_active(qp) - 1);
}
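
/*
 * Worked example of the ring arithmetic above (values are illustrative):
 * with ring_size_order == 4 (a 16-entry ring), cq_head == 14, and
 * cq_tail == 2, ccp_queue_get_active() returns (2 - 14) & 15 == 4, and
 * ccp_queue_get_ring_space() returns 16 - 4 - 1 == 11.
 */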