/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#pragma once

/*
 * Keccak SHAKE128 (if supported by the device?) uses a 1344 bit block.
 * SHA3-224 is the next largest block size, at 1152 bits.  However, crypto(4)
 * doesn't support any SHA3 hash, so SHA2 is the constraint:
 */
#define CCP_HASH_MAX_BLOCK_SIZE	(SHA2_512_HMAC_BLOCK_LEN)

#define CCP_AES_MAX_KEY_LEN	(AES_XTS_MAX_KEY)
#define CCP_MAX_CRYPTO_IV_LEN	32	/* GCM IV + GHASH context */

#define MAX_HW_QUEUES		5
#define MAX_LSB_REGIONS		8

#ifndef __must_check
#define __must_check __attribute__((__warn_unused_result__))
#endif

/*
 * Internal data structures.
 */
enum sha_version {
	SHA1,
#if 0
	SHA2_224,
#endif
	SHA2_256, SHA2_384, SHA2_512
};

struct ccp_session_hmac {
	struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char ipad[CCP_HASH_MAX_BLOCK_SIZE];
	char opad[CCP_HASH_MAX_BLOCK_SIZE];
};
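/*
 * Example only (not built): the ipad/opad buffers above presumably hold the
 * standard HMAC (RFC 2104) inner and outer pads.  A minimal sketch of that
 * precomputation follows; real HMAC first hashes keys longer than the block
 * down to digest size, which is omitted here.  ccp_example_hmac_pads() is
 * purely illustrative and is not part of the driver.
 */
#if 0
static void
ccp_example_hmac_pads(struct ccp_session_hmac *hmac, const char *key,
    size_t klen, size_t block_len)
{
	size_t i;
	char kb;

	for (i = 0; i < block_len; i++) {
		/* Zero-pad the key out to the hash block size. */
		kb = (i < klen) ? key[i] : 0;
		hmac->ipad[i] = kb ^ 0x36;	/* inner pad byte */
		hmac->opad[i] = kb ^ 0x5c;	/* outer pad byte */
	}
}
#endif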
struct ccp_session_gmac {
	int hash_len;
	char final_block[GMAC_BLOCK_LEN];
};

struct ccp_session_blkcipher {
	unsigned cipher_mode;
	unsigned cipher_type;
	unsigned key_len;
	unsigned iv_len;
	char enckey[CCP_AES_MAX_KEY_LEN];
	char iv[CCP_MAX_CRYPTO_IV_LEN];
};

struct ccp_session {
	bool active : 1;
	bool cipher_first : 1;
	int pending;
	enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
	unsigned queue;
	union {
		struct ccp_session_hmac hmac;
		struct ccp_session_gmac gmac;
	};
	struct ccp_session_blkcipher blkcipher;
};

struct ccp_softc;
struct ccp_queue {
	struct mtx		cq_lock;
	unsigned		cq_qindex;
	struct ccp_softc	*cq_softc;

	/* Host memory and tracking structures for descriptor ring. */
	bus_dma_tag_t		ring_desc_tag;
	bus_dmamap_t		ring_desc_map;
	struct ccp_desc		*desc_ring;
	bus_addr_t		desc_ring_bus_addr;
	/* Callbacks and arguments ring; indices correspond to above ring. */
	struct ccp_completion_ctx *completions_ring;

	uint32_t		qcontrol;	/* Cached register value */
	unsigned		lsb_mask;	/* LSBs available to queue */
	int			private_lsb;	/* Reserved LSB #, or -1 */

	unsigned		cq_head;
	unsigned		cq_tail;
	unsigned		cq_acq_tail;

	bool			cq_waiting;	/* Thread waiting for space */

	struct sglist		*cq_sg_crp;
	struct sglist		*cq_sg_ulptx;
	struct sglist		*cq_sg_dst;
};

struct ccp_completion_ctx {
	void (*callback_fn)(struct ccp_queue *qp, struct ccp_session *s,
	    void *arg, int error);
	void *callback_arg;
	struct ccp_session *session;
};

struct ccp_softc {
	device_t dev;
	int32_t cid;
	struct ccp_session *sessions;
	int nsessions;
	struct mtx lock;
	bool detaching;

	unsigned ring_size_order;

	/*
	 * Each command queue is either public or private.  "Private"
	 * (PSP-only) by default.  PSP grants access to some queues to host
	 * via QMR (Queue Mask Register).  Set bits are host accessible.
	 */
	uint8_t valid_queues;

	uint8_t hw_version;
	uint8_t num_queues;
	uint16_t hw_features;
	uint16_t num_lsb_entries;

	/* Primary BAR (RID 2) used for register access */
	bus_space_tag_t pci_bus_tag;
	bus_space_handle_t pci_bus_handle;
	int pci_resource_id;
	struct resource *pci_resource;

	/* Secondary BAR (RID 5) apparently used for MSI-X */
	int pci_resource_id_msix;
	struct resource *pci_resource_msix;

	/* Interrupt resources */
	void *intr_tag[2];
	struct resource *intr_res[2];
	unsigned intr_count;

	struct ccp_queue queues[MAX_HW_QUEUES];
};

/* Internal globals */
SYSCTL_DECL(_hw_ccp);
MALLOC_DECLARE(M_CCP);
extern bool g_debug_print;
extern struct ccp_softc *g_ccp_softc;

/*
 * Debug macros.
 */
#define DPRINTF(dev, ...)	do {				\
	if (!g_debug_print)					\
		break;						\
	if ((dev) != NULL)					\
		device_printf((dev), "XXX " __VA_ARGS__);	\
	else							\
		printf("ccpXXX: " __VA_ARGS__);			\
} while (0)

#if 0
#define INSECURE_DEBUG(dev, ...)	do {			\
	if (!g_debug_print)					\
		break;						\
	if ((dev) != NULL)					\
		device_printf((dev), "XXX " __VA_ARGS__);	\
	else							\
		printf("ccpXXX: " __VA_ARGS__);			\
} while (0)
#else
#define INSECURE_DEBUG(dev, ...)
#endif

/*
 * Internal hardware manipulation routines.
 */
int ccp_hw_attach(device_t dev);
void ccp_hw_detach(device_t dev);

void ccp_queue_write_tail(struct ccp_queue *qp);

#ifdef DDB
void db_ccp_show_hw(struct ccp_softc *sc);
void db_ccp_show_queue_hw(struct ccp_queue *qp);
#endif

/*
 * Internal hardware crypt-op submission routines.
 */
int ccp_authenc(struct ccp_queue *sc, struct ccp_session *s,
    struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
    __must_check;
int ccp_blkcipher(struct ccp_queue *sc, struct ccp_session *s,
    struct cryptop *crp) __must_check;
int ccp_gcm(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde) __must_check;
int ccp_hmac(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp)
    __must_check;
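/*
 * Example only (not built): the shape of a completion callback as implied by
 * callback_fn in struct ccp_completion_ctx above.  The assumption that
 * callback_arg carries the originating cryptop is illustrative; the real
 * callbacks live in ccp.c and may pass something else.
 */
#if 0
static void
ccp_example_done(struct ccp_queue *qp, struct ccp_session *s, void *arg,
    int error)
{
	struct cryptop *crp;

	crp = arg;
	crp->crp_etype = error;	/* Report errno-style status to crypto(9). */
	crypto_done(crp);	/* Hand the completed request back to OCF. */
}
#endif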
/*
 * Internal hardware TRNG read routine.
 */
u_int random_ccp_read(void *v, u_int c);

/* XXX */
int ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
    __must_check;
void ccp_queue_abort(struct ccp_queue *qp);
void ccp_queue_release(struct ccp_queue *qp);

/*
 * Internal inline routines.
 */
static inline unsigned
ccp_queue_get_active(struct ccp_queue *qp)
{
	struct ccp_softc *sc;

	sc = qp->cq_softc;
	return ((qp->cq_tail - qp->cq_head) & ((1 << sc->ring_size_order) - 1));
}

static inline unsigned
ccp_queue_get_ring_space(struct ccp_queue *qp)
{
	struct ccp_softc *sc;

	sc = qp->cq_softc;
	return ((1 << sc->ring_size_order) - ccp_queue_get_active(qp) - 1);
}
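/*
 * Example only (not built): a minimal sketch of how the queue routines above
 * appear intended to compose, inferred solely from their declarations:
 * reserve ring slots, emit descriptors, then commit or roll back.  The
 * M_NOWAIT flag and the exact abort/release semantics are assumptions here;
 * ccp.c is authoritative.
 */
#if 0
static int
ccp_example_submit(struct ccp_queue *qp, struct ccp_session *s,
    struct cryptop *crp)
{
	int error;

	/* Reserve one descriptor slot in the command ring. */
	error = ccp_queue_acquire_reserve(qp, 1, M_NOWAIT);
	if (error != 0)
		return (error);

	/* Fill the reserved descriptor(s) for this request. */
	error = ccp_blkcipher(qp, s, crp);
	if (error != 0)
		ccp_queue_abort(qp);	/* Drop the reservation on failure. */
	else
		ccp_queue_release(qp);	/* Commit the reserved descriptors. */
	return (error);
}
#endif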