/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */

#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H

#include <linux/hisi_acc_qm.h>
#include "sec_crypto.h"

/* Size of the small-packet bounce buffer ("pbuf"), in bytes */
#define SEC_PBUF_SZ		512
/* Maximum MAC (digest) length supported, in bytes */
#define SEC_MAX_MAC_LEN		64
/* Size of the inline per-request IV buffers, in bytes */
#define SEC_IV_SIZE		24
/* Number of SGEs carried by one hardware SGL descriptor */
#define SEC_SGE_NR_NUM		4
/* Required alignment of hardware SGL descriptors, in bytes */
#define SEC_SGL_ALIGN_SIZE	64

/*
 * Algorithm resource per hardware SEC queue.
 *
 * Each CPU-visible buffer pointer is paired with the DMA address the
 * device uses to reach the same memory.
 */
struct sec_alg_res {
	u8 *pbuf;		/* small-packet bounce buffer */
	dma_addr_t pbuf_dma;	/* DMA address of @pbuf */
	u8 *c_ivin;		/* cipher input IV */
	dma_addr_t c_ivin_dma;	/* DMA address of @c_ivin */
	u8 *a_ivin;		/* authentication input IV */
	dma_addr_t a_ivin_dma;	/* DMA address of @a_ivin */
	u8 *out_mac;		/* output MAC buffer */
	dma_addr_t out_mac_dma;	/* DMA address of @out_mac */
	u16 depth;		/* queue depth these resources were sized for */
};

/*
 * One scatter-gather entry as consumed by the SEC hardware.
 * NOTE(review): layout appears device-defined (__le32 fields, explicit
 * padding) — do not reorder or resize fields.
 */
struct sec_hw_sge {
	dma_addr_t buf;		/* DMA address of the data segment */
	void *page_ctrl;
	__le32 len;		/* segment length in bytes */
	__le32 pad;
	__le32 pad0;
	__le32 pad1;
};

/*
 * Hardware scatter-gather list descriptor; SGLs may be chained through
 * @next_dma/@next.  Alignment is mandated by the device.
 */
struct sec_hw_sgl {
	dma_addr_t next_dma;		/* DMA address of the next SGL in chain */
	__le16 entry_sum_in_chain;	/* total valid SGEs in the whole chain */
	__le16 entry_sum_in_sgl;	/* valid SGEs in this SGL */
	__le16 entry_length_in_sgl;	/* SGE slots available in this SGL */
	__le16 pad0;
	__le64 pad1[5];
	struct sec_hw_sgl *next;	/* CPU pointer to the next SGL in chain */
	struct sec_hw_sge sge_entries[SEC_SGE_NR_NUM];
} __aligned(SEC_SGL_ALIGN_SIZE);

/* Paired input/output hardware SGLs for one request */
struct sec_src_dst_buf {
	struct sec_hw_sgl in;
	struct sec_hw_sgl out;
};

/*
 * Per-request data buffer: either a pair of hardware SGLs or, for small
 * payloads, a single inline bounce buffer — the two share storage.
 */
struct sec_request_buf {
	union {
		struct sec_src_dst_buf data_buf;
		__u8 pbuf[SEC_PBUF_SZ];
	};
	dma_addr_t in_dma;	/* DMA address of the input buffer/SGL */
	dma_addr_t out_dma;	/* DMA address of the output buffer/SGL */
};

/* Cipher request of SEC private */
struct sec_cipher_req {
	struct hisi_acc_hw_sgl *c_out;	/* output hardware SGL */
	dma_addr_t c_out_dma;		/* DMA address of @c_out */
	u8 *c_ivin;			/* cipher input IV in use */
	dma_addr_t c_ivin_dma;		/* DMA address of @c_ivin */
	struct skcipher_request *sk_req;	/* originating crypto API request */
	u32 c_len;			/* cipher data length in bytes */
	bool encrypt;			/* true for encrypt, false for decrypt */
	__u8 c_ivin_buf[SEC_IV_SIZE];	/* inline IV storage */
};

/* AEAD request of SEC private */
struct sec_aead_req {
	u8 *out_mac;			/* output MAC in use */
	dma_addr_t out_mac_dma;		/* DMA address of @out_mac */
	u8 *a_ivin;			/* auth input IV in use */
	dma_addr_t a_ivin_dma;		/* DMA address of @a_ivin */
	struct aead_request *aead_req;	/* originating crypto API request */
	__u8 a_ivin_buf[SEC_IV_SIZE];	/* inline IV storage */
	__u8 out_mac_buf[SEC_MAX_MAC_LEN];	/* inline MAC storage */
};

/* SEC request of Crypto */
struct sec_req {
	/* Hardware descriptor; v2 and v3 BD formats share storage */
	union {
		struct sec_sqe sec_sqe;
		struct sec_sqe3 sec_sqe3;
	};
	struct sec_ctx *ctx;		/* owning TFM context */
	struct sec_qp_ctx *qp_ctx;	/* queue context servicing this request */

	/**
	 * Common parameter of the SEC request.
	 */
	struct hisi_acc_hw_sgl *in;	/* input hardware SGL */
	dma_addr_t in_dma;		/* DMA address of @in */
	struct sec_cipher_req c_req;	/* cipher-specific state */
	struct sec_aead_req aead_req;	/* AEAD-specific state */
	struct crypto_async_request *base;	/* generic crypto request base */

	int err_type;			/* hardware-reported error type */
	int req_id;			/* id allocated from the qp_ctx IDR */
	u32 flag;			/* request flags from the crypto API */

	/* use the small inline pbuf instead of SGL mapping */
	bool use_pbuf;

	struct list_head list;
	struct sec_request_buf buf;	/* per-request data buffer */
};

/**
 * struct sec_req_op - Operations for SEC request
 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Copy/prepare request data (e.g. IV) before submission
 * @bd_fill: Fill the SEC queue BD
 * @bd_send: Send the SEC BD into the hardware queue
 * @callback: Call back for the request
 * @process: Main processing logic of Skcipher
 */
struct sec_req_op {
	int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
	int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};

/* SEC auth context */
struct sec_auth_ctx {
	dma_addr_t a_key_dma;	/* DMA address of @a_key */
	u8 *a_key;		/* authentication key buffer */
	u8 a_key_len;		/* authentication key length in bytes */
	u8 a_alg;		/* hardware auth algorithm selector */
	struct crypto_shash *hash_tfm;	/* software hash for key derivation */
	struct crypto_aead *fallback_aead_tfm;	/* software AEAD fallback */
};

/* SEC cipher context which cipher's relatives */
struct sec_cipher_ctx {
	u8 *c_key;		/* cipher key buffer */
	dma_addr_t c_key_dma;	/* DMA address of @c_key */
	sector_t iv_offset;	/* IV offset (storage/XTS use — TODO confirm) */
	u32 c_gran_size;	/* data granularity size */
	u32 ivsize;		/* IV size for the selected algorithm */
	u8 c_mode;		/* hardware cipher mode selector */
	u8 c_alg;		/* hardware cipher algorithm selector */
	u8 c_key_len;		/* cipher key length selector */

	/* add software support */
	bool fallback;		/* true when the software fallback must be used */
	struct crypto_sync_skcipher *fbtfm;	/* software skcipher fallback */
};

/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
	struct hisi_qp *qp;		/* underlying ACC hardware queue pair */
	struct sec_req **req_list;	/* in-flight requests indexed by req_id */
	struct idr req_idr;		/* allocator for request ids */
	struct sec_alg_res *res;	/* per-queue algorithm resources */
	struct sec_ctx *ctx;		/* owning TFM context */
	spinlock_t req_lock;		/* protects request submission state */
	spinlock_t id_lock;		/* protects @req_idr */
	struct hisi_acc_sgl_pool *c_in_pool;	/* input SGL pool */
	struct hisi_acc_sgl_pool *c_out_pool;	/* output SGL pool */
	u16 send_head;			/* next BD slot to send */
};

/* Type of crypto algorithm a TFM context services */
enum sec_alg_type {
	SEC_SKCIPHER,
	SEC_AEAD
};

/* SEC Crypto TFM context which defines queue and cipher .etc relatives */
struct sec_ctx {
	struct sec_qp_ctx *qp_ctx;	/* per-queue contexts for this TFM */
	struct sec_dev *sec;		/* owning device */
	const struct sec_req_op *req_op;	/* request operation callbacks */
	struct hisi_qp **qps;		/* hardware queues assigned to this TFM */

	/* Half queues for encipher, and half for decipher */
	u32 hlf_q_num;

	/* Current cyclic index to select a queue for encipher */
	atomic_t enc_qcyclic;

	/* Current cyclic index to select a queue for decipher */
	atomic_t dec_qcyclic;

	enum sec_alg_type alg_type;	/* skcipher or AEAD */
	bool pbuf_supported;		/* hardware supports the inline pbuf path */
	struct sec_cipher_ctx c_ctx;	/* cipher state */
	struct sec_auth_ctx a_ctx;	/* auth state */
	u8 type_supported;		/* supported BD types (v2/v3) */
	struct device *dev;		/* device for DMA mapping/logging */
};

/* Indices of the debugfs files exposed by the driver */
enum sec_debug_file_index {
	SEC_CLEAR_ENABLE,
	SEC_DEBUG_FILE_NUM,
};

/* State backing one debugfs file */
struct sec_debug_file {
	enum sec_debug_file_index index;
	spinlock_t lock;	/* serializes debugfs read/write access */
	struct hisi_qm *qm;
};

/* Driver statistics counters (exposed for debugging) */
struct sec_dfx {
	atomic64_t send_cnt;		/* BDs sent to hardware */
	atomic64_t recv_cnt;		/* BDs received back */
	atomic64_t send_busy_cnt;	/* sends rejected because queue was full */
	atomic64_t recv_busy_cnt;	/* completions hit a busy condition */
	atomic64_t err_bd_cnt;		/* BDs completed with a hardware error */
	atomic64_t invalid_req_cnt;	/* completions with no matching request */
	atomic64_t done_flag_cnt;	/* BDs with an unexpected done flag */
};

/* Aggregated debug state for one device */
struct sec_debug {
	struct sec_dfx dfx;
	struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
};

/* One SEC accelerator device instance */
struct sec_dev {
	struct hisi_qm qm;		/* embedded queue-management state */
	struct sec_debug debug;		/* debugfs/statistics state */
	u32 ctx_q_num;			/* queues allocated per TFM context */
	bool iommu_used;		/* device operates behind an IOMMU */
};

/* Capability register identifiers queried from the QM capability table */
enum sec_cap_type {
	SEC_QM_NFE_MASK_CAP = 0x0,
	SEC_QM_RESET_MASK_CAP,
	SEC_QM_OOO_SHUTDOWN_MASK_CAP,
	SEC_QM_CE_MASK_CAP,
	SEC_NFE_MASK_CAP,
	SEC_RESET_MASK_CAP,
	SEC_OOO_SHUTDOWN_MASK_CAP,
	SEC_CE_MASK_CAP,
	SEC_CLUSTER_NUM_CAP,
	SEC_CORE_TYPE_NUM_CAP,
	SEC_CORE_NUM_CAP,
	SEC_CORES_PER_CLUSTER_NUM_CAP,
	SEC_CORE_ENABLE_BITMAP,
	SEC_DRV_ALG_BITMAP_LOW,
	SEC_DRV_ALG_BITMAP_HIGH,
	SEC_DEV_ALG_BITMAP_LOW,
	SEC_DEV_ALG_BITMAP_HIGH,
	SEC_CORE1_ALG_BITMAP_LOW,
	SEC_CORE1_ALG_BITMAP_HIGH,
	SEC_CORE2_ALG_BITMAP_LOW,
	SEC_CORE2_ALG_BITMAP_HIGH,
	SEC_CORE3_ALG_BITMAP_LOW,
	SEC_CORE3_ALG_BITMAP_HIGH,
	SEC_CORE4_ALG_BITMAP_LOW,
	SEC_CORE4_ALG_BITMAP_HIGH,
};

/* Row indices into the device capability table */
enum sec_cap_table_type {
	QM_RAS_NFE_TYPE = 0x0,
	QM_RAS_NFE_RESET,
	QM_RAS_CE_TYPE,
	SEC_RAS_NFE_TYPE,
	SEC_RAS_NFE_RESET,
	SEC_RAS_CE_TYPE,
	SEC_CORE_INFO,
	SEC_CORE_EN,
	SEC_DRV_ALG_BITMAP_LOW_TB,
	SEC_DRV_ALG_BITMAP_HIGH_TB,
	SEC_ALG_BITMAP_LOW,
	SEC_ALG_BITMAP_HIGH,
	SEC_CORE1_BITMAP_LOW,
	SEC_CORE1_BITMAP_HIGH,
	SEC_CORE2_BITMAP_LOW,
	SEC_CORE2_BITMAP_HIGH,
	SEC_CORE3_BITMAP_LOW,
	SEC_CORE3_BITMAP_HIGH,
	SEC_CORE4_BITMAP_LOW,
	SEC_CORE4_BITMAP_HIGH,
};

/* Release the hardware queue pairs obtained from sec_create_qps() */
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
/* Acquire hardware queue pairs for crypto use; NULL on failure */
struct hisi_qp **sec_create_qps(void);
/* Register the driver's algorithms with the crypto API */
int sec_register_to_crypto(struct hisi_qm *qm);
/* Unregister the driver's algorithms from the crypto API */
void sec_unregister_from_crypto(struct hisi_qm *qm);
/* Combine the @high/@low capability registers into one 64-bit alg bitmap */
u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
#endif