// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#include "cipher.h"
#include "common.h"
#include "core.h"
#include "regs-v5.h"
#include "sha.h"
#include "aead.h"

static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
	return readl(qce->base + offset);
}

static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
	writel(val, qce->base + offset);
}

static inline void qce_write_array(struct qce_device *qce, u32 offset,
				   const u32 *val, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), val[i]);
}

static inline void
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), 0);
}

static u32 qce_config_reg(struct qce_device *qce, int little)
{
	u32 beats = (qce->burst_size >> 3) - 1;
	u32 pipe_pair = qce->pipe_pair_id;
	u32 config;

	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
	config &= ~HIGH_SPD_EN_N_SHIFT;

	if (little)
		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);

	return config;
}

void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
	__be32 *d = dst;
	const u8 *s = src;
	unsigned int n;

	n = len / sizeof(u32);
	for (; n > 0; n--) {
		*d = cpu_to_be32p((const __u32 *) s);
		s += sizeof(__u32);
		d++;
	}
}

static void qce_setup_config(struct qce_device *qce)
{
	u32 config;

	/* get big endianness */
	config = qce_config_reg(qce, 0);

	/* clear status */
	qce_write(qce, REG_STATUS, 0);
	qce_write(qce, REG_CONFIG, config);
}

static inline void qce_crypto_go(struct qce_device *qce, bool result_dump)
{
	if (result_dump)
		qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
	else
		qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
}

#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
{
	u32 cfg = 0;

	if (IS_CCM(flags) || IS_CMAC(flags))
		cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
	else
		cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;

	if (IS_CCM(flags) || IS_CMAC(flags)) {
		if (key_size == AES_KEYSIZE_128)
			cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
		else if (key_size == AES_KEYSIZE_256)
			cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
	}

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
		cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
	else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
		cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
	else if (IS_CMAC(flags))
		cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
	else if (IS_CCM(flags))
		cfg |= (auth_size - 1) << AUTH_SIZE_SHIFT;

	if (IS_SHA1(flags) || IS_SHA256(flags))
		cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
	else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
		cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
	else if (IS_CCM(flags))
		cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
	else if (IS_CMAC(flags))
		cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;

	if (IS_SHA(flags) || IS_SHA_HMAC(flags))
		cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;

	if (IS_CCM(flags))
		cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;

	return cfg;
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
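/*
 * Program the crypto engine for a hash/HMAC/CMAC request: load the
 * authentication key, digest/IV state and byte counters, set the segment
 * sizes, then start processing with result dump enabled.
 */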
static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
	__be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
	__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
	u32 auth_cfg = 0, config;
	unsigned int iv_words;

	/* if not the last, the size has to be on the block boundary */
	if (!rctx->last_blk && req->nbytes % blocksize)
		return -EINVAL;

	qce_setup_config(qce);

	if (IS_CMAC(rctx->flags)) {
		qce_write(qce, REG_AUTH_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_SIZE, 0);
		qce_clear_array(qce, REG_AUTH_IV0, 16);
		qce_clear_array(qce, REG_AUTH_KEY0, 16);
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

		auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen, digestsize);
	}

	if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
		u32 authkey_words = rctx->authklen / sizeof(u32);

		qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
		qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
				authkey_words);
	}

	if (IS_CMAC(rctx->flags))
		goto go_proc;

	if (rctx->first_blk)
		memcpy(auth, rctx->digest, digestsize);
	else
		qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);

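	/* SHA-1 state is 5 words (160 bits), SHA-256 state is 8 words (256 bits) */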
	iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
	qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);

	if (rctx->first_blk)
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
	else
		qce_write_array(qce, REG_AUTH_BYTECNT0,
				(u32 *)rctx->byte_count, 2);

	auth_cfg = qce_auth_cfg(rctx->flags, 0, digestsize);

	if (rctx->last_blk)
		auth_cfg |= BIT(AUTH_LAST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_LAST_SHIFT);

	if (rctx->first_blk)
		auth_cfg |= BIT(AUTH_FIRST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);

go_proc:
	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
	qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
	qce_write(qce, REG_AUTH_SEG_START, 0);
	qce_write(qce, REG_ENCR_SEG_CFG, 0);
	qce_write(qce, REG_SEG_SIZE, req->nbytes);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce, true);

	return 0;
}
#endif

#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
	u32 cfg = 0;

	if (IS_AES(flags)) {
		if (aes_key_size == AES_KEYSIZE_128)
			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
		else if (aes_key_size == AES_KEYSIZE_256)
			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
	}

	if (IS_AES(flags))
		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
	else if (IS_DES(flags) || IS_3DES(flags))
		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;

	if (IS_DES(flags))
		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;

	if (IS_3DES(flags))
		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;

	switch (flags & QCE_MODE_MASK) {
	case QCE_MODE_ECB:
		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CBC:
		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CTR:
		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_XTS:
		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CCM:
		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
		break;
	default:
		return ~0;
	}

	return cfg;
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
	u8 swap[QCE_AES_IV_LENGTH];
	u32 i, j;

	if (ivsize > QCE_AES_IV_LENGTH)
		return;

	memset(swap, 0, QCE_AES_IV_LENGTH);

	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
	     i < QCE_AES_IV_LENGTH; i++, j--)
		swap[i] = src[j];

	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}

static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
		       unsigned int enckeylen, unsigned int cryptlen)
{
	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));

	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
			       enckeylen / 2);
	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);

	/* Set data unit size to cryptlen. Anything else causes
	 * the crypto engine to return incorrect results.
	 */
	qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
}

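/*
 * Program the crypto engine for a symmetric cipher (skcipher) request:
 * write the cipher key (plus the XTS tweak key where applicable), the
 * IV/counter and the segment sizes, then start processing.
 */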
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
	__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
	unsigned int enckey_words, enciv_words;
	unsigned int keylen;
	u32 encr_cfg = 0, auth_cfg = 0, config;
	unsigned int ivsize = rctx->ivsize;
	unsigned long flags = rctx->flags;

	qce_setup_config(qce);

	if (IS_XTS(flags))
		keylen = ctx->enc_keylen / 2;
	else
		keylen = ctx->enc_keylen;

	qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
	enckey_words = keylen / sizeof(u32);

	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

	encr_cfg = qce_encr_cfg(flags, keylen);

	if (IS_DES(flags)) {
		enciv_words = 2;
		enckey_words = 2;
	} else if (IS_3DES(flags)) {
		enciv_words = 2;
		enckey_words = 6;
	} else if (IS_AES(flags)) {
		if (IS_XTS(flags))
			qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
				   rctx->cryptlen);
		enciv_words = 4;
	} else {
		return -EINVAL;
	}

	qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);

	if (!IS_ECB(flags)) {
		if (IS_XTS(flags))
			qce_xts_swapiv(enciv, rctx->iv, ivsize);
		else
			qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);

		qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
	}

	if (IS_ENCRYPT(flags))
		encr_cfg |= BIT(ENCODE_SHIFT);

	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
	qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
	qce_write(qce, REG_ENCR_SEG_START, 0);

	if (IS_CTR(flags)) {
		qce_write(qce, REG_CNTR_MASK, ~0);
		qce_write(qce, REG_CNTR_MASK0, ~0);
		qce_write(qce, REG_CNTR_MASK1, ~0);
		qce_write(qce, REG_CNTR_MASK2, ~0);
	}

	qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce, true);

	return 0;
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};

static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};

static unsigned int qce_be32_to_cpu_array(u32 *dst, const u8 *src, unsigned int len)
{
	u32 *d = dst;
	const u8 *s = src;
	unsigned int n;

	n = len / sizeof(u32);
	for (; n > 0; n--) {
		*d = be32_to_cpup((const __be32 *)s);
		s += sizeof(u32);
		d++;
	}
	return DIV_ROUND_UP(len, sizeof(u32));
}

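/*
 * Program the crypto engine for an AEAD request: write the cipher key and
 * IV/counter, the authentication key and either the HMAC standard IV or the
 * CCM nonce, then the segment configuration and lengths, and start processing.
 */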
static int qce_setup_regs_aead(struct crypto_async_request *async_req)
{
	struct aead_request *req = aead_request_cast(async_req);
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	u32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
	u32 enciv[QCE_MAX_IV_SIZE / sizeof(u32)] = {0};
	u32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(u32)] = {0};
	u32 authiv[SHA256_DIGEST_SIZE / sizeof(u32)] = {0};
	u32 authnonce[QCE_MAX_NONCE / sizeof(u32)] = {0};
	unsigned int enc_keylen = ctx->enc_keylen;
	unsigned int auth_keylen = ctx->auth_keylen;
	unsigned int enc_ivsize = rctx->ivsize;
	unsigned int auth_ivsize = 0;
	unsigned int enckey_words, enciv_words;
	unsigned int authkey_words, authiv_words, authnonce_words;
	unsigned long flags = rctx->flags;
	u32 encr_cfg, auth_cfg, config, totallen;
	u32 iv_last_word;

	qce_setup_config(qce);

	/* Write encryption key */
	enckey_words = qce_be32_to_cpu_array(enckey, ctx->enc_key, enc_keylen);
	qce_write_array(qce, REG_ENCR_KEY0, enckey, enckey_words);

	/* Write encryption iv */
	enciv_words = qce_be32_to_cpu_array(enciv, rctx->iv, enc_ivsize);
	qce_write_array(qce, REG_CNTR0_IV0, enciv, enciv_words);

	if (IS_CCM(rctx->flags)) {
		iv_last_word = enciv[enciv_words - 1];
		qce_write(qce, REG_CNTR3_IV3, iv_last_word + 1);
		qce_write_array(qce, REG_ENCR_CCM_INT_CNTR0, (u32 *)enciv, enciv_words);
		qce_write(qce, REG_CNTR_MASK, ~0);
		qce_write(qce, REG_CNTR_MASK0, ~0);
		qce_write(qce, REG_CNTR_MASK1, ~0);
		qce_write(qce, REG_CNTR_MASK2, ~0);
	}

	/* Clear authentication IV and KEY registers of previous values */
	qce_clear_array(qce, REG_AUTH_IV0, 16);
	qce_clear_array(qce, REG_AUTH_KEY0, 16);

	/* Clear byte count */
	qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

	/* Write authentication key */
	authkey_words = qce_be32_to_cpu_array(authkey, ctx->auth_key, auth_keylen);
	qce_write_array(qce, REG_AUTH_KEY0, (u32 *)authkey, authkey_words);

	/* Write initial authentication IV only for HMAC algorithms */
	if (IS_SHA_HMAC(rctx->flags)) {
		/* Write default authentication iv */
		if (IS_SHA1_HMAC(rctx->flags)) {
			auth_ivsize = SHA1_DIGEST_SIZE;
			memcpy(authiv, std_iv_sha1, auth_ivsize);
		} else if (IS_SHA256_HMAC(rctx->flags)) {
			auth_ivsize = SHA256_DIGEST_SIZE;
			memcpy(authiv, std_iv_sha256, auth_ivsize);
		}
		authiv_words = auth_ivsize / sizeof(u32);
		qce_write_array(qce, REG_AUTH_IV0, (u32 *)authiv, authiv_words);
	} else if (IS_CCM(rctx->flags)) {
		/* Write nonce for CCM algorithms */
		authnonce_words = qce_be32_to_cpu_array(authnonce, rctx->ccm_nonce, QCE_MAX_NONCE);
		qce_write_array(qce, REG_AUTH_INFO_NONCE0, authnonce, authnonce_words);
	}

	/* Set up ENCR_SEG_CFG */
	encr_cfg = qce_encr_cfg(flags, enc_keylen);
	if (IS_ENCRYPT(flags))
		encr_cfg |= BIT(ENCODE_SHIFT);
	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);

	/* Set up AUTH_SEG_CFG */
	auth_cfg = qce_auth_cfg(rctx->flags, auth_keylen, ctx->authsize);
	auth_cfg |= BIT(AUTH_LAST_SHIFT);
	auth_cfg |= BIT(AUTH_FIRST_SHIFT);
	if (IS_ENCRYPT(flags)) {
		if (IS_CCM(rctx->flags))
			auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
		else
			auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
	} else {
		if (IS_CCM(rctx->flags))
			auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
		else
			auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
	}
	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

	totallen = rctx->cryptlen + rctx->assoclen;

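	/*
	 * For CCM decryption the input still carries the authentication tag,
	 * so the encryption segment and the total transfer length below
	 * include ctx->authsize on top of the ciphertext length.
	 */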
	/* Set the encryption size and start offset */
	if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
		qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen + ctx->authsize);
	else
		qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
	qce_write(qce, REG_ENCR_SEG_START, rctx->assoclen & 0xffff);

	/* Set the authentication size and start offset */
	qce_write(qce, REG_AUTH_SEG_SIZE, totallen);
	qce_write(qce, REG_AUTH_SEG_START, 0);

	/* Write total length */
	if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
		qce_write(qce, REG_SEG_SIZE, totallen + ctx->authsize);
	else
		qce_write(qce, REG_SEG_SIZE, totallen);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	/* Start the process */
	qce_crypto_go(qce, !IS_CCM(flags));

	return 0;
}
#endif

int qce_start(struct crypto_async_request *async_req, u32 type)
{
	switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return qce_setup_regs_skcipher(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	case CRYPTO_ALG_TYPE_AHASH:
		return qce_setup_regs_ahash(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
	case CRYPTO_ALG_TYPE_AEAD:
		return qce_setup_regs_aead(async_req);
#endif
	default:
		return -EINVAL;
	}
}

#define STATUS_ERRORS	\
		(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))

int qce_check_status(struct qce_device *qce, u32 *status)
{
	int ret = 0;

	*status = qce_read(qce, REG_STATUS);

	/*
	 * Don't use the result dump status; the operation may not be complete.
	 * Instead, use the status we just read from the device. If we ever
	 * need result_status from the result dump, it has to be byte swapped,
	 * since we set the device to little endian.
	 */
	if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
		ret = -ENXIO;
	else if (*status & BIT(MAC_FAILED_SHIFT))
		ret = -EBADMSG;

	return ret;
}

void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
	u32 val;

	val = qce_read(qce, REG_VERSION);
	*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
	*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
	*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}