// SPDX-License-Identifier: GPL-2.0-or-later
/* Asymmetric algorithms supported by virtio crypto device
 *
 * Authors: zhenwei pi <pizhenwei@bytedance.com>
 *          lei he <helei.sig11@bytedance.com>
 *
 * Copyright 2022 Bytedance CO., LTD.
 */

#include <linux/mpi.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

struct virtio_crypto_rsa_ctx {
	MPI n;
};

struct virtio_crypto_akcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_akcipher *tfm;
	bool session_valid;
	__u64 session_id;
	union {
		struct virtio_crypto_rsa_ctx rsa_ctx;
	};
};

struct virtio_crypto_akcipher_request {
	struct virtio_crypto_request base;
	struct virtio_crypto_akcipher_ctx *akcipher_ctx;
	struct akcipher_request *akcipher_req;
	void *src_buf;
	void *dst_buf;
	uint32_t opcode;
};

struct virtio_crypto_akcipher_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct akcipher_alg algo;
};

static DEFINE_MUTEX(algs_lock);

static void virtio_crypto_akcipher_finalize_req(
	struct virtio_crypto_akcipher_request *vc_akcipher_req,
	struct akcipher_request *req, int err)
{
	virtcrypto_clear_request(&vc_akcipher_req->base);

	crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
}

static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_akcipher_request *vc_akcipher_req =
		container_of(vc_req, struct virtio_crypto_akcipher_request, base);
	struct akcipher_request *akcipher_req;
	int error;

	switch (vc_req->status) {
	case VIRTIO_CRYPTO_OK:
		error = 0;
		break;
	case VIRTIO_CRYPTO_INVSESS:
	case VIRTIO_CRYPTO_ERR:
		error = -EINVAL;
		break;
	case VIRTIO_CRYPTO_BADMSG:
		error = -EBADMSG;
		break;
	case VIRTIO_CRYPTO_KEY_REJECTED:
		error = -EKEYREJECTED;
		break;
	default:
		error = -EIO;
		break;
	}

	akcipher_req = vc_akcipher_req->akcipher_req;
	if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
		/* the actual length may be less than the dst buffer */
		akcipher_req->dst_len = len - sizeof(vc_req->status);
		sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
				    vc_akcipher_req->dst_buf, akcipher_req->dst_len);
	}
	virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}
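/*
 * Data-path request layout (a sketch inferred from the functions below and
 * the virtio-crypto spec; the exact descriptor order is whatever
 * __virtio_crypto_akcipher_do_req() builds):
 *
 *	OUT: struct virtio_crypto_op_data_req (op header + akcipher para)
 *	OUT: src data (for VERIFY, signature followed by digest)
 *	IN:  dst data (omitted for VERIFY)
 *	IN:  status byte written by the device
 *
 * The callback above runs once the device marks the buffer used; 'len' is
 * the total number of bytes the device wrote to the IN descriptors, so the
 * actual result size is len minus the trailing status byte.
 */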
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
		struct virtio_crypto_ctrl_header *header, void *para,
		const uint8_t *key, unsigned int keylen)
{
	struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	uint8_t *pkey;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_session_input *input;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	pkey = kmemdup(key, keylen, GFP_ATOMIC);
	if (!pkey)
		return -ENOMEM;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req) {
		err = -ENOMEM;
		goto out;
	}

	ctrl = &vc_ctrl_req->ctrl;
	memcpy(&ctrl->header, header, sizeof(ctrl->header));
	memcpy(&ctrl->u, para, sizeof(ctrl->u));
	input = &vc_ctrl_req->input;
	input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);

	sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr_sg;

	sg_init_one(&key_sg, pkey, keylen);
	sgs[num_out++] = &key_sg;

	sg_init_one(&inhdr_sg, input, sizeof(*input));
	sgs[num_out + num_in++] = &inhdr_sg;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(input->status));
		err = -EINVAL;
		goto out;
	}

	ctx->session_id = le64_to_cpu(input->session_id);
	ctx->session_valid = true;
	err = 0;

out:
	kfree(vc_ctrl_req);
	kfree_sensitive(pkey);

	return err;
}

static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
{
	struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	unsigned int num_out = 0, num_in = 0;
	int err;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_inhdr *ctrl_status;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	if (!ctx->session_valid)
		return 0;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req)
		return -ENOMEM;

	ctrl_status = &vc_ctrl_req->ctrl_status;
	ctrl_status->status = VIRTIO_CRYPTO_ERR;
	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
	ctrl->header.queue_id = 0;

	destroy_session = &ctrl->u.destroy_session;
	destroy_session->session_id = cpu_to_le64(ctx->session_id);

	sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr_sg;

	sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
	sgs[num_out + num_in++] = &inhdr_sg;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       ctrl_status->status, destroy_session->session_id);
		err = -EINVAL;
		goto out;
	}

	err = 0;
	ctx->session_valid = false;

out:
	kfree(vc_ctrl_req);

	return err;
}
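/*
 * Session lifecycle note: a device session is (re)created on every
 * set_{pub,priv}_key() call and destroyed on the next key change or at
 * transform teardown.  Both helpers above are synchronous round trips on
 * the control virtqueue:
 *
 *	CREATE:  OUT ctrl req (header + para), OUT key blob, IN session_input
 *	DESTROY: OUT ctrl req, IN one-byte status
 *
 * The key is duplicated with kmemdup() and released with kfree_sensitive()
 * so that key material is wiped from the heap once the request completes.
 */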
static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
		struct akcipher_request *req, struct data_queue *data_vq)
{
	struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
	struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
	void *src_buf = NULL, *dst_buf = NULL;
	unsigned int num_out = 0, num_in = 0;
	int node = dev_to_node(&vcrypto->vdev->dev);
	unsigned long flags;
	int ret = -ENOMEM;
	bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
	unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;

	/* out header */
	sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr_sg;

	/* src data */
	src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
	if (!src_buf)
		goto err;

	if (verify) {
		/* for verify operation, both src and dst data work as OUT direction */
		sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
		sg_init_one(&srcdata_sg, src_buf, src_len);
		sgs[num_out++] = &srcdata_sg;
	} else {
		sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
		sg_init_one(&srcdata_sg, src_buf, src_len);
		sgs[num_out++] = &srcdata_sg;

		/* dst data */
		dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
		if (!dst_buf)
			goto err;

		sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
		sgs[num_out + num_in++] = &dstdata_sg;
	}

	vc_akcipher_req->src_buf = src_buf;
	vc_akcipher_req->dst_buf = dst_buf;

	/* in header */
	sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &inhdr_sg;

	spin_lock_irqsave(&data_vq->lock, flags);
	ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (ret)
		goto err;

	return 0;

err:
	kfree(src_buf);
	kfree(dst_buf);

	/* -ENOMEM on allocation failure, otherwise the virtqueue error */
	return ret;
}

static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
{
	struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
	struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
	struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct data_queue *data_vq = vc_req->dataq;
	struct virtio_crypto_op_header *header;
	struct virtio_crypto_akcipher_data_req *akcipher_req;
	int ret;

	vc_req->sgs = NULL;
	vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
					GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
	if (!vc_req->req_data)
		return -ENOMEM;

	/* build request header */
	header = &vc_req->req_data->header;
	header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
	header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	header->session_id = cpu_to_le64(ctx->session_id);

	/* build request akcipher data */
	akcipher_req = &vc_req->req_data->u.akcipher_req;
	akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
	akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);

	ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
	if (ret < 0) {
		kfree_sensitive(vc_req->req_data);
		vc_req->req_data = NULL;
		return ret;
	}

	return 0;
}
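/*
 * virtio_crypto_rsa_do_req() executes in crypto-engine context: requests
 * handed off via crypto_transfer_akcipher_request_to_engine() below are
 * replayed here one at a time, so the per-request state (req_data and the
 * bounce buffers) needs no locking beyond the data-queue spinlock taken in
 * __virtio_crypto_akcipher_do_req().
 */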
static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
{
	struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
	struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
	vc_akcipher_req->akcipher_ctx = ctx;
	vc_akcipher_req->akcipher_req = req;
	vc_akcipher_req->opcode = opcode;

	return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
{
	return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
}

static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
{
	return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}

static int virtio_crypto_rsa_sign(struct akcipher_request *req)
{
	return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
}

static int virtio_crypto_rsa_verify(struct akcipher_request *req)
{
	return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
}

static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
				     const void *key,
				     unsigned int keylen,
				     bool private,
				     int padding_algo,
				     int hash_algo)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
	struct virtio_crypto *vcrypto;
	struct virtio_crypto_ctrl_header header;
	struct virtio_crypto_akcipher_session_para para;
	struct rsa_key rsa_key = {0};
	int node = virtio_crypto_get_current_node();
	uint32_t keytype;
	int ret;

	/* mpi_free will test n, just free it. */
	mpi_free(rsa_ctx->n);
	rsa_ctx->n = NULL;

	if (private) {
		keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	} else {
		keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	}

	if (ret)
		return ret;

	rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
	if (!rsa_ctx->n)
		return -ENOMEM;

	if (!ctx->vcrypto) {
		vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
						  VIRTIO_CRYPTO_AKCIPHER_RSA);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		virtio_crypto_alg_akcipher_close_session(ctx);
	}

	/* set ctrl header */
	header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
	header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	header.queue_id = 0;

	/* set RSA para */
	para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	para.keytype = cpu_to_le32(keytype);
	para.keylen = cpu_to_le32(keylen);
	para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
	para.u.rsa.hash_algo = cpu_to_le32(hash_algo);

	return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
}

static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
					      const void *key,
					      unsigned int keylen)
{
	return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
					 VIRTIO_CRYPTO_RSA_RAW_PADDING,
					 VIRTIO_CRYPTO_RSA_NO_HASH);
}

static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
						     const void *key,
						     unsigned int keylen)
{
	return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
					 VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
					 VIRTIO_CRYPTO_RSA_SHA1);
}
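/*
 * Key format note (an assumption based on the shared rsa_helper parsers,
 * not spelled out in this file): rsa_parse_{priv,pub}_key() accept the same
 * BER-encoded PKCS#1 blobs as the software rsa driver.  They are used here
 * only to validate the key and extract the modulus n, so that max_size()
 * can be answered without a device round trip; the unmodified blob is then
 * forwarded to the device inside the CREATE_SESSION request.
 */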
static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
					     const void *key,
					     unsigned int keylen)
{
	return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
					 VIRTIO_CRYPTO_RSA_RAW_PADDING,
					 VIRTIO_CRYPTO_RSA_NO_HASH);
}

static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
						    const void *key,
						    unsigned int keylen)
{
	return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
					 VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
					 VIRTIO_CRYPTO_RSA_SHA1);
}

static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;

	return mpi_get_size(rsa_ctx->n);
}

static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->tfm = tfm;
	ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;

	return 0;
}

static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;

	virtio_crypto_alg_akcipher_close_session(ctx);
	virtcrypto_dev_put(ctx->vcrypto);
	mpi_free(rsa_ctx->n);
	rsa_ctx->n = NULL;
}

static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
	{
		.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
		.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
		.algo = {
			.encrypt = virtio_crypto_rsa_encrypt,
			.decrypt = virtio_crypto_rsa_decrypt,
			.set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
			.set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
			.max_size = virtio_crypto_rsa_max_size,
			.init = virtio_crypto_rsa_init_tfm,
			.exit = virtio_crypto_rsa_exit_tfm,
			.reqsize = sizeof(struct virtio_crypto_akcipher_request),
			.base = {
				.cra_name = "rsa",
				.cra_driver_name = "virtio-crypto-rsa",
				.cra_priority = 150,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
			},
		},
	},
	{
		.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
		.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
		.algo = {
			.encrypt = virtio_crypto_rsa_encrypt,
			.decrypt = virtio_crypto_rsa_decrypt,
			.sign = virtio_crypto_rsa_sign,
			.verify = virtio_crypto_rsa_verify,
			.set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
			.set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
			.max_size = virtio_crypto_rsa_max_size,
			.init = virtio_crypto_rsa_init_tfm,
			.exit = virtio_crypto_rsa_exit_tfm,
			.reqsize = sizeof(struct virtio_crypto_akcipher_request),
			.base = {
				.cra_name = "pkcs1pad(rsa,sha1)",
				.cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
				.cra_priority = 150,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
			},
		},
	},
};
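/*
 * Usage sketch (illustrative only, not part of this driver): another kernel
 * module would reach these algorithms through the generic akcipher API.
 * 'der_key'/'der_keylen' are hypothetical names for a BER-encoded key blob.
 *
 *	struct crypto_akcipher *tfm;
 *
 *	tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		crypto_akcipher_set_pub_key(tfm, der_key, der_keylen);
 *		...
 *		crypto_free_akcipher(tfm);
 *	}
 *
 * With cra_priority 150, the virtio implementation is preferred over the
 * generic software rsa (priority 100) once it is registered.
 */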
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
		uint32_t service = virtio_crypto_akcipher_algs[i].service;
		uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
			ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_akcipher_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
			 virtio_crypto_akcipher_algs[i].algo.base.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
		uint32_t service = virtio_crypto_akcipher_algs[i].service;
		uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;

		if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_akcipher_algs[i].active_devs == 1)
			crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);

		virtio_crypto_akcipher_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}
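/*
 * The active_devs counts above let several virtio-crypto devices share a
 * single crypto_register_akcipher() registration: an algorithm is
 * registered when the first capable device arrives and unregistered when
 * the last one disappears, with algs_lock serializing both paths.
 */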