// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"


void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
	if (vc_req) {
		kfree_sensitive(vc_req->req_data);
		kfree(vc_req->sgs);
	}
}

static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	complete(&vc_ctrl_req->compl);
}

static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
			virtio_crypto_ctrlq_callback(vc_ctrl_req);
			spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}

int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
				  unsigned int out_sgs, unsigned int in_sgs,
				  struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	int err;
	unsigned long flags;

	init_completion(&vc_ctrl_req->compl);

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
		return err;
	}

	virtqueue_kick(vcrypto->ctrl_vq);
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);

	wait_for_completion(&vc_ctrl_req->compl);

	return 0;
}

static void virtcrypto_done_task(unsigned long data)
{
	struct data_queue *data_vq = (struct data_queue *)data;
	struct virtqueue *vq = data_vq->vq;
	struct virtio_crypto_request *vc_req;
	unsigned int len;

	do {
		virtqueue_disable_cb(vq);
		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
			if (vc_req->alg_cb)
				vc_req->alg_cb(vc_req, len);
		}
	} while (!virtqueue_enable_cb(vq));
}

static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct data_queue *dq = &vcrypto->data_vq[vq->index];

	tasklet_schedule(&dq->done_task);
}

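/*
 * Discover and initialize the device's virtqueues: max_data_queues data
 * queues followed by one control queue.  Each data queue gets a crypto
 * engine sized to its vring and a completion tasklet.
 */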
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
	struct virtqueue_info *vqs_info;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	struct device *dev = &vi->vdev->dev;

	/*
	 * We expect 1 data virtqueue, followed by
	 * possibly N-1 data queues used in multiqueue mode,
	 * followed by the control vq.
	 */
	total_vqs = vi->max_data_queues + 1;

	/* Allocate space for find_vqs parameters */
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	vqs_info = kcalloc(total_vqs, sizeof(*vqs_info), GFP_KERNEL);
	if (!vqs_info)
		goto err_vqs_info;

	/* Parameters for control virtqueue */
	vqs_info[total_vqs - 1].callback = virtcrypto_ctrlq_callback;
	vqs_info[total_vqs - 1].name = "controlq";

	/* Allocate/initialize parameters for data virtqueues */
	for (i = 0; i < vi->max_data_queues; i++) {
		vqs_info[i].callback = virtcrypto_dataq_callback;
		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
			 "dataq.%d", i);
		vqs_info[i].name = vi->data_vq[i].name;
	}

	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL);
	if (ret)
		goto err_find;

	vi->ctrl_vq = vqs[total_vqs - 1];

	for (i = 0; i < vi->max_data_queues; i++) {
		spin_lock_init(&vi->data_vq[i].lock);
		vi->data_vq[i].vq = vqs[i];
		/* Initialize crypto engine */
		vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
						virtqueue_get_vring_size(vqs[i]));
		if (!vi->data_vq[i].engine) {
			ret = -ENOMEM;
			goto err_engine;
		}
		tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
			     (unsigned long)&vi->data_vq[i]);
	}

	kfree(vqs_info);
	kfree(vqs);

	return 0;

err_engine:
err_find:
	kfree(vqs_info);
err_vqs_info:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
	vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
			      GFP_KERNEL);
	if (!vi->data_vq)
		return -ENOMEM;

	return 0;
}

static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_data_queues; i++)
			virtqueue_set_affinity(vi->data_vq[i].vq, NULL);

		vi->affinity_hint_set = false;
	}
}

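/*
 * Give each data virtqueue an affinity hint for a distinct online CPU when
 * running with more than one data queue; in single queue mode any existing
 * hints are cleared instead.
 */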
static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
	int i = 0;
	int cpu;

	/*
	 * In single queue mode, we don't set the cpu affinity.
	 */
	if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
		virtcrypto_clean_affinity(vcrypto, -1);
		return;
	}

	/*
	 * In multiqueue mode, we let each queue be private to one cpu
	 * by setting the affinity hint to eliminate contention.
	 *
	 * TODO: add cpu hotplug support by registering a cpu notifier.
	 */
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
		if (++i >= vcrypto->max_data_queues)
			break;
	}

	vcrypto->affinity_hint_set = true;
}

static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
	kfree(vi->data_vq);
}

static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
	int ret;

	/* Allocate the data queue array */
	ret = virtcrypto_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtcrypto_find_vqs(vi);
	if (ret)
		goto err_free;

	cpus_read_lock();
	virtcrypto_set_affinity(vi);
	cpus_read_unlock();

	return 0;

err_free:
	virtcrypto_free_queues(vi);
err:
	return ret;
}

static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
	u32 status;
	int err;

	virtio_cread_le(vcrypto->vdev,
			struct virtio_crypto_config, status, &status);

	/*
	 * Unknown status bits would be a host error and the driver
	 * should consider the device to be broken.
	 */
	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
		dev_warn(&vcrypto->vdev->dev,
			 "Unknown status bits: 0x%x\n", status);

		virtio_break_device(vcrypto->vdev);
		return -EPERM;
	}

	if (vcrypto->status == status)
		return 0;

	vcrypto->status = status;

	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
		err = virtcrypto_dev_start(vcrypto);
		if (err) {
			dev_err(&vcrypto->vdev->dev,
				"Failed to start virtio crypto device.\n");

			return -EPERM;
		}
		dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
	} else {
		virtcrypto_dev_stop(vcrypto);
		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
	}

	return 0;
}

static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
	int32_t i;
	int ret;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		if (vcrypto->data_vq[i].engine) {
			ret = crypto_engine_start(vcrypto->data_vq[i].engine);
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	while (--i >= 0)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);

	return ret;
}

static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
	u32 i;

	for (i = 0; i < vcrypto->max_data_queues; i++)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);
}

static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
	struct virtio_device *vdev = vcrypto->vdev;

	virtcrypto_clean_affinity(vcrypto, -1);

	vdev->config->del_vqs(vdev);

	virtcrypto_free_queues(vcrypto);
}

static void vcrypto_config_changed_work(struct work_struct *work)
{
	struct virtio_crypto *vcrypto =
		container_of(work, struct virtio_crypto, config_work);

	virtcrypto_update_status(vcrypto);
}

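/*
 * Probe: read the device configuration space, add the device to the global
 * device table, set up virtqueues and crypto engines, and bring the device
 * online once the host reports it is ready.
 */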
static int virtcrypto_probe(struct virtio_device *vdev)
{
	int err = -EFAULT;
	struct virtio_crypto *vcrypto;
	u32 max_data_queues = 0, max_cipher_key_len = 0;
	u32 max_auth_key_len = 0;
	u64 max_size = 0;
	u32 cipher_algo_l = 0;
	u32 cipher_algo_h = 0;
	u32 hash_algo = 0;
	u32 mac_algo_l = 0;
	u32 mac_algo_h = 0;
	u32 aead_algo = 0;
	u32 akcipher_algo = 0;
	u32 crypto_services = 0;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
		/*
		 * If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow.
		 */
		dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
			       dev_to_node(&vdev->dev));
	if (!vcrypto)
		return -ENOMEM;

	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_dataqueues, &max_data_queues);
	if (max_data_queues < 1)
		max_data_queues = 1;

	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_cipher_key_len, &max_cipher_key_len);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_auth_key_len, &max_auth_key_len);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_size, &max_size);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			crypto_services, &crypto_services);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_l, &cipher_algo_l);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_h, &cipher_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			hash_algo, &hash_algo);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_l, &mac_algo_l);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_h, &mac_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			aead_algo, &aead_algo);
	if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
		virtio_cread_le(vdev, struct virtio_crypto_config,
				akcipher_algo, &akcipher_algo);

	/* Add virtio crypto device to global table */
	err = virtcrypto_devmgr_add_dev(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
		goto free;
	}
	vcrypto->owner = THIS_MODULE;
	vdev->priv = vcrypto;
	vcrypto->vdev = vdev;

	spin_lock_init(&vcrypto->ctrl_lock);

	/* Use single data queue as default */
	vcrypto->curr_queue = 1;
	vcrypto->max_data_queues = max_data_queues;
	vcrypto->max_cipher_key_len = max_cipher_key_len;
	vcrypto->max_auth_key_len = max_auth_key_len;
	vcrypto->max_size = max_size;
	vcrypto->crypto_services = crypto_services;
	vcrypto->cipher_algo_l = cipher_algo_l;
	vcrypto->cipher_algo_h = cipher_algo_h;
	vcrypto->mac_algo_l = mac_algo_l;
	vcrypto->mac_algo_h = mac_algo_h;
	vcrypto->hash_algo = hash_algo;
	vcrypto->aead_algo = aead_algo;
	vcrypto->akcipher_algo = akcipher_algo;

	dev_info(&vdev->dev,
		 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
		 vcrypto->max_data_queues,
		 vcrypto->max_cipher_key_len,
		 vcrypto->max_auth_key_len,
		 vcrypto->max_size);

	err = virtcrypto_init_vqs(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to initialize vqs.\n");
		goto free_dev;
	}

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_update_status(vcrypto);
	if (err)
		goto free_engines;

	INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	virtio_reset_device(vdev);
	virtcrypto_del_vqs(vcrypto);
free_dev:
	virtcrypto_devmgr_rm_dev(vcrypto);
free:
	kfree(vcrypto);
	return err;
}

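/*
 * Reclaim requests still sitting on the data virtqueues.  Called after the
 * device has been reset, so no further completions can arrive.
 */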
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
	struct virtio_crypto_request *vc_req;
	int i;
	struct virtqueue *vq;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		vq = vcrypto->data_vq[i].vq;
		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
			kfree(vc_req->req_data);
			kfree(vc_req->sgs);
		}
		cond_resched();
	}
}

static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int i;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	flush_work(&vcrypto->config_work);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	for (i = 0; i < vcrypto->max_data_queues; i++)
		tasklet_kill(&vcrypto->data_vq[i].done_task);
	virtio_reset_device(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}

static void virtcrypto_config_changed(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	schedule_work(&vcrypto->config_work);
}

#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	flush_work(&vcrypto->config_work);
	virtio_reset_device(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	return 0;
}

static int virtcrypto_restore(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int err;

	err = virtcrypto_init_vqs(vcrypto);
	if (err)
		return err;

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_dev_start(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
		goto free_engines;
	}

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	virtio_reset_device(vdev);
	virtcrypto_del_vqs(vcrypto);
	return err;
}
#endif

static const unsigned int features[] = {
	/* none */
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_crypto_driver = {
	.driver.name         = KBUILD_MODNAME,
	.feature_table       = features,
	.feature_table_size  = ARRAY_SIZE(features),
	.id_table            = id_table,
	.probe               = virtcrypto_probe,
	.remove              = virtcrypto_remove,
	.config_changed      = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze              = virtcrypto_freeze,
	.restore             = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");