// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-core.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
 *
 * Core file which registers crypto algorithms supported by the CryptoEngine
 */

#include <crypto/engine.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "sl3516-ce.h"

static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce)
{
	const size_t sz = sizeof(struct descriptor) * MAXDESC;
	int i;

	ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL);
	if (!ce->tx)
		return -ENOMEM;
	ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL);
	if (!ce->rx)
		goto err_rx;

	for (i = 0; i < MAXDESC; i++) {
		ce->tx[i].frame_ctrl.bits.own = CE_CPU;
		ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor);
	}
	ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx;

	for (i = 0; i < MAXDESC; i++) {
		ce->rx[i].frame_ctrl.bits.own = CE_CPU;
		ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor);
	}
	ce->rx[MAXDESC - 1].next_desc.next_descriptor = ce->drx;

	ce->pctrl = dma_alloc_coherent(ce->dev, sizeof(struct pkt_control_ecb),
				       &ce->dctrl, GFP_KERNEL);
	if (!ce->pctrl)
		goto err_pctrl;

	return 0;
err_pctrl:
	dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
err_rx:
	dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
	return -ENOMEM;
}

static void sl3516_ce_free_descs(struct sl3516_ce_dev *ce)
{
	const size_t sz = sizeof(struct descriptor) * MAXDESC;

	dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
	dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
	dma_free_coherent(ce->dev, sizeof(struct pkt_control_ecb), ce->pctrl,
			  ce->dctrl);
}

static void start_dma_tx(struct sl3516_ce_dev *ce)
{
	u32 v;

	v = TXDMA_CTRL_START | TXDMA_CTRL_CHAIN_MODE | TXDMA_CTRL_CONTINUE |
	    TXDMA_CTRL_INT_FAIL | TXDMA_CTRL_INT_PERR | TXDMA_CTRL_BURST_UNK;

	writel(v, ce->base + IPSEC_TXDMA_CTRL);
}

static void start_dma_rx(struct sl3516_ce_dev *ce)
{
	u32 v;

	v = RXDMA_CTRL_START | RXDMA_CTRL_CHAIN_MODE | RXDMA_CTRL_CONTINUE |
	    RXDMA_CTRL_BURST_UNK | RXDMA_CTRL_INT_FINISH |
	    RXDMA_CTRL_INT_FAIL | RXDMA_CTRL_INT_PERR |
	    RXDMA_CTRL_INT_EOD | RXDMA_CTRL_INT_EOF;

	writel(v, ce->base + IPSEC_RXDMA_CTRL);
}

static struct descriptor *get_desc_tx(struct sl3516_ce_dev *ce)
{
	struct descriptor *dd;

	dd = &ce->tx[ce->ctx];
	ce->ctx++;
	if (ce->ctx >= MAXDESC)
		ce->ctx = 0;
	return dd;
}

static struct descriptor *get_desc_rx(struct sl3516_ce_dev *ce)
{
	struct descriptor *rdd;

	rdd = &ce->rx[ce->crx];
	ce->crx++;
	if (ce->crx >= MAXDESC)
		ce->crx = 0;
	return rdd;
}

int sl3516_ce_run_task(struct sl3516_ce_dev *ce, struct sl3516_ce_cipher_req_ctx *rctx,
		       const char *name)
{
	struct descriptor *dd, *rdd = NULL;
	u32 v;
	int i, err = 0;

	ce->stat_req++;

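	/*
	 * Build the DMA job: one RX descriptor is queued per destination SG
	 * entry, then each source SG entry gets two TX descriptors, a
	 * control descriptor carrying the packet-control block (ce->dctrl)
	 * followed by a data descriptor for the payload. Completion is
	 * signalled by the RX end-of-frame interrupt.
	 */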
	reinit_completion(&ce->complete);
	ce->status = 0;

	for (i = 0; i < rctx->nr_sgd; i++) {
		dev_dbg(ce->dev, "%s handle DST SG %d/%d len=%d\n", __func__,
			i, rctx->nr_sgd, rctx->t_dst[i].len);
		rdd = get_desc_rx(ce);
		rdd->buf_adr = rctx->t_dst[i].addr;
		rdd->frame_ctrl.bits.buffer_size = rctx->t_dst[i].len;
		rdd->frame_ctrl.bits.own = CE_DMA;
	}
	rdd->next_desc.bits.eofie = 1;

	for (i = 0; i < rctx->nr_sgs; i++) {
		dev_dbg(ce->dev, "%s handle SRC SG %d/%d len=%d\n", __func__,
			i, rctx->nr_sgs, rctx->t_src[i].len);
		rctx->h->algorithm_len = rctx->t_src[i].len;

		dd = get_desc_tx(ce);
		dd->frame_ctrl.raw = 0;
		dd->flag_status.raw = 0;
		dd->frame_ctrl.bits.buffer_size = rctx->pctrllen;
		dd->buf_adr = ce->dctrl;
		dd->flag_status.tx_flag.tqflag = rctx->tqflag;
		dd->next_desc.bits.eofie = 0;
		dd->next_desc.bits.dec = 0;
		dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
		dd->frame_ctrl.bits.own = CE_DMA;

		dd = get_desc_tx(ce);
		dd->frame_ctrl.raw = 0;
		dd->flag_status.raw = 0;
		dd->frame_ctrl.bits.buffer_size = rctx->t_src[i].len;
		dd->buf_adr = rctx->t_src[i].addr;
		dd->flag_status.tx_flag.tqflag = 0;
		dd->next_desc.bits.eofie = 0;
		dd->next_desc.bits.dec = 0;
		dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
		dd->frame_ctrl.bits.own = CE_DMA;
		start_dma_tx(ce);
		start_dma_rx(ce);
	}
	wait_for_completion_interruptible_timeout(&ce->complete,
						  msecs_to_jiffies(5000));
	if (ce->status == 0) {
		dev_err(ce->dev, "DMA timeout for %s\n", name);
		err = -EFAULT;
	}
	v = readl(ce->base + IPSEC_STATUS_REG);
	if (v & 0xFFF) {
		dev_err(ce->dev, "IPSEC_STATUS_REG %x\n", v);
		err = -EFAULT;
	}

	return err;
}

static irqreturn_t ce_irq_handler(int irq, void *data)
{
	struct sl3516_ce_dev *ce = (struct sl3516_ce_dev *)data;
	u32 v;

	ce->stat_irq++;

	v = readl(ce->base + IPSEC_DMA_STATUS);
	writel(v, ce->base + IPSEC_DMA_STATUS);

	if (v & DMA_STATUS_TS_DERR)
		dev_err(ce->dev, "AHB bus error while TX\n");
	if (v & DMA_STATUS_TS_PERR)
		dev_err(ce->dev, "TX descriptor protocol error\n");
	if (v & DMA_STATUS_RS_DERR)
		dev_err(ce->dev, "AHB bus error while RX\n");
	if (v & DMA_STATUS_RS_PERR)
		dev_err(ce->dev, "RX descriptor protocol error\n");

	if (v & DMA_STATUS_TS_EOFI)
		ce->stat_irq_tx++;
	if (v & DMA_STATUS_RS_EOFI) {
		ce->status = 1;
		complete(&ce->complete);
		ce->stat_irq_rx++;
		return IRQ_HANDLED;
	}

	return IRQ_HANDLED;
}

static struct sl3516_ce_alg_template ce_algs[] = {
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.mode = ECB_AES,
	.alg.skcipher.base = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-sl3516",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sl3516_ce_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sl3516_ce_cipher_init,
			.cra_exit = sl3516_ce_cipher_exit,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = sl3516_ce_aes_setkey,
		.encrypt = sl3516_ce_skencrypt,
		.decrypt = sl3516_ce_skdecrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = sl3516_ce_handle_cipher_request,
	},
},
};

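/*
 * Dump the global and per-algorithm usage counters backing the "stats"
 * debugfs file created at probe time.
 */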
static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
{
	struct sl3516_ce_dev *ce = seq->private;
	unsigned int i;

	seq_printf(seq, "HWRNG %lu %lu\n",
		   ce->hwrng_stat_req, ce->hwrng_stat_bytes);
	seq_printf(seq, "IRQ %lu\n", ce->stat_irq);
	seq_printf(seq, "IRQ TX %lu\n", ce->stat_irq_tx);
	seq_printf(seq, "IRQ RX %lu\n", ce->stat_irq_rx);
	seq_printf(seq, "nreq %lu\n", ce->stat_req);
	seq_printf(seq, "fallback SG count TX %lu\n", ce->fallback_sg_count_tx);
	seq_printf(seq, "fallback SG count RX %lu\n", ce->fallback_sg_count_rx);
	seq_printf(seq, "fallback modulo16 %lu\n", ce->fallback_mod16);
	seq_printf(seq, "fallback align16 %lu\n", ce->fallback_align16);
	seq_printf(seq, "fallback not same len %lu\n", ce->fallback_not_same_len);

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
				   ce_algs[i].alg.skcipher.base.base.cra_driver_name,
				   ce_algs[i].alg.skcipher.base.base.cra_name,
				   ce_algs[i].stat_req, ce_algs[i].stat_fb);
			break;
		}
	}
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);

static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
{
	int err;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		ce_algs[i].ce = ce;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "Register %s\n",
				 ce_algs[i].alg.skcipher.base.base.cra_name);
			err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
			if (err) {
				dev_err(ce->dev, "Failed to register %s\n",
					ce_algs[i].alg.skcipher.base.base.cra_name);
				ce_algs[i].ce = NULL;
				return err;
			}
			break;
		default:
			ce_algs[i].ce = NULL;
			dev_err(ce->dev, "Tried to register an unknown algorithm\n");
		}
	}
	return 0;
}

static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "Unregister %d %s\n", i,
				 ce_algs[i].alg.skcipher.base.base.cra_name);
			crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
			break;
		}
	}
}

static void sl3516_ce_start(struct sl3516_ce_dev *ce)
{
	ce->ctx = 0;
	ce->crx = 0;
	writel(ce->dtx, ce->base + IPSEC_TXDMA_CURR_DESC);
	writel(ce->drx, ce->base + IPSEC_RXDMA_CURR_DESC);
	writel(0, ce->base + IPSEC_DMA_STATUS);
}

/*
 * Power management strategy: The device is suspended unless a TFM exists for
 * one of the algorithms proposed by this driver.
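 *
 * Suspend asserts the reset line and gates the clock; resume re-enables the
 * clock, deasserts the reset and reprograms the descriptor base addresses
 * via sl3516_ce_start().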
 */
static int sl3516_ce_pm_suspend(struct device *dev)
{
	struct sl3516_ce_dev *ce = dev_get_drvdata(dev);

	reset_control_assert(ce->reset);
	clk_disable_unprepare(ce->clks);
	return 0;
}

static int sl3516_ce_pm_resume(struct device *dev)
{
	struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(ce->clks);
	if (err) {
		dev_err(ce->dev, "Cannot prepare_enable clock\n");
		goto error;
	}
	err = reset_control_deassert(ce->reset);
	if (err) {
		dev_err(ce->dev, "Cannot deassert reset control\n");
		goto error;
	}

	sl3516_ce_start(ce);

	return 0;
error:
	sl3516_ce_pm_suspend(dev);
	return err;
}

static const struct dev_pm_ops sl3516_ce_pm_ops = {
	SET_RUNTIME_PM_OPS(sl3516_ce_pm_suspend, sl3516_ce_pm_resume, NULL)
};

static int sl3516_ce_pm_init(struct sl3516_ce_dev *ce)
{
	int err;

	pm_runtime_use_autosuspend(ce->dev);
	pm_runtime_set_autosuspend_delay(ce->dev, 2000);

	err = pm_runtime_set_suspended(ce->dev);
	if (err)
		return err;
	pm_runtime_enable(ce->dev);
	return 0;
}

static void sl3516_ce_pm_exit(struct sl3516_ce_dev *ce)
{
	pm_runtime_disable(ce->dev);
}

static int sl3516_ce_probe(struct platform_device *pdev)
{
	struct sl3516_ce_dev *ce;
	int err, irq;
	u32 v;

	ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	ce->dev = &pdev->dev;
	platform_set_drvdata(pdev, ce);

	ce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ce->base))
		return PTR_ERR(ce->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "crypto", ce);
	if (err) {
		dev_err(ce->dev, "Cannot request Crypto Engine IRQ (err=%d)\n", err);
		return err;
	}

	ce->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(ce->reset))
		return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
				     "No reset control found\n");
	ce->clks = devm_clk_get(ce->dev, NULL);
	if (IS_ERR(ce->clks)) {
		err = PTR_ERR(ce->clks);
		dev_err(ce->dev, "Cannot get clock err=%d\n", err);
		return err;
	}

	err = sl3516_ce_desc_init(ce);
	if (err)
		return err;

	err = sl3516_ce_pm_init(ce);
	if (err)
		goto error_pm;

	init_completion(&ce->complete);

	ce->engine = crypto_engine_alloc_init(ce->dev, true);
	if (!ce->engine) {
		dev_err(ce->dev, "Cannot allocate engine\n");
		err = -ENOMEM;
		goto error_engine;
	}

	err = crypto_engine_start(ce->engine);
	if (err) {
		dev_err(ce->dev, "Cannot start engine\n");
		goto error_engine;
	}

	err = sl3516_ce_register_algs(ce);
	if (err)
		goto error_alg;

	err = sl3516_ce_rng_register(ce);
	if (err)
		goto error_rng;

	err = pm_runtime_resume_and_get(ce->dev);
	if (err < 0)
		goto error_pmuse;

	v = readl(ce->base + IPSEC_ID);
	dev_info(ce->dev, "SL3516 dev %lx rev %lx\n",
		 v & GENMASK(31, 4),
		 v & GENMASK(3, 0));
	v = readl(ce->base + IPSEC_DMA_DEVICE_ID);
	dev_info(ce->dev, "SL3516 DMA dev %lx rev %lx\n",
		 v & GENMASK(15, 4),
		 v & GENMASK(3, 0));

	pm_runtime_put_sync(ce->dev);

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG)) {
		struct dentry *dbgfs_dir __maybe_unused;
		struct dentry *dbgfs_stats __maybe_unused;

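		/*
		 * With IS_ENABLED() this block is always compiled; when
		 * CONFIG_CRYPTO_DEV_SL3516_DEBUG is off the __maybe_unused
		 * annotations silence set-but-unused warnings, since the
		 * stores into ce below are compiled out by the ifdef.
		 */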
		/* Ignore errors from debugfs */
		dbgfs_dir = debugfs_create_dir("sl3516", NULL);
		dbgfs_stats = debugfs_create_file("stats", 0444,
						  dbgfs_dir, ce,
						  &sl3516_ce_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
		ce->dbgfs_dir = dbgfs_dir;
		ce->dbgfs_stats = dbgfs_stats;
#endif
	}

	return 0;
error_pmuse:
	sl3516_ce_rng_unregister(ce);
error_rng:
	sl3516_ce_unregister_algs(ce);
error_alg:
	crypto_engine_exit(ce->engine);
error_engine:
	sl3516_ce_pm_exit(ce);
error_pm:
	sl3516_ce_free_descs(ce);
	return err;
}

static void sl3516_ce_remove(struct platform_device *pdev)
{
	struct sl3516_ce_dev *ce = platform_get_drvdata(pdev);

	sl3516_ce_rng_unregister(ce);
	sl3516_ce_unregister_algs(ce);
	crypto_engine_exit(ce->engine);
	sl3516_ce_pm_exit(ce);
	sl3516_ce_free_descs(ce);

#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
	debugfs_remove_recursive(ce->dbgfs_dir);
#endif
}

static const struct of_device_id sl3516_ce_crypto_of_match_table[] = {
	{ .compatible = "cortina,sl3516-crypto"},
	{}
};
MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);

static struct platform_driver sl3516_ce_driver = {
	.probe = sl3516_ce_probe,
	.remove = sl3516_ce_remove,
	.driver = {
		.name = "sl3516-crypto",
		.pm = &sl3516_ce_pm_ops,
		.of_match_table = sl3516_ce_crypto_of_match_table,
	},
};

module_platform_driver(sl3516_ce_driver);

MODULE_DESCRIPTION("SL3516 cryptographic offloader");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");