// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"
#include "aead.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

#define QCE_DEFAULT_MEM_BANDWIDTH	393600

static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
	&aead_ops,
#endif
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, j, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret) {
			/* unwind the ops that were already registered */
			for (j = i - 1; j >= 0; j--)
				qce_ops[j]->unregister_algs(qce);
			return ret;
		}
	}

	return 0;
}

static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		crypto_request_complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		crypto_request_complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;

	/*
	 * Rx and tx pipes are treated as a pair inside CE.
	 * Pipe pair number depends on the actual BAM dma pipe
	 * that is used for transfers. The BAM dma pipes are passed
	 * from the device tree and used to derive the pipe pair
	 * id in the CE driver as follows.
	 *	BAM dma pipes(rx, tx)		CE pipe pair id
	 *		0,1			0
	 *		2,3			1
	 *		4,5			2
	 *		6,7			3
	 *		...
	 */
	qce->pipe_pair_id = qce->dma.rxchan->chan_id >> 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get_optional(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get_optional(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get_optional(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	qce->mem_path = devm_of_icc_get(qce->dev, "memory");
	if (IS_ERR(qce->mem_path))
		return PTR_ERR(qce->mem_path);

	ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->core);
	if (ret)
		goto err_mem_path_disable;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
err_mem_path_disable:
	icc_set_bw(qce->mem_path, 0, 0);

	return ret;
}

static void qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{ .compatible = "qcom,crypto-v5.4", },
	{ .compatible = "qcom,qce", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");