/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

static const struct qce_algo_ops *qce_ops[] = {
	&ablkcipher_ops,
	&ahash_ops,
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

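/*
 * Installed in probe as qce->async_req_done: records the outcome of the
 * request in flight and defers its completion to the done tasklet, so
 * it is safe to call from atomic context (e.g. a DMA completion callback).
 */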
static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	struct resource *res;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	qce->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}

static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");