1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Cryptographic API.
4 *
5 * Support for OMAP SHA1/MD5 HW acceleration.
6 *
7 * Copyright (c) 2010 Nokia Corporation
8 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
9 * Copyright (c) 2011 Texas Instruments Incorporated
10 *
11 * Some ideas are from old omap-sha1-md5.c driver.
12 */
13
14 #define pr_fmt(fmt) "%s: " fmt, __func__
15
16 #include <crypto/engine.h>
17 #include <crypto/hmac.h>
18 #include <crypto/internal/hash.h>
19 #include <crypto/scatterwalk.h>
20 #include <crypto/sha1.h>
21 #include <crypto/sha2.h>
22 #include <linux/err.h>
23 #include <linux/device.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dmaengine.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/io.h>
29 #include <linux/irq.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/of.h>
33 #include <linux/of_address.h>
34 #include <linux/of_irq.h>
35 #include <linux/platform_device.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/scatterlist.h>
38 #include <linux/slab.h>
39 #include <linux/string.h>
40 #include <linux/sysfs.h>
41 #include <linux/workqueue.h>
42
43 #define MD5_DIGEST_SIZE 16
44
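/*
 * Register offsets differ between OMAP generations, so most of them are
 * looked up through the per-SoC omap_sham_pdata rather than hard-coded.
 */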
45 #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x) * 0x04))
46 #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04))
47 #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs)
48
49 #define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + ((x) * 0x04))
50
51 #define SHA_REG_CTRL 0x18
52 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
53 #define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
54 #define SHA_REG_CTRL_ALGO_CONST (1 << 3)
55 #define SHA_REG_CTRL_ALGO (1 << 2)
56 #define SHA_REG_CTRL_INPUT_READY (1 << 1)
57 #define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
58
59 #define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs)
60
61 #define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs)
62 #define SHA_REG_MASK_DMA_EN (1 << 3)
63 #define SHA_REG_MASK_IT_EN (1 << 2)
64 #define SHA_REG_MASK_SOFTRESET (1 << 1)
65 #define SHA_REG_AUTOIDLE (1 << 0)
66
67 #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs)
68 #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
69
70 #define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs)
71 #define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7)
72 #define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5)
73 #define SHA_REG_MODE_CLOSE_HASH (1 << 4)
74 #define SHA_REG_MODE_ALGO_CONSTANT (1 << 3)
75
76 #define SHA_REG_MODE_ALGO_MASK (7 << 0)
77 #define SHA_REG_MODE_ALGO_MD5_128 (0 << 1)
78 #define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1)
79 #define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1)
80 #define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1)
81 #define SHA_REG_MODE_ALGO_SHA2_384 (1 << 0)
82 #define SHA_REG_MODE_ALGO_SHA2_512 (3 << 0)
83
84 #define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs)
85
86 #define SHA_REG_IRQSTATUS 0x118
87 #define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3)
88 #define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
89 #define SHA_REG_IRQSTATUS_INPUT_RDY (1 << 1)
90 #define SHA_REG_IRQSTATUS_OUTPUT_RDY (1 << 0)
91
92 #define SHA_REG_IRQENA 0x11C
93 #define SHA_REG_IRQENA_CTX_RDY (1 << 3)
94 #define SHA_REG_IRQENA_PARTHASH_RDY (1 << 2)
95 #define SHA_REG_IRQENA_INPUT_RDY (1 << 1)
96 #define SHA_REG_IRQENA_OUTPUT_RDY (1 << 0)
97
98 #define DEFAULT_TIMEOUT_INTERVAL HZ
99
100 #define DEFAULT_AUTOSUSPEND_DELAY 1000
101
102 /* mostly device flags */
103 #define FLAGS_FINAL 1
104 #define FLAGS_DMA_ACTIVE 2
105 #define FLAGS_OUTPUT_READY 3
106 #define FLAGS_CPU 5
107 #define FLAGS_DMA_READY 6
108 #define FLAGS_AUTO_XOR 7
109 #define FLAGS_BE32_SHA1 8
110 #define FLAGS_SGS_COPIED 9
111 #define FLAGS_SGS_ALLOCED 10
112 #define FLAGS_HUGE 11
113
114 /* context flags */
115 #define FLAGS_FINUP 16
116
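/*
 * Unlike the plain flag bits above, FLAGS_MODE_* are the SHA_REG_MODE
 * algorithm field shifted up by FLAGS_MODE_SHIFT, so they are ORed into
 * ctx->flags directly instead of being used with BIT()/test_bit().
 */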
117 #define FLAGS_MODE_SHIFT 18
118 #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
119 #define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
120 #define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
121 #define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
122 #define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
123 #define FLAGS_MODE_SHA384 (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
124 #define FLAGS_MODE_SHA512 (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
125
126 #define FLAGS_HMAC 21
127 #define FLAGS_ERROR 22
128
129 #define OP_UPDATE 1
130 #define OP_FINAL 2
131
132 #define OMAP_ALIGN_MASK (sizeof(u32)-1)
133 #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
134
135 #define BUFLEN SHA512_BLOCK_SIZE
136 #define OMAP_SHA_DMA_THRESHOLD 256
137
138 #define OMAP_SHA_MAX_DMA_LEN (1024 * 2048)
139
140 struct omap_sham_dev;
141
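/*
 * Per-request state. The trailing flexible buffer[] holds data that does
 * not yet fill a complete block and is carried over to the next update.
 */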
142 struct omap_sham_reqctx {
143 struct omap_sham_dev *dd;
144 unsigned long flags;
145 u8 op;
146
147 u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
148 size_t digcnt;
149 size_t bufcnt;
150 size_t buflen;
151
152 /* walk state */
153 struct scatterlist *sg;
154 struct scatterlist sgl[2];
155 int offset; /* offset in current sg */
156 int sg_len;
157 unsigned int total; /* total request */
158
159 u8 buffer[] OMAP_ALIGNED;
160 };
161
162 struct omap_sham_hmac_ctx {
163 struct crypto_shash *shash;
164 u8 ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
165 u8 opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
166 };
167
168 struct omap_sham_ctx {
169 unsigned long flags;
170
171 /* fallback stuff */
172 struct crypto_shash *fallback;
173
174 struct omap_sham_hmac_ctx base[];
175 };
176
177 #define OMAP_SHAM_QUEUE_LENGTH 10
178
179 struct omap_sham_algs_info {
180 struct ahash_engine_alg *algs_list;
181 unsigned int size;
182 unsigned int registered;
183 };
184
185 struct omap_sham_pdata {
186 struct omap_sham_algs_info *algs_info;
187 unsigned int algs_info_size;
188 unsigned long flags;
189 int digest_size;
190
191 void (*copy_hash)(struct ahash_request *req, int out);
192 void (*write_ctrl)(struct omap_sham_dev *dd, size_t length,
193 int final, int dma);
194 void (*trigger)(struct omap_sham_dev *dd, size_t length);
195 int (*poll_irq)(struct omap_sham_dev *dd);
196 irqreturn_t (*intr_hdlr)(int irq, void *dev_id);
197
198 u32 odigest_ofs;
199 u32 idigest_ofs;
200 u32 din_ofs;
201 u32 digcnt_ofs;
202 u32 rev_ofs;
203 u32 mask_ofs;
204 u32 sysstatus_ofs;
205 u32 mode_ofs;
206 u32 length_ofs;
207
208 u32 major_mask;
209 u32 major_shift;
210 u32 minor_mask;
211 u32 minor_shift;
212 };
213
214 struct omap_sham_dev {
215 struct list_head list;
216 unsigned long phys_base;
217 struct device *dev;
218 void __iomem *io_base;
219 int irq;
220 int err;
221 struct dma_chan *dma_lch;
222 struct work_struct done_task;
223 u8 polling_mode;
224 u8 xmit_buf[BUFLEN] OMAP_ALIGNED;
225
226 unsigned long flags;
227 int fallback_sz;
228 struct crypto_queue queue;
229 struct ahash_request *req;
230 struct crypto_engine *engine;
231
232 const struct omap_sham_pdata *pdata;
233 };
234
235 struct omap_sham_drv {
236 struct list_head dev_list;
237 spinlock_t lock;
238 unsigned long flags;
239 };
240
241 static struct omap_sham_drv sham = {
242 .dev_list = LIST_HEAD_INIT(sham.dev_list),
243 .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
244 };
245
246 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op);
247 static void omap_sham_finish_req(struct ahash_request *req, int err);
248
249 static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
250 {
251 return __raw_readl(dd->io_base + offset);
252 }
253
254 static inline void omap_sham_write(struct omap_sham_dev *dd,
255 u32 offset, u32 value)
256 {
257 __raw_writel(value, dd->io_base + offset);
258 }
259
260 static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
261 u32 value, u32 mask)
262 {
263 u32 val;
264
265 val = omap_sham_read(dd, address);
266 val &= ~mask;
267 val |= value;
268 omap_sham_write(dd, address, val);
269 }
270
271 static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
272 {
273 unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
274
275 while (!(omap_sham_read(dd, offset) & bit)) {
276 if (time_is_before_jiffies(timeout))
277 return -ETIMEDOUT;
278 }
279
280 return 0;
281 }
282
283 static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
284 {
285 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
286 struct omap_sham_dev *dd = ctx->dd;
287 u32 *hash = (u32 *)ctx->digest;
288 int i;
289
290 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
291 if (out)
292 hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
293 else
294 omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
295 }
296 }
297
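/*
 * OMAP4-style IP keeps the HMAC outer digest in the ODIGEST registers, so
 * it is saved/restored alongside the inner digest handled by the OMAP2
 * variant of this helper.
 */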
298 static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
299 {
300 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
301 struct omap_sham_dev *dd = ctx->dd;
302 int i;
303
304 if (ctx->flags & BIT(FLAGS_HMAC)) {
305 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
306 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
307 struct omap_sham_hmac_ctx *bctx = tctx->base;
308 u32 *opad = (u32 *)bctx->opad;
309
310 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
311 if (out)
312 opad[i] = omap_sham_read(dd,
313 SHA_REG_ODIGEST(dd, i));
314 else
315 omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
316 opad[i]);
317 }
318 }
319
320 omap_sham_copy_hash_omap2(req, out);
321 }
322
323 static void omap_sham_copy_ready_hash(struct ahash_request *req)
324 {
325 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
326 u32 *in = (u32 *)ctx->digest;
327 u32 *hash = (u32 *)req->result;
328 int i, d, big_endian = 0;
329
330 if (!hash)
331 return;
332
333 switch (ctx->flags & FLAGS_MODE_MASK) {
334 case FLAGS_MODE_MD5:
335 d = MD5_DIGEST_SIZE / sizeof(u32);
336 break;
337 case FLAGS_MODE_SHA1:
338 /* OMAP2 SHA1 is big endian */
339 if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
340 big_endian = 1;
341 d = SHA1_DIGEST_SIZE / sizeof(u32);
342 break;
343 case FLAGS_MODE_SHA224:
344 d = SHA224_DIGEST_SIZE / sizeof(u32);
345 break;
346 case FLAGS_MODE_SHA256:
347 d = SHA256_DIGEST_SIZE / sizeof(u32);
348 break;
349 case FLAGS_MODE_SHA384:
350 d = SHA384_DIGEST_SIZE / sizeof(u32);
351 break;
352 case FLAGS_MODE_SHA512:
353 d = SHA512_DIGEST_SIZE / sizeof(u32);
354 break;
355 default:
356 d = 0;
357 }
358
359 if (big_endian)
360 for (i = 0; i < d; i++)
361 put_unaligned(be32_to_cpup((__be32 *)in + i), &hash[i]);
362 else
363 for (i = 0; i < d; i++)
364 put_unaligned(le32_to_cpup((__le32 *)in + i), &hash[i]);
365 }
366
367 static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
368 int final, int dma)
369 {
370 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
371 u32 val = length << 5, mask;
372
373 if (likely(ctx->digcnt))
374 omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
375
376 omap_sham_write_mask(dd, SHA_REG_MASK(dd),
377 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
378 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
379 /*
380 * Setting ALGO_CONST only for the first iteration
381 * and CLOSE_HASH only for the last one.
382 */
383 if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
384 val |= SHA_REG_CTRL_ALGO;
385 if (!ctx->digcnt)
386 val |= SHA_REG_CTRL_ALGO_CONST;
387 if (final)
388 val |= SHA_REG_CTRL_CLOSE_HASH;
389
390 mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
391 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
392
393 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
394 }
395
396 static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
397 {
398 }
399
400 static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
401 {
402 return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
403 }
404
405 static int get_block_size(struct omap_sham_reqctx *ctx)
406 {
407 int d;
408
409 switch (ctx->flags & FLAGS_MODE_MASK) {
410 case FLAGS_MODE_MD5:
411 case FLAGS_MODE_SHA1:
412 d = SHA1_BLOCK_SIZE;
413 break;
414 case FLAGS_MODE_SHA224:
415 case FLAGS_MODE_SHA256:
416 d = SHA256_BLOCK_SIZE;
417 break;
418 case FLAGS_MODE_SHA384:
419 case FLAGS_MODE_SHA512:
420 d = SHA512_BLOCK_SIZE;
421 break;
422 default:
423 d = 0;
424 }
425
426 return d;
427 }
428
429 static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
430 u32 *value, int count)
431 {
432 for (; count--; value++, offset += 4)
433 omap_sham_write(dd, offset, *value);
434 }
435
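/*
 * Program mode, IRQ and DMA enables for OMAP4-style IP. On the first block
 * of an HMAC request the key material kept in bctx->ipad is split across
 * the ODIGEST/IDIGEST registers so the accelerator can process the key
 * itself (SHA_REG_MODE_HMAC_KEY_PROC).
 */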
436 static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
437 int final, int dma)
438 {
439 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
440 u32 val, mask;
441
442 if (likely(ctx->digcnt))
443 omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
444
445 /*
446 * Setting ALGO_CONST only for the first iteration and
447 * CLOSE_HASH only for the last one. Note that flags mode bits
448 * correspond to algorithm encoding in mode register.
449 */
450 val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
451 if (!ctx->digcnt) {
452 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
453 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
454 struct omap_sham_hmac_ctx *bctx = tctx->base;
455 int bs, nr_dr;
456
457 val |= SHA_REG_MODE_ALGO_CONSTANT;
458
459 if (ctx->flags & BIT(FLAGS_HMAC)) {
460 bs = get_block_size(ctx);
461 nr_dr = bs / (2 * sizeof(u32));
462 val |= SHA_REG_MODE_HMAC_KEY_PROC;
463 omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
464 (u32 *)bctx->ipad, nr_dr);
465 omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
466 (u32 *)bctx->ipad + nr_dr, nr_dr);
467 ctx->digcnt += bs;
468 }
469 }
470
471 if (final) {
472 val |= SHA_REG_MODE_CLOSE_HASH;
473
474 if (ctx->flags & BIT(FLAGS_HMAC))
475 val |= SHA_REG_MODE_HMAC_OUTER_HASH;
476 }
477
478 mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
479 SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
480 SHA_REG_MODE_HMAC_KEY_PROC;
481
482 dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
483 omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
484 omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
485 omap_sham_write_mask(dd, SHA_REG_MASK(dd),
486 SHA_REG_MASK_IT_EN |
487 (dma ? SHA_REG_MASK_DMA_EN : 0),
488 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
489 }
490
491 static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
492 {
493 omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
494 }
495
496 static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
497 {
498 return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
499 SHA_REG_IRQSTATUS_INPUT_RDY);
500 }
501
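/*
 * PIO transmit path: feed the data one block at a time through the DIN
 * registers, polling the input-ready status between blocks.
 */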
502 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
503 int final)
504 {
505 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
506 int count, len32, bs32, offset = 0;
507 const u32 *buffer;
508 int mlen;
509 struct sg_mapping_iter mi;
510
511 dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n",
512 ctx->digcnt, length, final);
513
514 dd->pdata->write_ctrl(dd, length, final, 0);
515 dd->pdata->trigger(dd, length);
516
517 /* digcnt must be non-zero before the next lines so clocks can be disabled later */
518 ctx->digcnt += length;
519 ctx->total -= length;
520
521 if (final)
522 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
523
524 set_bit(FLAGS_CPU, &dd->flags);
525
526 len32 = DIV_ROUND_UP(length, sizeof(u32));
527 bs32 = get_block_size(ctx) / sizeof(u32);
528
529 sg_miter_start(&mi, ctx->sg, ctx->sg_len,
530 SG_MITER_FROM_SG | SG_MITER_ATOMIC);
531
532 mlen = 0;
533
534 while (len32) {
535 if (dd->pdata->poll_irq(dd))
536 return -ETIMEDOUT;
537
538 for (count = 0; count < min(len32, bs32); count++, offset++) {
539 if (!mlen) {
540 sg_miter_next(&mi);
541 mlen = mi.length;
542 if (!mlen) {
543 pr_err("sg miter failure.\n");
544 return -EINVAL;
545 }
546 offset = 0;
547 buffer = mi.addr;
548 }
549 omap_sham_write(dd, SHA_REG_DIN(dd, count),
550 buffer[offset]);
551 mlen -= 4;
552 }
553 len32 -= min(len32, bs32);
554 }
555
556 sg_miter_stop(&mi);
557
558 return -EINPROGRESS;
559 }
560
561 static void omap_sham_dma_callback(void *param)
562 {
563 struct omap_sham_dev *dd = param;
564
565 set_bit(FLAGS_DMA_READY, &dd->flags);
566 queue_work(system_bh_wq, &dd->done_task);
567 }
568
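/*
 * DMA transmit path: map the scatterlist and stream it to the DIN FIFO
 * through the dmaengine slave channel; completion is signalled via
 * omap_sham_dma_callback() and handled in the done_task work.
 */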
569 static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
570 int final)
571 {
572 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
573 struct dma_async_tx_descriptor *tx;
574 struct dma_slave_config cfg;
575 int ret;
576
577 dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n",
578 ctx->digcnt, length, final);
579
580 if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
581 dev_err(dd->dev, "dma_map_sg error\n");
582 return -EINVAL;
583 }
584
585 memset(&cfg, 0, sizeof(cfg));
586
587 cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
588 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
589 cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
590
591 ret = dmaengine_slave_config(dd->dma_lch, &cfg);
592 if (ret) {
593 pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
594 return ret;
595 }
596
597 tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
598 DMA_MEM_TO_DEV,
599 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
600
601 if (!tx) {
602 dev_err(dd->dev, "prep_slave_sg failed\n");
603 return -EINVAL;
604 }
605
606 tx->callback = omap_sham_dma_callback;
607 tx->callback_param = dd;
608
609 dd->pdata->write_ctrl(dd, length, final, 1);
610
611 ctx->digcnt += length;
612 ctx->total -= length;
613
614 if (final)
615 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
616
617 set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
618
619 dmaengine_submit(tx);
620 dma_async_issue_pending(dd->dma_lch);
621
622 dd->pdata->trigger(dd, length);
623
624 return -EINPROGRESS;
625 }
626
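/*
 * Build a fresh scatterlist that prepends the buffered bytes (xmit_buf)
 * and trims the request data to new_len, for cases where the original
 * list layout cannot be fed to the hardware as-is.
 */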
627 static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
628 struct scatterlist *sg, int bs, int new_len)
629 {
630 int n = sg_nents(sg);
631 struct scatterlist *tmp;
632 int offset = ctx->offset;
633
634 ctx->total = new_len;
635
636 if (ctx->bufcnt)
637 n++;
638
639 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
640 if (!ctx->sg)
641 return -ENOMEM;
642
643 sg_init_table(ctx->sg, n);
644
645 tmp = ctx->sg;
646
647 ctx->sg_len = 0;
648
649 if (ctx->bufcnt) {
650 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
651 tmp = sg_next(tmp);
652 ctx->sg_len++;
653 new_len -= ctx->bufcnt;
654 }
655
656 while (sg && new_len) {
657 int len = sg->length - offset;
658
659 if (len <= 0) {
660 offset -= sg->length;
661 sg = sg_next(sg);
662 continue;
663 }
664
665 if (new_len < len)
666 len = new_len;
667
668 if (len > 0) {
669 new_len -= len;
670 sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
671 offset = 0;
672 ctx->offset = 0;
673 ctx->sg_len++;
674 if (new_len <= 0)
675 break;
676 tmp = sg_next(tmp);
677 }
678
679 sg = sg_next(sg);
680 }
681
682 if (tmp)
683 sg_mark_end(tmp);
684
685 set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
686
687 ctx->offset += new_len - ctx->bufcnt;
688 ctx->bufcnt = 0;
689
690 return 0;
691 }
692
693 static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
694 struct scatterlist *sg, int bs,
695 unsigned int new_len)
696 {
697 int pages;
698 void *buf;
699
700 pages = get_order(new_len);
701
702 buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
703 if (!buf) {
704 pr_err("Couldn't allocate pages for unaligned cases.\n");
705 return -ENOMEM;
706 }
707
708 if (ctx->bufcnt)
709 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
710
711 scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
712 min(new_len, ctx->total) - ctx->bufcnt, 0);
713 sg_init_table(ctx->sgl, 1);
714 sg_set_buf(ctx->sgl, buf, new_len);
715 ctx->sg = ctx->sgl;
716 set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
717 ctx->sg_len = 1;
718 ctx->offset += new_len - ctx->bufcnt;
719 ctx->bufcnt = 0;
720 ctx->total = new_len;
721
722 return 0;
723 }
724
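/*
 * Check whether the request scatterlist satisfies the hardware alignment
 * and length constraints. If not, fall back to a flat bounce buffer
 * (omap_sham_copy_sgs) or to a rebuilt scatterlist
 * (omap_sham_copy_sg_lists).
 */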
725 static int omap_sham_align_sgs(struct scatterlist *sg,
726 int nbytes, int bs, bool final,
727 struct omap_sham_reqctx *rctx)
728 {
729 int n = 0;
730 bool aligned = true;
731 bool list_ok = true;
732 struct scatterlist *sg_tmp = sg;
733 int new_len;
734 int offset = rctx->offset;
735 int bufcnt = rctx->bufcnt;
736
737 if (!sg || !sg->length || !nbytes) {
738 if (bufcnt) {
739 bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
740 sg_init_table(rctx->sgl, 1);
741 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
742 rctx->sg = rctx->sgl;
743 rctx->sg_len = 1;
744 }
745
746 return 0;
747 }
748
749 new_len = nbytes;
750
751 if (offset)
752 list_ok = false;
753
754 if (final)
755 new_len = DIV_ROUND_UP(new_len, bs) * bs;
756 else
757 new_len = (new_len - 1) / bs * bs;
758
759 if (!new_len)
760 return 0;
761
762 if (nbytes != new_len)
763 list_ok = false;
764
765 while (nbytes > 0 && sg_tmp) {
766 n++;
767
768 if (bufcnt) {
769 if (!IS_ALIGNED(bufcnt, bs)) {
770 aligned = false;
771 break;
772 }
773 nbytes -= bufcnt;
774 bufcnt = 0;
775 if (!nbytes)
776 list_ok = false;
777
778 continue;
779 }
780
781 #ifdef CONFIG_ZONE_DMA
782 if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
783 aligned = false;
784 break;
785 }
786 #endif
787
788 if (offset < sg_tmp->length) {
789 if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
790 aligned = false;
791 break;
792 }
793
794 if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
795 aligned = false;
796 break;
797 }
798 }
799
800 if (offset) {
801 offset -= sg_tmp->length;
802 if (offset < 0) {
803 nbytes += offset;
804 offset = 0;
805 }
806 } else {
807 nbytes -= sg_tmp->length;
808 }
809
810 sg_tmp = sg_next(sg_tmp);
811
812 if (nbytes < 0) {
813 list_ok = false;
814 break;
815 }
816 }
817
818 if (new_len > OMAP_SHA_MAX_DMA_LEN) {
819 new_len = OMAP_SHA_MAX_DMA_LEN;
820 aligned = false;
821 }
822
823 if (!aligned)
824 return omap_sham_copy_sgs(rctx, sg, bs, new_len);
825 else if (!list_ok)
826 return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
827
828 rctx->total = new_len;
829 rctx->offset += new_len;
830 rctx->sg_len = n;
831 if (rctx->bufcnt) {
832 sg_init_table(rctx->sgl, 2);
833 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
834 sg_chain(rctx->sgl, 2, sg);
835 rctx->sg = rctx->sgl;
836 } else {
837 rctx->sg = sg;
838 }
839
840 return 0;
841 }
842
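/*
 * Prepare one request for the engine: merge previously buffered bytes with
 * the new data, align the scatterlist for the hardware, and stash any
 * trailing partial block in ctx->buffer for the next update.
 */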
843 static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
844 {
845 struct ahash_request *req = container_of(areq, struct ahash_request,
846 base);
847 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
848 int bs;
849 int ret;
850 unsigned int nbytes;
851 bool final = rctx->flags & BIT(FLAGS_FINUP);
852 bool update = rctx->op == OP_UPDATE;
853 int hash_later;
854
855 bs = get_block_size(rctx);
856
857 nbytes = rctx->bufcnt;
858
859 if (update)
860 nbytes += req->nbytes - rctx->offset;
861
862 dev_dbg(rctx->dd->dev,
863 "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n",
864 __func__, nbytes, bs, rctx->total, rctx->offset,
865 rctx->bufcnt);
866
867 if (!nbytes)
868 return 0;
869
870 rctx->total = nbytes;
871
872 if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
873 int len = bs - rctx->bufcnt % bs;
874
875 if (len > req->nbytes)
876 len = req->nbytes;
877 scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
878 0, len, 0);
879 rctx->bufcnt += len;
880 rctx->offset = len;
881 }
882
883 if (rctx->bufcnt)
884 memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
885
886 ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
887 if (ret)
888 return ret;
889
890 hash_later = nbytes - rctx->total;
891 if (hash_later < 0)
892 hash_later = 0;
893
894 if (hash_later && hash_later <= rctx->buflen) {
895 scatterwalk_map_and_copy(rctx->buffer,
896 req->src,
897 req->nbytes - hash_later,
898 hash_later, 0);
899
900 rctx->bufcnt = hash_later;
901 } else {
902 rctx->bufcnt = 0;
903 }
904
905 if (hash_later > rctx->buflen)
906 set_bit(FLAGS_HUGE, &rctx->dd->flags);
907
908 rctx->total = min(nbytes, rctx->total);
909
910 return 0;
911 }
912
913 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
914 {
915 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
916
917 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
918
919 clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
920
921 return 0;
922 }
923
924 static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
925 {
926 struct omap_sham_dev *dd;
927
928 if (ctx->dd)
929 return ctx->dd;
930
931 spin_lock_bh(&sham.lock);
932 dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
933 list_move_tail(&dd->list, &sham.dev_list);
934 ctx->dd = dd;
935 spin_unlock_bh(&sham.lock);
936
937 return dd;
938 }
939
940 static int omap_sham_init(struct ahash_request *req)
941 {
942 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
943 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
944 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
945 struct omap_sham_dev *dd;
946 int bs = 0;
947
948 ctx->dd = NULL;
949
950 dd = omap_sham_find_dev(ctx);
951 if (!dd)
952 return -ENODEV;
953
954 ctx->flags = 0;
955
956 dev_dbg(dd->dev, "init: digest size: %d\n",
957 crypto_ahash_digestsize(tfm));
958
959 switch (crypto_ahash_digestsize(tfm)) {
960 case MD5_DIGEST_SIZE:
961 ctx->flags |= FLAGS_MODE_MD5;
962 bs = SHA1_BLOCK_SIZE;
963 break;
964 case SHA1_DIGEST_SIZE:
965 ctx->flags |= FLAGS_MODE_SHA1;
966 bs = SHA1_BLOCK_SIZE;
967 break;
968 case SHA224_DIGEST_SIZE:
969 ctx->flags |= FLAGS_MODE_SHA224;
970 bs = SHA224_BLOCK_SIZE;
971 break;
972 case SHA256_DIGEST_SIZE:
973 ctx->flags |= FLAGS_MODE_SHA256;
974 bs = SHA256_BLOCK_SIZE;
975 break;
976 case SHA384_DIGEST_SIZE:
977 ctx->flags |= FLAGS_MODE_SHA384;
978 bs = SHA384_BLOCK_SIZE;
979 break;
980 case SHA512_DIGEST_SIZE:
981 ctx->flags |= FLAGS_MODE_SHA512;
982 bs = SHA512_BLOCK_SIZE;
983 break;
984 }
985
986 ctx->bufcnt = 0;
987 ctx->digcnt = 0;
988 ctx->total = 0;
989 ctx->offset = 0;
990 ctx->buflen = BUFLEN;
991
992 if (tctx->flags & BIT(FLAGS_HMAC)) {
993 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
994 struct omap_sham_hmac_ctx *bctx = tctx->base;
995
996 memcpy(ctx->buffer, bctx->ipad, bs);
997 ctx->bufcnt = bs;
998 }
999
1000 ctx->flags |= BIT(FLAGS_HMAC);
1001 }
1002
1003 return 0;
1004
1005 }
1006
1007 static int omap_sham_update_req(struct omap_sham_dev *dd)
1008 {
1009 struct ahash_request *req = dd->req;
1010 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1011 int err;
1012 bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1013 !(dd->flags & BIT(FLAGS_HUGE));
1014
1015 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
1016 ctx->total, ctx->digcnt, final);
1017
1018 if (ctx->total < get_block_size(ctx) ||
1019 ctx->total < dd->fallback_sz)
1020 ctx->flags |= BIT(FLAGS_CPU);
1021
1022 if (ctx->flags & BIT(FLAGS_CPU))
1023 err = omap_sham_xmit_cpu(dd, ctx->total, final);
1024 else
1025 err = omap_sham_xmit_dma(dd, ctx->total, final);
1026
1027 /* wait for DMA completion before we can take more data */
1028 dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);
1029
1030 return err;
1031 }
1032
1033 static int omap_sham_final_req(struct omap_sham_dev *dd)
1034 {
1035 struct ahash_request *req = dd->req;
1036 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1037 int err = 0, use_dma = 1;
1038
1039 if (dd->flags & BIT(FLAGS_HUGE))
1040 return 0;
1041
1042 if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
1043 /*
1044 * faster to handle last block with cpu or
1045 * use cpu when dma is not present.
1046 */
1047 use_dma = 0;
1048
1049 if (use_dma)
1050 err = omap_sham_xmit_dma(dd, ctx->total, 1);
1051 else
1052 err = omap_sham_xmit_cpu(dd, ctx->total, 1);
1053
1054 ctx->bufcnt = 0;
1055
1056 dev_dbg(dd->dev, "final_req: err: %d\n", err);
1057
1058 return err;
1059 }
1060
1061 static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
1062 {
1063 struct ahash_request *req = container_of(areq, struct ahash_request,
1064 base);
1065 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1066 struct omap_sham_dev *dd = ctx->dd;
1067 int err;
1068 bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1069 !(dd->flags & BIT(FLAGS_HUGE));
1070
1071 dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
1072 ctx->op, ctx->total, ctx->digcnt, final);
1073
1074 err = omap_sham_prepare_request(engine, areq);
1075 if (err)
1076 return err;
1077
1078 err = pm_runtime_resume_and_get(dd->dev);
1079 if (err < 0) {
1080 dev_err(dd->dev, "failed to get sync: %d\n", err);
1081 return err;
1082 }
1083
1084 dd->err = 0;
1085 dd->req = req;
1086
1087 if (ctx->digcnt)
1088 dd->pdata->copy_hash(req, 0);
1089
1090 if (ctx->op == OP_UPDATE)
1091 err = omap_sham_update_req(dd);
1092 else if (ctx->op == OP_FINAL)
1093 err = omap_sham_final_req(dd);
1094
1095 if (err != -EINPROGRESS)
1096 omap_sham_finish_req(req, err);
1097
1098 return 0;
1099 }
1100
1101 static int omap_sham_finish_hmac(struct ahash_request *req)
1102 {
1103 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1104 struct omap_sham_hmac_ctx *bctx = tctx->base;
1105 int bs = crypto_shash_blocksize(bctx->shash);
1106 int ds = crypto_shash_digestsize(bctx->shash);
1107 SHASH_DESC_ON_STACK(shash, bctx->shash);
1108
1109 shash->tfm = bctx->shash;
1110
1111 return crypto_shash_init(shash) ?:
1112 crypto_shash_update(shash, bctx->opad, bs) ?:
1113 crypto_shash_finup(shash, req->result, ds, req->result);
1114 }
1115
1116 static int omap_sham_finish(struct ahash_request *req)
1117 {
1118 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1119 struct omap_sham_dev *dd = ctx->dd;
1120 int err = 0;
1121
1122 if (ctx->digcnt) {
1123 omap_sham_copy_ready_hash(req);
1124 if ((ctx->flags & BIT(FLAGS_HMAC)) &&
1125 !test_bit(FLAGS_AUTO_XOR, &dd->flags))
1126 err = omap_sham_finish_hmac(req);
1127 }
1128
1129 dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt);
1130
1131 return err;
1132 }
1133
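/*
 * Complete a request: release any copied or allocated scatterlists,
 * re-queue oversized (FLAGS_HUGE) requests for another pass, and report
 * the result back to the crypto engine.
 */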
1134 static void omap_sham_finish_req(struct ahash_request *req, int err)
1135 {
1136 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1137 struct omap_sham_dev *dd = ctx->dd;
1138
1139 if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
1140 free_pages((unsigned long)sg_virt(ctx->sg),
1141 get_order(ctx->sg->length));
1142
1143 if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
1144 kfree(ctx->sg);
1145
1146 ctx->sg = NULL;
1147
1148 dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) |
1149 BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
1150 BIT(FLAGS_OUTPUT_READY));
1151
1152 if (!err)
1153 dd->pdata->copy_hash(req, 1);
1154
1155 if (dd->flags & BIT(FLAGS_HUGE)) {
1156 /* Re-enqueue the request */
1157 omap_sham_enqueue(req, ctx->op);
1158 return;
1159 }
1160
1161 if (!err) {
1162 if (test_bit(FLAGS_FINAL, &dd->flags))
1163 err = omap_sham_finish(req);
1164 } else {
1165 ctx->flags |= BIT(FLAGS_ERROR);
1166 }
1167
1168 /* atomic operation is not needed here */
1169 dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
1170 BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
1171
1172 pm_runtime_put_autosuspend(dd->dev);
1173
1174 ctx->offset = 0;
1175
1176 crypto_finalize_hash_request(dd->engine, req, err);
1177 }
1178
1179 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
1180 struct ahash_request *req)
1181 {
1182 return crypto_transfer_hash_request_to_engine(dd->engine, req);
1183 }
1184
1185 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1186 {
1187 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1188 struct omap_sham_dev *dd = ctx->dd;
1189
1190 ctx->op = op;
1191
1192 return omap_sham_handle_queue(dd, req);
1193 }
1194
1195 static int omap_sham_update(struct ahash_request *req)
1196 {
1197 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1198 struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
1199
1200 if (!req->nbytes)
1201 return 0;
1202
1203 if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
1204 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1205 0, req->nbytes, 0);
1206 ctx->bufcnt += req->nbytes;
1207 return 0;
1208 }
1209
1210 if (dd->polling_mode)
1211 ctx->flags |= BIT(FLAGS_CPU);
1212
1213 return omap_sham_enqueue(req, OP_UPDATE);
1214 }
1215
1216 static int omap_sham_final_shash(struct ahash_request *req)
1217 {
1218 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1219 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1220 int offset = 0;
1221
1222 /*
1223 * If we are running HMAC on hardware with limited HMAC support,
1224 * skip the ipad at the beginning of the buffer when falling back
1225 * to the software algorithm.
1226 */
1227 if (test_bit(FLAGS_HMAC, &ctx->flags) &&
1228 !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
1229 offset = get_block_size(ctx);
1230
1231 return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
1232 ctx->bufcnt - offset, req->result);
1233 }
1234
1235 static int omap_sham_final(struct ahash_request *req)
1236 {
1237 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1238
1239 ctx->flags |= BIT(FLAGS_FINUP);
1240
1241 if (ctx->flags & BIT(FLAGS_ERROR))
1242 return 0; /* uncompleted hash is not needed */
1243
1244 /*
1245 * OMAP HW accel works only with buffers >= 9.
1246 * HMAC is always >= 9 because ipad == block size.
1247 * If the buffer size is less than fallback_sz, we use the fallback
1248 * SW algorithm, as using DMA + HW in this case doesn't provide
1249 * any benefit.
1250 */
1251 if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
1252 return omap_sham_final_shash(req);
1253 else if (ctx->bufcnt)
1254 return omap_sham_enqueue(req, OP_FINAL);
1255
1256 /* copy ready hash (+ finalize hmac) */
1257 return omap_sham_finish(req);
1258 }
1259
1260 static int omap_sham_finup(struct ahash_request *req)
1261 {
1262 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1263 int err1, err2;
1264
1265 ctx->flags |= BIT(FLAGS_FINUP);
1266
1267 err1 = omap_sham_update(req);
1268 if (err1 == -EINPROGRESS || err1 == -EBUSY)
1269 return err1;
1270 /*
1271 * final() always has to be called to clean up resources,
1272 * even if update() failed, except for EINPROGRESS
1273 */
1274 err2 = omap_sham_final(req);
1275
1276 return err1 ?: err2;
1277 }
1278
1279 static int omap_sham_digest(struct ahash_request *req)
1280 {
1281 return omap_sham_init(req) ?: omap_sham_finup(req);
1282 }
1283
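/*
 * HMAC key handling: keys longer than the block size are first hashed down
 * with the base shash; the (padded) key is kept in bctx->ipad. On hardware
 * without AUTO_XOR the ipad/opad XOR is done here in software.
 */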
1284 static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1285 unsigned int keylen)
1286 {
1287 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1288 struct omap_sham_hmac_ctx *bctx = tctx->base;
1289 int bs = crypto_shash_blocksize(bctx->shash);
1290 int ds = crypto_shash_digestsize(bctx->shash);
1291 int err, i;
1292
1293 err = crypto_shash_setkey(tctx->fallback, key, keylen);
1294 if (err)
1295 return err;
1296
1297 if (keylen > bs) {
1298 err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
1299 bctx->ipad);
1300 if (err)
1301 return err;
1302 keylen = ds;
1303 } else {
1304 memcpy(bctx->ipad, key, keylen);
1305 }
1306
1307 memset(bctx->ipad + keylen, 0, bs - keylen);
1308
1309 if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
1310 memcpy(bctx->opad, bctx->ipad, bs);
1311
1312 for (i = 0; i < bs; i++) {
1313 bctx->ipad[i] ^= HMAC_IPAD_VALUE;
1314 bctx->opad[i] ^= HMAC_OPAD_VALUE;
1315 }
1316 }
1317
1318 return err;
1319 }
1320
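/*
 * Common tfm init: allocate a software fallback for small requests and, for
 * the hmac(...) variants, the base shash used for key processing and the
 * outer hash on hardware without native HMAC support.
 */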
1321 static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1322 {
1323 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1324 const char *alg_name = crypto_tfm_alg_name(tfm);
1325
1326 /* Allocate a fallback and abort if it failed. */
1327 tctx->fallback = crypto_alloc_shash(alg_name, 0,
1328 CRYPTO_ALG_NEED_FALLBACK);
1329 if (IS_ERR(tctx->fallback)) {
1330 pr_err("omap-sham: fallback driver '%s' "
1331 "could not be loaded.\n", alg_name);
1332 return PTR_ERR(tctx->fallback);
1333 }
1334
1335 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1336 sizeof(struct omap_sham_reqctx) + BUFLEN);
1337
1338 if (alg_base) {
1339 struct omap_sham_hmac_ctx *bctx = tctx->base;
1340 tctx->flags |= BIT(FLAGS_HMAC);
1341 bctx->shash = crypto_alloc_shash(alg_base, 0,
1342 CRYPTO_ALG_NEED_FALLBACK);
1343 if (IS_ERR(bctx->shash)) {
1344 pr_err("omap-sham: base driver '%s' "
1345 "could not be loaded.\n", alg_base);
1346 crypto_free_shash(tctx->fallback);
1347 return PTR_ERR(bctx->shash);
1348 }
1349
1350 }
1351
1352 return 0;
1353 }
1354
1355 static int omap_sham_cra_init(struct crypto_tfm *tfm)
1356 {
1357 return omap_sham_cra_init_alg(tfm, NULL);
1358 }
1359
1360 static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1361 {
1362 return omap_sham_cra_init_alg(tfm, "sha1");
1363 }
1364
1365 static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1366 {
1367 return omap_sham_cra_init_alg(tfm, "sha224");
1368 }
1369
1370 static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1371 {
1372 return omap_sham_cra_init_alg(tfm, "sha256");
1373 }
1374
1375 static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1376 {
1377 return omap_sham_cra_init_alg(tfm, "md5");
1378 }
1379
1380 static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1381 {
1382 return omap_sham_cra_init_alg(tfm, "sha384");
1383 }
1384
1385 static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1386 {
1387 return omap_sham_cra_init_alg(tfm, "sha512");
1388 }
1389
1390 static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1391 {
1392 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1393
1394 crypto_free_shash(tctx->fallback);
1395 tctx->fallback = NULL;
1396
1397 if (tctx->flags & BIT(FLAGS_HMAC)) {
1398 struct omap_sham_hmac_ctx *bctx = tctx->base;
1399 crypto_free_shash(bctx->shash);
1400 }
1401 }
1402
1403 static int omap_sham_export(struct ahash_request *req, void *out)
1404 {
1405 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1406
1407 memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
1408
1409 return 0;
1410 }
1411
1412 static int omap_sham_import(struct ahash_request *req, const void *in)
1413 {
1414 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1415 const struct omap_sham_reqctx *ctx_in = in;
1416
1417 memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
1418
1419 return 0;
1420 }
1421
1422 static struct ahash_engine_alg algs_sha1_md5[] = {
1423 {
1424 .base.init = omap_sham_init,
1425 .base.update = omap_sham_update,
1426 .base.final = omap_sham_final,
1427 .base.finup = omap_sham_finup,
1428 .base.digest = omap_sham_digest,
1429 .base.halg.digestsize = SHA1_DIGEST_SIZE,
1430 .base.halg.base = {
1431 .cra_name = "sha1",
1432 .cra_driver_name = "omap-sha1",
1433 .cra_priority = 400,
1434 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1435 CRYPTO_ALG_ASYNC |
1436 CRYPTO_ALG_NEED_FALLBACK,
1437 .cra_blocksize = SHA1_BLOCK_SIZE,
1438 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1439 .cra_module = THIS_MODULE,
1440 .cra_init = omap_sham_cra_init,
1441 .cra_exit = omap_sham_cra_exit,
1442 },
1443 .op.do_one_request = omap_sham_hash_one_req,
1444 },
1445 {
1446 .base.init = omap_sham_init,
1447 .base.update = omap_sham_update,
1448 .base.final = omap_sham_final,
1449 .base.finup = omap_sham_finup,
1450 .base.digest = omap_sham_digest,
1451 .base.halg.digestsize = MD5_DIGEST_SIZE,
1452 .base.halg.base = {
1453 .cra_name = "md5",
1454 .cra_driver_name = "omap-md5",
1455 .cra_priority = 400,
1456 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1457 CRYPTO_ALG_ASYNC |
1458 CRYPTO_ALG_NEED_FALLBACK,
1459 .cra_blocksize = SHA1_BLOCK_SIZE,
1460 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1461 .cra_module = THIS_MODULE,
1462 .cra_init = omap_sham_cra_init,
1463 .cra_exit = omap_sham_cra_exit,
1464 },
1465 .op.do_one_request = omap_sham_hash_one_req,
1466 },
1467 {
1468 .base.init = omap_sham_init,
1469 .base.update = omap_sham_update,
1470 .base.final = omap_sham_final,
1471 .base.finup = omap_sham_finup,
1472 .base.digest = omap_sham_digest,
1473 .base.setkey = omap_sham_setkey,
1474 .base.halg.digestsize = SHA1_DIGEST_SIZE,
1475 .base.halg.base = {
1476 .cra_name = "hmac(sha1)",
1477 .cra_driver_name = "omap-hmac-sha1",
1478 .cra_priority = 400,
1479 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1480 CRYPTO_ALG_ASYNC |
1481 CRYPTO_ALG_NEED_FALLBACK,
1482 .cra_blocksize = SHA1_BLOCK_SIZE,
1483 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1484 sizeof(struct omap_sham_hmac_ctx),
1485 .cra_module = THIS_MODULE,
1486 .cra_init = omap_sham_cra_sha1_init,
1487 .cra_exit = omap_sham_cra_exit,
1488 },
1489 .op.do_one_request = omap_sham_hash_one_req,
1490 },
1491 {
1492 .base.init = omap_sham_init,
1493 .base.update = omap_sham_update,
1494 .base.final = omap_sham_final,
1495 .base.finup = omap_sham_finup,
1496 .base.digest = omap_sham_digest,
1497 .base.setkey = omap_sham_setkey,
1498 .base.halg.digestsize = MD5_DIGEST_SIZE,
1499 .base.halg.base = {
1500 .cra_name = "hmac(md5)",
1501 .cra_driver_name = "omap-hmac-md5",
1502 .cra_priority = 400,
1503 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1504 CRYPTO_ALG_ASYNC |
1505 CRYPTO_ALG_NEED_FALLBACK,
1506 .cra_blocksize = SHA1_BLOCK_SIZE,
1507 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1508 sizeof(struct omap_sham_hmac_ctx),
1509 .cra_module = THIS_MODULE,
1510 .cra_init = omap_sham_cra_md5_init,
1511 .cra_exit = omap_sham_cra_exit,
1512 },
1513 .op.do_one_request = omap_sham_hash_one_req,
1514 }
1515 };
1516
1517 /* OMAP4 has some algs in addition to what OMAP2 has */
1518 static struct ahash_engine_alg algs_sha224_sha256[] = {
1519 {
1520 .base.init = omap_sham_init,
1521 .base.update = omap_sham_update,
1522 .base.final = omap_sham_final,
1523 .base.finup = omap_sham_finup,
1524 .base.digest = omap_sham_digest,
1525 .base.halg.digestsize = SHA224_DIGEST_SIZE,
1526 .base.halg.base = {
1527 .cra_name = "sha224",
1528 .cra_driver_name = "omap-sha224",
1529 .cra_priority = 400,
1530 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1531 CRYPTO_ALG_ASYNC |
1532 CRYPTO_ALG_NEED_FALLBACK,
1533 .cra_blocksize = SHA224_BLOCK_SIZE,
1534 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1535 .cra_module = THIS_MODULE,
1536 .cra_init = omap_sham_cra_init,
1537 .cra_exit = omap_sham_cra_exit,
1538 },
1539 .op.do_one_request = omap_sham_hash_one_req,
1540 },
1541 {
1542 .base.init = omap_sham_init,
1543 .base.update = omap_sham_update,
1544 .base.final = omap_sham_final,
1545 .base.finup = omap_sham_finup,
1546 .base.digest = omap_sham_digest,
1547 .base.halg.digestsize = SHA256_DIGEST_SIZE,
1548 .base.halg.base = {
1549 .cra_name = "sha256",
1550 .cra_driver_name = "omap-sha256",
1551 .cra_priority = 400,
1552 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1553 CRYPTO_ALG_ASYNC |
1554 CRYPTO_ALG_NEED_FALLBACK,
1555 .cra_blocksize = SHA256_BLOCK_SIZE,
1556 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1557 .cra_module = THIS_MODULE,
1558 .cra_init = omap_sham_cra_init,
1559 .cra_exit = omap_sham_cra_exit,
1560 },
1561 .op.do_one_request = omap_sham_hash_one_req,
1562 },
1563 {
1564 .base.init = omap_sham_init,
1565 .base.update = omap_sham_update,
1566 .base.final = omap_sham_final,
1567 .base.finup = omap_sham_finup,
1568 .base.digest = omap_sham_digest,
1569 .base.setkey = omap_sham_setkey,
1570 .base.halg.digestsize = SHA224_DIGEST_SIZE,
1571 .base.halg.base = {
1572 .cra_name = "hmac(sha224)",
1573 .cra_driver_name = "omap-hmac-sha224",
1574 .cra_priority = 400,
1575 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1576 CRYPTO_ALG_ASYNC |
1577 CRYPTO_ALG_NEED_FALLBACK,
1578 .cra_blocksize = SHA224_BLOCK_SIZE,
1579 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1580 sizeof(struct omap_sham_hmac_ctx),
1581 .cra_module = THIS_MODULE,
1582 .cra_init = omap_sham_cra_sha224_init,
1583 .cra_exit = omap_sham_cra_exit,
1584 },
1585 .op.do_one_request = omap_sham_hash_one_req,
1586 },
1587 {
1588 .base.init = omap_sham_init,
1589 .base.update = omap_sham_update,
1590 .base.final = omap_sham_final,
1591 .base.finup = omap_sham_finup,
1592 .base.digest = omap_sham_digest,
1593 .base.setkey = omap_sham_setkey,
1594 .base.halg.digestsize = SHA256_DIGEST_SIZE,
1595 .base.halg.base = {
1596 .cra_name = "hmac(sha256)",
1597 .cra_driver_name = "omap-hmac-sha256",
1598 .cra_priority = 400,
1599 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1600 CRYPTO_ALG_ASYNC |
1601 CRYPTO_ALG_NEED_FALLBACK,
1602 .cra_blocksize = SHA256_BLOCK_SIZE,
1603 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1604 sizeof(struct omap_sham_hmac_ctx),
1605 .cra_module = THIS_MODULE,
1606 .cra_init = omap_sham_cra_sha256_init,
1607 .cra_exit = omap_sham_cra_exit,
1608 },
1609 .op.do_one_request = omap_sham_hash_one_req,
1610 },
1611 };
1612
1613 static struct ahash_engine_alg algs_sha384_sha512[] = {
1614 {
1615 .base.init = omap_sham_init,
1616 .base.update = omap_sham_update,
1617 .base.final = omap_sham_final,
1618 .base.finup = omap_sham_finup,
1619 .base.digest = omap_sham_digest,
1620 .base.halg.digestsize = SHA384_DIGEST_SIZE,
1621 .base.halg.base = {
1622 .cra_name = "sha384",
1623 .cra_driver_name = "omap-sha384",
1624 .cra_priority = 400,
1625 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1626 CRYPTO_ALG_ASYNC |
1627 CRYPTO_ALG_NEED_FALLBACK,
1628 .cra_blocksize = SHA384_BLOCK_SIZE,
1629 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1630 .cra_module = THIS_MODULE,
1631 .cra_init = omap_sham_cra_init,
1632 .cra_exit = omap_sham_cra_exit,
1633 },
1634 .op.do_one_request = omap_sham_hash_one_req,
1635 },
1636 {
1637 .base.init = omap_sham_init,
1638 .base.update = omap_sham_update,
1639 .base.final = omap_sham_final,
1640 .base.finup = omap_sham_finup,
1641 .base.digest = omap_sham_digest,
1642 .base.halg.digestsize = SHA512_DIGEST_SIZE,
1643 .base.halg.base = {
1644 .cra_name = "sha512",
1645 .cra_driver_name = "omap-sha512",
1646 .cra_priority = 400,
1647 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1648 CRYPTO_ALG_ASYNC |
1649 CRYPTO_ALG_NEED_FALLBACK,
1650 .cra_blocksize = SHA512_BLOCK_SIZE,
1651 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1652 .cra_module = THIS_MODULE,
1653 .cra_init = omap_sham_cra_init,
1654 .cra_exit = omap_sham_cra_exit,
1655 },
1656 .op.do_one_request = omap_sham_hash_one_req,
1657 },
1658 {
1659 .base.init = omap_sham_init,
1660 .base.update = omap_sham_update,
1661 .base.final = omap_sham_final,
1662 .base.finup = omap_sham_finup,
1663 .base.digest = omap_sham_digest,
1664 .base.setkey = omap_sham_setkey,
1665 .base.halg.digestsize = SHA384_DIGEST_SIZE,
1666 .base.halg.base = {
1667 .cra_name = "hmac(sha384)",
1668 .cra_driver_name = "omap-hmac-sha384",
1669 .cra_priority = 400,
1670 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1671 CRYPTO_ALG_ASYNC |
1672 CRYPTO_ALG_NEED_FALLBACK,
1673 .cra_blocksize = SHA384_BLOCK_SIZE,
1674 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1675 sizeof(struct omap_sham_hmac_ctx),
1676 .cra_module = THIS_MODULE,
1677 .cra_init = omap_sham_cra_sha384_init,
1678 .cra_exit = omap_sham_cra_exit,
1679 },
1680 .op.do_one_request = omap_sham_hash_one_req,
1681 },
1682 {
1683 .base.init = omap_sham_init,
1684 .base.update = omap_sham_update,
1685 .base.final = omap_sham_final,
1686 .base.finup = omap_sham_finup,
1687 .base.digest = omap_sham_digest,
1688 .base.setkey = omap_sham_setkey,
1689 .base.halg.digestsize = SHA512_DIGEST_SIZE,
1690 .base.halg.base = {
1691 .cra_name = "hmac(sha512)",
1692 .cra_driver_name = "omap-hmac-sha512",
1693 .cra_priority = 400,
1694 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1695 CRYPTO_ALG_ASYNC |
1696 CRYPTO_ALG_NEED_FALLBACK,
1697 .cra_blocksize = SHA512_BLOCK_SIZE,
1698 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1699 sizeof(struct omap_sham_hmac_ctx),
1700 .cra_module = THIS_MODULE,
1701 .cra_init = omap_sham_cra_sha512_init,
1702 .cra_exit = omap_sham_cra_exit,
1703 },
1704 .op.do_one_request = omap_sham_hash_one_req,
1705 },
1706 };
1707
1708 static void omap_sham_done_task(struct work_struct *t)
1709 {
1710 struct omap_sham_dev *dd = from_work(dd, t, done_task);
1711 int err = 0;
1712
1713 dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
1714
1715 if (test_bit(FLAGS_CPU, &dd->flags)) {
1716 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
1717 goto finish;
1718 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1719 if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1720 omap_sham_update_dma_stop(dd);
1721 if (dd->err) {
1722 err = dd->err;
1723 goto finish;
1724 }
1725 }
1726 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1727 /* hash or semi-hash ready */
1728 clear_bit(FLAGS_DMA_READY, &dd->flags);
1729 goto finish;
1730 }
1731 }
1732
1733 return;
1734
1735 finish:
1736 dev_dbg(dd->dev, "update done: err: %d\n", err);
1737 /* finish current request */
1738 omap_sham_finish_req(dd->req, err);
1739 }
1740
1741 static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1742 {
1743 set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1744 queue_work(system_bh_wq, &dd->done_task);
1745
1746 return IRQ_HANDLED;
1747 }
1748
1749 static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
1750 {
1751 struct omap_sham_dev *dd = dev_id;
1752
1753 if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
1754 /* final -> allow device to go to power-saving mode */
1755 omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1756
1757 omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1758 SHA_REG_CTRL_OUTPUT_READY);
1759 omap_sham_read(dd, SHA_REG_CTRL);
1760
1761 return omap_sham_irq_common(dd);
1762 }
1763
1764 static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
1765 {
1766 struct omap_sham_dev *dd = dev_id;
1767
1768 omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
1769
1770 return omap_sham_irq_common(dd);
1771 }
1772
1773 static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
1774 {
1775 .algs_list = algs_sha1_md5,
1776 .size = ARRAY_SIZE(algs_sha1_md5),
1777 },
1778 };
1779
1780 static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
1781 .algs_info = omap_sham_algs_info_omap2,
1782 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2),
1783 .flags = BIT(FLAGS_BE32_SHA1),
1784 .digest_size = SHA1_DIGEST_SIZE,
1785 .copy_hash = omap_sham_copy_hash_omap2,
1786 .write_ctrl = omap_sham_write_ctrl_omap2,
1787 .trigger = omap_sham_trigger_omap2,
1788 .poll_irq = omap_sham_poll_irq_omap2,
1789 .intr_hdlr = omap_sham_irq_omap2,
1790 .idigest_ofs = 0x00,
1791 .din_ofs = 0x1c,
1792 .digcnt_ofs = 0x14,
1793 .rev_ofs = 0x5c,
1794 .mask_ofs = 0x60,
1795 .sysstatus_ofs = 0x64,
1796 .major_mask = 0xf0,
1797 .major_shift = 4,
1798 .minor_mask = 0x0f,
1799 .minor_shift = 0,
1800 };
1801
1802 #ifdef CONFIG_OF
1803 static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
1804 {
1805 .algs_list = algs_sha1_md5,
1806 .size = ARRAY_SIZE(algs_sha1_md5),
1807 },
1808 {
1809 .algs_list = algs_sha224_sha256,
1810 .size = ARRAY_SIZE(algs_sha224_sha256),
1811 },
1812 };
1813
1814 static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
1815 .algs_info = omap_sham_algs_info_omap4,
1816 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4),
1817 .flags = BIT(FLAGS_AUTO_XOR),
1818 .digest_size = SHA256_DIGEST_SIZE,
1819 .copy_hash = omap_sham_copy_hash_omap4,
1820 .write_ctrl = omap_sham_write_ctrl_omap4,
1821 .trigger = omap_sham_trigger_omap4,
1822 .poll_irq = omap_sham_poll_irq_omap4,
1823 .intr_hdlr = omap_sham_irq_omap4,
1824 .idigest_ofs = 0x020,
1825 .odigest_ofs = 0x0,
1826 .din_ofs = 0x080,
1827 .digcnt_ofs = 0x040,
1828 .rev_ofs = 0x100,
1829 .mask_ofs = 0x110,
1830 .sysstatus_ofs = 0x114,
1831 .mode_ofs = 0x44,
1832 .length_ofs = 0x48,
1833 .major_mask = 0x0700,
1834 .major_shift = 8,
1835 .minor_mask = 0x003f,
1836 .minor_shift = 0,
1837 };
1838
1839 static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
1840 {
1841 .algs_list = algs_sha1_md5,
1842 .size = ARRAY_SIZE(algs_sha1_md5),
1843 },
1844 {
1845 .algs_list = algs_sha224_sha256,
1846 .size = ARRAY_SIZE(algs_sha224_sha256),
1847 },
1848 {
1849 .algs_list = algs_sha384_sha512,
1850 .size = ARRAY_SIZE(algs_sha384_sha512),
1851 },
1852 };
1853
1854 static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
1855 .algs_info = omap_sham_algs_info_omap5,
1856 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5),
1857 .flags = BIT(FLAGS_AUTO_XOR),
1858 .digest_size = SHA512_DIGEST_SIZE,
1859 .copy_hash = omap_sham_copy_hash_omap4,
1860 .write_ctrl = omap_sham_write_ctrl_omap4,
1861 .trigger = omap_sham_trigger_omap4,
1862 .poll_irq = omap_sham_poll_irq_omap4,
1863 .intr_hdlr = omap_sham_irq_omap4,
1864 .idigest_ofs = 0x240,
1865 .odigest_ofs = 0x200,
1866 .din_ofs = 0x080,
1867 .digcnt_ofs = 0x280,
1868 .rev_ofs = 0x100,
1869 .mask_ofs = 0x110,
1870 .sysstatus_ofs = 0x114,
1871 .mode_ofs = 0x284,
1872 .length_ofs = 0x288,
1873 .major_mask = 0x0700,
1874 .major_shift = 8,
1875 .minor_mask = 0x003f,
1876 .minor_shift = 0,
1877 };
1878
1879 static const struct of_device_id omap_sham_of_match[] = {
1880 {
1881 .compatible = "ti,omap2-sham",
1882 .data = &omap_sham_pdata_omap2,
1883 },
1884 {
1885 .compatible = "ti,omap3-sham",
1886 .data = &omap_sham_pdata_omap2,
1887 },
1888 {
1889 .compatible = "ti,omap4-sham",
1890 .data = &omap_sham_pdata_omap4,
1891 },
1892 {
1893 .compatible = "ti,omap5-sham",
1894 .data = &omap_sham_pdata_omap5,
1895 },
1896 {},
1897 };
1898 MODULE_DEVICE_TABLE(of, omap_sham_of_match);
1899
1900 static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1901 struct device *dev, struct resource *res)
1902 {
1903 struct device_node *node = dev->of_node;
1904 int err = 0;
1905
1906 dd->pdata = of_device_get_match_data(dev);
1907 if (!dd->pdata) {
1908 dev_err(dev, "no compatible OF match\n");
1909 err = -EINVAL;
1910 goto err;
1911 }
1912
1913 err = of_address_to_resource(node, 0, res);
1914 if (err < 0) {
1915 dev_err(dev, "can't translate OF node address\n");
1916 err = -EINVAL;
1917 goto err;
1918 }
1919
1920 dd->irq = irq_of_parse_and_map(node, 0);
1921 if (!dd->irq) {
1922 dev_err(dev, "can't translate OF irq value\n");
1923 err = -EINVAL;
1924 goto err;
1925 }
1926
1927 err:
1928 return err;
1929 }
#else
static const struct of_device_id omap_sham_of_match[] = {
	{},
};

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
				struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

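/*
 * Legacy (non-DT) resource lookup: take the MMIO region and the IRQ from
 * the platform device. Only OMAP2/3 boards probe this way, so the OMAP2
 * pdata is used unconditionally.
 */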
static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
				  struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		err = dd->irq;
		goto err;
	}

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_sham_pdata_omap2;

err:
	return err;
}

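/*
 * sysfs knobs: "fallback" is the size threshold in bytes below which
 * requests are handed to the software fallback instead of the accelerator,
 * and "queue_len" is the maximum length of the internal request queue.
 * Example usage (the exact sysfs path depends on the platform device name):
 *
 *   echo 512 > /sys/bus/platform/devices/<sham-device>/fallback
 */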
static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", dd->fallback_sz);
}

static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	/* HW accelerator only works with buffers larger than 9 bytes */
	if (value < 9) {
		dev_err(dev, "minimum fallback size 9\n");
		return -EINVAL;
	}

	dd->fallback_sz = value;

	return size;
}

static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", dd->queue.max_qlen);
}

static ssize_t queue_len_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	if (value < 1)
		return -EINVAL;

	/*
	 * Changing the queue size on the fly is safe: if the new size is
	 * smaller than the current size, the queue simply will not accept
	 * new entries until it has shrunk enough.
	 */
	dd->queue.max_qlen = value;

	return size;
}

static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_sham_attrs[] = {
	&dev_attr_queue_len.attr,
	&dev_attr_fallback.attr,
	NULL,
};
ATTRIBUTE_GROUPS(omap_sham);

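/*
 * Probe: allocate the driver state, map resources, request the IRQ and
 * (optionally) a DMA channel, enable runtime PM, read the module revision,
 * then register the supported hash algorithms through the crypto engine.
 */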
static int omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource res;
	dma_cap_mask_t mask;
	int err, i, j;
	u32 rev;

	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	INIT_WORK(&dd->done_task, omap_sham_done_task);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto data_err;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto data_err;
	}
	dd->phys_base = res.start;

	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq %d, err = %d\n",
			dd->irq, err);
		goto data_err;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

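	/*
	 * A DMA channel is optional: if none is available the driver falls
	 * back to polled PIO, but a probe deferral is still propagated so
	 * DMA can be picked up once the DMA controller has probed.
	 */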
	dd->dma_lch = dma_request_chan(dev, "rx");
	if (IS_ERR(dd->dma_lch)) {
		err = PTR_ERR(dd->dma_lch);
		if (err == -EPROBE_DEFER)
			goto data_err;

		dd->polling_mode = 1;
		dev_dbg(dev, "using polling mode instead of dma\n");
	}

	dd->flags |= dd->pdata->flags;
	sham.flags |= dd->pdata->flags;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;

	pm_runtime_enable(dev);

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		goto err_pm;
	}

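	/*
	 * Reading the revision register requires the module clocks, hence
	 * the runtime PM get above and the put right after the read.
	 */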
	rev = omap_sham_read(dd, SHA_REG_REV(dd));
	pm_runtime_put_sync(&pdev->dev);

	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	spin_lock_bh(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock_bh(&sham.lock);

	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine) {
		err = -ENOMEM;
		goto err_engine;
	}

	err = crypto_engine_start(dd->engine);
	if (err)
		goto err_engine_start;

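	/*
	 * Register every algorithm listed in the per-SoC pdata, wiring up
	 * export/import so partial hash state (request context plus data
	 * buffer) can be saved and restored.
	 */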
	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		if (dd->pdata->algs_info[i].registered)
			break;

		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			struct ahash_engine_alg *ealg;
			struct ahash_alg *alg;

			ealg = &dd->pdata->algs_info[i].algs_list[j];
			alg = &ealg->base;
			alg->export = omap_sham_export;
			alg->import = omap_sham_import;
			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
					      BUFLEN;
			err = crypto_engine_register_ahash(ealg);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	return 0;

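	/* Unwind in reverse order of the setup above. */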
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_engine_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
err_engine_start:
	crypto_engine_exit(dd->engine);
err_engine:
	spin_lock_bh(&sham.lock);
	list_del(&dd->list);
	spin_unlock_bh(&sham.lock);
err_pm:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

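/*
 * Remove: take the device off the global list, unregister all previously
 * registered algorithms, cancel the completion work and release runtime PM
 * and the DMA channel.
 */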
static void omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i, j;

	dd = platform_get_drvdata(pdev);

	spin_lock_bh(&sham.lock);
	list_del(&dd->list);
	spin_unlock_bh(&sham.lock);
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
			crypto_engine_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
			dd->pdata->algs_info[i].registered--;
		}
	cancel_work_sync(&dd->done_task);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);
}

static struct platform_driver omap_sham_driver = {
	.probe = omap_sham_probe,
	.remove = omap_sham_remove,
	.driver = {
		.name = "omap-sham",
		.of_match_table = omap_sham_of_match,
		.dev_groups = omap_sham_groups,
	},
};

module_platform_driver(omap_sham_driver);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
MODULE_ALIAS("platform:omap-sham");