1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * sun8i-ce-core.c - hardware cryptographic offloader for
4 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
5 *
6 * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com>
7 *
8 * Core file which registers crypto algorithms supported by the CryptoEngine.
9 *
10 * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
11 */
12
13 #include <crypto/engine.h>
14 #include <crypto/internal/hash.h>
15 #include <crypto/internal/rng.h>
16 #include <crypto/internal/skcipher.h>
17 #include <linux/clk.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/err.h>
21 #include <linux/interrupt.h>
22 #include <linux/io.h>
23 #include <linux/irq.h>
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/of.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/reset.h>
30
31 #include "sun8i-ce.h"
32
33 /*
34 * mod clock is lower on H3 than other SoC due to some DMA timeout occurring
35 * with high value.
36 * If you want to tune mod clock, loading driver and passing selftest is
37 * insufficient, you need to test with some LUKS test (mount and write to it)
38 */
/* Capability table for the H3 Crypto Engine. */
static const struct ce_variant ce_h3_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
		CE_ALG_SHA384, CE_ALG_SHA512
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	/* { name, rate to set (0 = leave as-is), datasheet max rate } */
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 50000000, 0 },	/* lower than other SoCs, see note above */
	},
	.esr = ESR_H3,
	.prng = CE_ALG_PRNG,
	.trng = CE_ID_NOTSUPP,	/* no TRNG on H3 */
};
55
/* Capability table for the H5 Crypto Engine (no SHA384/SHA512, no TRNG). */
static const struct ce_variant ce_h5_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
		CE_ID_NOTSUPP, CE_ID_NOTSUPP
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
	},
	.esr = ESR_H5,
	.prng = CE_ALG_PRNG,
	.trng = CE_ID_NOTSUPP,
};
72
/*
 * Capability table for the H6 Crypto Engine. The H6 descriptor format
 * expresses data lengths differently (bytes for cipher/PRNG/TRNG, bits
 * for hashes), hence the *_t_dlen_* flags.
 */
static const struct ce_variant ce_h6_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
		CE_ALG_SHA384, CE_ALG_SHA512
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.cipher_t_dlen_in_bytes = true,
	.hash_t_dlen_in_bits = true,
	.prng_t_dlen_in_bytes = true,
	.trng_t_dlen_in_bytes = true,
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
		{ "ram", 0, 400000000 },
	},
	.esr = ESR_H6,
	.prng = CE_ALG_PRNG_V2,
	.trng = CE_ALG_TRNG_V2,
};
94
/*
 * Capability table for the H616 Crypto Engine. Similar to the H6 but DMA
 * descriptor addresses must be given as word addresses
 * (needs_word_addresses) and a dedicated TRNG clock exists.
 */
static const struct ce_variant ce_h616_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
		CE_ALG_SHA384, CE_ALG_SHA512
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.cipher_t_dlen_in_bytes = true,
	.hash_t_dlen_in_bits = true,
	.prng_t_dlen_in_bytes = true,
	.trng_t_dlen_in_bytes = true,
	.needs_word_addresses = true,
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
		{ "ram", 0, 400000000 },
		{ "trng", 0, 0 },
	},
	.esr = ESR_H6,
	.prng = CE_ALG_PRNG_V2,
	.trng = CE_ALG_TRNG_V2,
};
118
/* Capability table for the A64 Crypto Engine (no SHA384/SHA512, no TRNG). */
static const struct ce_variant ce_a64_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
		CE_ID_NOTSUPP, CE_ID_NOTSUPP
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
	},
	.esr = ESR_A64,
	.prng = CE_ALG_PRNG,
	.trng = CE_ID_NOTSUPP,
};
135
/* Capability table for the D1 Crypto Engine (full hash set, PRNG and TRNG). */
static const struct ce_variant ce_d1_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
		CE_ALG_SHA384, CE_ALG_SHA512
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
		{ "ram", 0, 400000000 },
		{ "trng", 0, 0 },
	},
	.esr = ESR_D1,
	.prng = CE_ALG_PRNG,
	.trng = CE_ALG_TRNG,
};
154
/* Capability table for the R40 Crypto Engine (no SHA384/SHA512, no TRNG). */
static const struct ce_variant ce_r40_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
		CE_ID_NOTSUPP, CE_ID_NOTSUPP
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
	},
	.esr = ESR_R40,
	.prng = CE_ALG_PRNG,
	.trng = CE_ID_NOTSUPP,
};
171
172 /*
173 * sun8i_ce_get_engine_number() get the next channel slot
174 * This is a simple round-robin way of getting the next channel
175 * The flow 3 is reserve for xRNG operations
176 */
int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce)
{
	/* Round-robin over flows 0..MAXFLOW-2; the last flow is kept for xRNG */
	return atomic_inc_return(&ce->flow) % (MAXFLOW - 1);
}
181
/*
 * sun8i_ce_run_task() - submit the task descriptor of a flow and wait for it
 * @ce:   the Crypto Engine device
 * @flow: channel whose pre-filled task descriptor (chanlist[flow].tl) to run
 * @name: algorithm name, used only in error messages
 *
 * Enables the flow interrupt, queues the task's DMA address, starts the
 * engine and waits for completion (signalled by ce_irq_handler()).
 * Returns 0 on success, -EFAULT on timeout or hardware-reported error.
 */
int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
{
	u32 v;
	int err = 0;
	struct ce_task *cet = ce->chanlist[flow].tl;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	ce->chanlist[flow].stat_req++;
#endif

	/* mlock serializes access to the engine's submission registers */
	mutex_lock(&ce->mlock);

	/* Enable the completion interrupt for this flow */
	v = readl(ce->base + CE_ICR);
	v |= 1 << flow;
	writel(v, ce->base + CE_ICR);

	reinit_completion(&ce->chanlist[flow].complete);
	/* Queue the physical address of the task descriptor */
	writel(desc_addr_val(ce, ce->chanlist[flow].t_phy), ce->base + CE_TDQ);

	ce->chanlist[flow].status = 0;
	/* Be sure all data is written before enabling the task */
	wmb();

	/* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored
	 * on older SoCs, we have no reason to complicate things.
	 */
	v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8);
	writel(v, ce->base + CE_TLR);
	mutex_unlock(&ce->mlock);

	/*
	 * NOTE(review): the return value is not checked here; a timeout or
	 * interruption is detected below via chanlist[flow].status staying 0.
	 */
	wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete,
			msecs_to_jiffies(ce->chanlist[flow].timeout));

	if (ce->chanlist[flow].status == 0) {
		dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name,
			ce->chanlist[flow].timeout, flow);
		err = -EFAULT;
	}
	/* No need to lock for this read, the channel is locked so
	 * nothing could modify the error value for this channel
	 */
	v = readl(ce->base + CE_ESR);
	/* The error-status register layout differs per SoC generation */
	switch (ce->variant->esr) {
	case ESR_H3:
		/* Sadly, the error bit is not per flow */
		if (v) {
			dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
			err = -EFAULT;
			print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
				       cet, sizeof(struct ce_task), false);
		}
		if (v & CE_ERR_ALGO_NOTSUP)
			dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
		if (v & CE_ERR_DATALEN)
			dev_err(ce->dev, "CE ERROR: data length error\n");
		if (v & CE_ERR_KEYSRAM)
			dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
		break;
	case ESR_A64:
	case ESR_D1:
	case ESR_H5:
	case ESR_R40:
		/* 4 error bits per flow */
		v >>= (flow * 4);
		v &= 0xF;
		if (v) {
			dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
			err = -EFAULT;
			print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
				       cet, sizeof(struct ce_task), false);
		}
		if (v & CE_ERR_ALGO_NOTSUP)
			dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
		if (v & CE_ERR_DATALEN)
			dev_err(ce->dev, "CE ERROR: data length error\n");
		if (v & CE_ERR_KEYSRAM)
			dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
		break;
	case ESR_H6:
		/* 8 error bits per flow, with two extra error conditions */
		v >>= (flow * 8);
		v &= 0xFF;
		if (v) {
			dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
			err = -EFAULT;
			print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
				       cet, sizeof(struct ce_task), false);
		}
		if (v & CE_ERR_ALGO_NOTSUP)
			dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
		if (v & CE_ERR_DATALEN)
			dev_err(ce->dev, "CE ERROR: data length error\n");
		if (v & CE_ERR_KEYSRAM)
			dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
		if (v & CE_ERR_ADDR_INVALID)
			dev_err(ce->dev, "CE ERROR: address invalid\n");
		if (v & CE_ERR_KEYLADDER)
			dev_err(ce->dev, "CE ERROR: key ladder configuration error\n");
		break;
	}

	return err;
}
283
ce_irq_handler(int irq,void * data)284 static irqreturn_t ce_irq_handler(int irq, void *data)
285 {
286 struct sun8i_ce_dev *ce = (struct sun8i_ce_dev *)data;
287 int flow = 0;
288 u32 p;
289
290 p = readl(ce->base + CE_ISR);
291 for (flow = 0; flow < MAXFLOW; flow++) {
292 if (p & (BIT(flow))) {
293 writel(BIT(flow), ce->base + CE_ISR);
294 ce->chanlist[flow].status = 1;
295 complete(&ce->chanlist[flow].complete);
296 }
297 }
298
299 return IRQ_HANDLED;
300 }
301
/*
 * Template table of all algorithms this driver can offer. At probe time,
 * sun8i_ce_register_algs() registers each entry whose algorithm/blockmode
 * is supported by the detected variant; unsupported entries get .ce = NULL.
 */
static struct sun8i_ce_alg_template ce_algs[] = {
	/* Skciphers: AES and DES3 in CBC and ECB modes */
	{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.ce_algo_id = CE_ID_CIPHER_AES,
	.ce_blockmode = CE_ID_OP_CBC,
	.alg.skcipher.base = {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-sun8i-ce",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sun8i_ce_cipher_init,
			.cra_exit = sun8i_ce_cipher_exit,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sun8i_ce_aes_setkey,
		.encrypt	= sun8i_ce_skencrypt,
		.decrypt	= sun8i_ce_skdecrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = sun8i_ce_cipher_do_one,
	},
	},
	{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.ce_algo_id = CE_ID_CIPHER_AES,
	.ce_blockmode = CE_ID_OP_ECB,
	.alg.skcipher.base = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-sun8i-ce",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sun8i_ce_cipher_init,
			.cra_exit = sun8i_ce_cipher_exit,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sun8i_ce_aes_setkey,
		.encrypt	= sun8i_ce_skencrypt,
		.decrypt	= sun8i_ce_skdecrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = sun8i_ce_cipher_do_one,
	},
	},
	{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.ce_algo_id = CE_ID_CIPHER_DES3,
	.ce_blockmode = CE_ID_OP_CBC,
	.alg.skcipher.base = {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-des3-sun8i-ce",
			.cra_priority = 400,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sun8i_ce_cipher_init,
			.cra_exit = sun8i_ce_cipher_exit,
		},
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.setkey		= sun8i_ce_des3_setkey,
		.encrypt	= sun8i_ce_skencrypt,
		.decrypt	= sun8i_ce_skdecrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = sun8i_ce_cipher_do_one,
	},
	},
	{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.ce_algo_id = CE_ID_CIPHER_DES3,
	.ce_blockmode = CE_ID_OP_ECB,
	.alg.skcipher.base = {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-des3-sun8i-ce",
			.cra_priority = 400,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sun8i_ce_cipher_init,
			.cra_exit = sun8i_ce_cipher_exit,
		},
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
		.setkey		= sun8i_ce_des3_setkey,
		.encrypt	= sun8i_ce_skencrypt,
		.decrypt	= sun8i_ce_skdecrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = sun8i_ce_cipher_do_one,
	},
	},
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_HASH
	/* Hashes: MD5, SHA1, SHA2 family */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
	.ce_algo_id = CE_ID_HASH_MD5,
	.alg.hash.base = {
		.init = sun8i_ce_hash_init,
		.update = sun8i_ce_hash_update,
		.final = sun8i_ce_hash_final,
		.finup = sun8i_ce_hash_finup,
		.digest = sun8i_ce_hash_digest,
		.export = sun8i_ce_hash_export,
		.import = sun8i_ce_hash_import,
		.init_tfm = sun8i_ce_hash_init_tfm,
		.exit_tfm = sun8i_ce_hash_exit_tfm,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-sun8i-ce",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
				.cra_module = THIS_MODULE,
			}
		}
	},
	.alg.hash.op = {
		.do_one_request = sun8i_ce_hash_run,
	},

	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
	.ce_algo_id = CE_ID_HASH_SHA1,
	.alg.hash.base = {
		.init = sun8i_ce_hash_init,
		.update = sun8i_ce_hash_update,
		.final = sun8i_ce_hash_final,
		.finup = sun8i_ce_hash_finup,
		.digest = sun8i_ce_hash_digest,
		.export = sun8i_ce_hash_export,
		.import = sun8i_ce_hash_import,
		.init_tfm = sun8i_ce_hash_init_tfm,
		.exit_tfm = sun8i_ce_hash_exit_tfm,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-sun8i-ce",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
				.cra_module = THIS_MODULE,
			}
		}
	},
	.alg.hash.op = {
		.do_one_request = sun8i_ce_hash_run,
	},
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
	.ce_algo_id = CE_ID_HASH_SHA224,
	.alg.hash.base = {
		.init = sun8i_ce_hash_init,
		.update = sun8i_ce_hash_update,
		.final = sun8i_ce_hash_final,
		.finup = sun8i_ce_hash_finup,
		.digest = sun8i_ce_hash_digest,
		.export = sun8i_ce_hash_export,
		.import = sun8i_ce_hash_import,
		.init_tfm = sun8i_ce_hash_init_tfm,
		.exit_tfm = sun8i_ce_hash_exit_tfm,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-sun8i-ce",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
				.cra_module = THIS_MODULE,
			}
		}
	},
	.alg.hash.op = {
		.do_one_request = sun8i_ce_hash_run,
	},
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
	.ce_algo_id = CE_ID_HASH_SHA256,
	.alg.hash.base = {
		.init = sun8i_ce_hash_init,
		.update = sun8i_ce_hash_update,
		.final = sun8i_ce_hash_final,
		.finup = sun8i_ce_hash_finup,
		.digest = sun8i_ce_hash_digest,
		.export = sun8i_ce_hash_export,
		.import = sun8i_ce_hash_import,
		.init_tfm = sun8i_ce_hash_init_tfm,
		.exit_tfm = sun8i_ce_hash_exit_tfm,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-sun8i-ce",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
				.cra_module = THIS_MODULE,
			}
		}
	},
	.alg.hash.op = {
		.do_one_request = sun8i_ce_hash_run,
	},
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
	.ce_algo_id = CE_ID_HASH_SHA384,
	.alg.hash.base = {
		.init = sun8i_ce_hash_init,
		.update = sun8i_ce_hash_update,
		.final = sun8i_ce_hash_final,
		.finup = sun8i_ce_hash_finup,
		.digest = sun8i_ce_hash_digest,
		.export = sun8i_ce_hash_export,
		.import = sun8i_ce_hash_import,
		.init_tfm = sun8i_ce_hash_init_tfm,
		.exit_tfm = sun8i_ce_hash_exit_tfm,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct sha512_state),
			.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-sun8i-ce",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
				.cra_module = THIS_MODULE,
			}
		}
	},
	.alg.hash.op = {
		.do_one_request = sun8i_ce_hash_run,
	},
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
	.ce_algo_id = CE_ID_HASH_SHA512,
	.alg.hash.base = {
		.init = sun8i_ce_hash_init,
		.update = sun8i_ce_hash_update,
		.final = sun8i_ce_hash_final,
		.finup = sun8i_ce_hash_finup,
		.digest = sun8i_ce_hash_digest,
		.export = sun8i_ce_hash_export,
		.import = sun8i_ce_hash_import,
		.init_tfm = sun8i_ce_hash_init_tfm,
		.exit_tfm = sun8i_ce_hash_exit_tfm,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct sha512_state),
			.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-sun8i-ce",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
				.cra_module = THIS_MODULE,
			}
		}
	},
	.alg.hash.op = {
		.do_one_request = sun8i_ce_hash_run,
	},
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG
	/* Pseudo random number generator */
	{
	.type = CRYPTO_ALG_TYPE_RNG,
	.alg.rng = {
		.base = {
			.cra_name		= "stdrng",
			.cra_driver_name	= "sun8i-ce-prng",
			.cra_priority		= 300,
			.cra_ctxsize		= sizeof(struct sun8i_ce_rng_tfm_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= sun8i_ce_prng_init,
			.cra_exit		= sun8i_ce_prng_exit,
		},
		.generate               = sun8i_ce_prng_generate,
		.seed                   = sun8i_ce_prng_seed,
		.seedsize               = PRNG_SEED_SIZE,
	}
	},
#endif
};
636
/*
 * sun8i_ce_debugfs_show() - dump per-channel and per-algorithm statistics
 *
 * Backs the "stats" debugfs file. Without CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
 * the per-channel counters are compiled out and shown as 0.
 */
static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
{
	struct sun8i_ce_dev *ce __maybe_unused = seq->private;
	unsigned int i;

	/* Per-flow request counters */
	for (i = 0; i < MAXFLOW; i++)
		seq_printf(seq, "Channel %d: nreq %lu\n", i,
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
			   ce->chanlist[i].stat_req);
#else
			   0ul);
#endif

	/* Per-algorithm request/fallback counters; skip unregistered entries */
	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
				   ce_algs[i].alg.skcipher.base.base.cra_driver_name,
				   ce_algs[i].alg.skcipher.base.base.cra_name,
				   ce_algs[i].stat_req, ce_algs[i].stat_fb);
			seq_printf(seq, "\tLast fallback is: %s\n",
				   ce_algs[i].fbname);
			seq_printf(seq, "\tFallback due to 0 length: %lu\n",
				   ce_algs[i].stat_fb_len0);
			seq_printf(seq, "\tFallback due to length !mod16: %lu\n",
				   ce_algs[i].stat_fb_mod16);
			seq_printf(seq, "\tFallback due to length < IV: %lu\n",
				   ce_algs[i].stat_fb_leniv);
			seq_printf(seq, "\tFallback due to source alignment: %lu\n",
				   ce_algs[i].stat_fb_srcali);
			seq_printf(seq, "\tFallback due to dest alignment: %lu\n",
				   ce_algs[i].stat_fb_dstali);
			seq_printf(seq, "\tFallback due to source length: %lu\n",
				   ce_algs[i].stat_fb_srclen);
			seq_printf(seq, "\tFallback due to dest length: %lu\n",
				   ce_algs[i].stat_fb_dstlen);
			seq_printf(seq, "\tFallback due to SG numbers: %lu\n",
				   ce_algs[i].stat_fb_maxsg);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
				   ce_algs[i].alg.hash.base.halg.base.cra_driver_name,
				   ce_algs[i].alg.hash.base.halg.base.cra_name,
				   ce_algs[i].stat_req, ce_algs[i].stat_fb);
			seq_printf(seq, "\tLast fallback is: %s\n",
				   ce_algs[i].fbname);
			seq_printf(seq, "\tFallback due to 0 length: %lu\n",
				   ce_algs[i].stat_fb_len0);
			seq_printf(seq, "\tFallback due to length: %lu\n",
				   ce_algs[i].stat_fb_srclen);
			seq_printf(seq, "\tFallback due to alignment: %lu\n",
				   ce_algs[i].stat_fb_srcali);
			seq_printf(seq, "\tFallback due to SG numbers: %lu\n",
				   ce_algs[i].stat_fb_maxsg);
			break;
		case CRYPTO_ALG_TYPE_RNG:
			seq_printf(seq, "%s %s reqs=%lu bytes=%lu\n",
				   ce_algs[i].alg.rng.base.cra_driver_name,
				   ce_algs[i].alg.rng.base.cra_name,
				   ce_algs[i].stat_req, ce_algs[i].stat_bytes);
			break;
		}
	}
	/* hwrng counters only exist when both TRNG and DEBUG are enabled */
#if defined(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) && \
    defined(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)
	seq_printf(seq, "HWRNG %lu %lu\n",
		   ce->hwrng_stat_req, ce->hwrng_stat_bytes);
#endif
	return 0;
}
709
/* Generates sun8i_ce_debugfs_fops wrapping sun8i_ce_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs);
711
sun8i_ce_free_chanlist(struct sun8i_ce_dev * ce,int i)712 static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
713 {
714 while (i >= 0) {
715 crypto_engine_exit(ce->chanlist[i].engine);
716 if (ce->chanlist[i].tl)
717 dma_free_coherent(ce->dev, sizeof(struct ce_task),
718 ce->chanlist[i].tl,
719 ce->chanlist[i].t_phy);
720 i--;
721 }
722 }
723
724 /*
725 * Allocate the channel list structure
726 */
sun8i_ce_allocate_chanlist(struct sun8i_ce_dev * ce)727 static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
728 {
729 int i, err;
730
731 ce->chanlist = devm_kcalloc(ce->dev, MAXFLOW,
732 sizeof(struct sun8i_ce_flow), GFP_KERNEL);
733 if (!ce->chanlist)
734 return -ENOMEM;
735
736 for (i = 0; i < MAXFLOW; i++) {
737 init_completion(&ce->chanlist[i].complete);
738
739 ce->chanlist[i].engine = crypto_engine_alloc_init(ce->dev, true);
740 if (!ce->chanlist[i].engine) {
741 dev_err(ce->dev, "Cannot allocate engine\n");
742 i--;
743 err = -ENOMEM;
744 goto error_engine;
745 }
746 err = crypto_engine_start(ce->chanlist[i].engine);
747 if (err) {
748 dev_err(ce->dev, "Cannot start engine\n");
749 goto error_engine;
750 }
751 ce->chanlist[i].tl = dma_alloc_coherent(ce->dev,
752 sizeof(struct ce_task),
753 &ce->chanlist[i].t_phy,
754 GFP_KERNEL);
755 if (!ce->chanlist[i].tl) {
756 dev_err(ce->dev, "Cannot get DMA memory for task %d\n",
757 i);
758 err = -ENOMEM;
759 goto error_engine;
760 }
761 ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
762 GFP_KERNEL | GFP_DMA);
763 if (!ce->chanlist[i].bounce_iv) {
764 err = -ENOMEM;
765 goto error_engine;
766 }
767 ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
768 GFP_KERNEL);
769 if (!ce->chanlist[i].backup_iv) {
770 err = -ENOMEM;
771 goto error_engine;
772 }
773 }
774 return 0;
775 error_engine:
776 sun8i_ce_free_chanlist(ce, i);
777 return err;
778 }
779
780 /*
781 * Power management strategy: The device is suspended unless a TFM exists for
782 * one of the algorithms proposed by this driver.
783 */
sun8i_ce_pm_suspend(struct device * dev)784 static int sun8i_ce_pm_suspend(struct device *dev)
785 {
786 struct sun8i_ce_dev *ce = dev_get_drvdata(dev);
787 int i;
788
789 reset_control_assert(ce->reset);
790 for (i = 0; i < CE_MAX_CLOCKS; i++)
791 clk_disable_unprepare(ce->ceclks[i]);
792 return 0;
793 }
794
/*
 * Runtime resume: enable every clock declared by the variant, then release
 * the reset line. On any failure, roll back via sun8i_ce_pm_suspend().
 */
static int sun8i_ce_pm_resume(struct device *dev)
{
	struct sun8i_ce_dev *ce = dev_get_drvdata(dev);
	int err, i;

	for (i = 0; i < CE_MAX_CLOCKS; i++) {
		/* skip clock slots not used by this variant */
		if (!ce->variant->ce_clks[i].name)
			continue;
		err = clk_prepare_enable(ce->ceclks[i]);
		if (err) {
			dev_err(ce->dev, "Cannot prepare_enable %s\n",
				ce->variant->ce_clks[i].name);
			goto error;
		}
	}
	err = reset_control_deassert(ce->reset);
	if (err) {
		dev_err(ce->dev, "Cannot deassert reset control\n");
		goto error;
	}
	return 0;
error:
	sun8i_ce_pm_suspend(dev);
	return err;
}
820
/* Runtime PM callbacks only; no system sleep handlers are provided */
static const struct dev_pm_ops sun8i_ce_pm_ops = {
	SET_RUNTIME_PM_OPS(sun8i_ce_pm_suspend, sun8i_ce_pm_resume, NULL)
};
824
sun8i_ce_pm_init(struct sun8i_ce_dev * ce)825 static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
826 {
827 int err;
828
829 pm_runtime_use_autosuspend(ce->dev);
830 pm_runtime_set_autosuspend_delay(ce->dev, 2000);
831
832 err = pm_runtime_set_suspended(ce->dev);
833 if (err)
834 return err;
835 pm_runtime_enable(ce->dev);
836 return err;
837 }
838
/* Counterpart of sun8i_ce_pm_init(): disable runtime PM on teardown. */
static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
{
	pm_runtime_disable(ce->dev);
}
843
/*
 * sun8i_ce_get_clks() - look up and configure the clocks of this variant
 *
 * For each clock slot declared by the variant: get the clock by name,
 * force its rate if .freq is non-zero, and warn when the current rate
 * exceeds the datasheet maximum (.max_freq).
 * Returns 0 on success or a negative errno.
 */
static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
{
	unsigned long cr;
	int err, i;

	for (i = 0; i < CE_MAX_CLOCKS; i++) {
		if (!ce->variant->ce_clks[i].name)
			continue;
		ce->ceclks[i] = devm_clk_get(ce->dev, ce->variant->ce_clks[i].name);
		if (IS_ERR(ce->ceclks[i])) {
			err = PTR_ERR(ce->ceclks[i]);
			dev_err(ce->dev, "Cannot get %s CE clock err=%d\n",
				ce->variant->ce_clks[i].name, err);
			return err;
		}
		cr = clk_get_rate(ce->ceclks[i]);
		/* a zero rate means the clock tree is misconfigured */
		if (!cr)
			return -EINVAL;
		if (ce->variant->ce_clks[i].freq > 0 &&
		    cr != ce->variant->ce_clks[i].freq) {
			dev_info(ce->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n",
				 ce->variant->ce_clks[i].name,
				 ce->variant->ce_clks[i].freq,
				 ce->variant->ce_clks[i].freq / 1000000,
				 cr, cr / 1000000);
			/* a rate-set failure is only logged, not fatal */
			err = clk_set_rate(ce->ceclks[i], ce->variant->ce_clks[i].freq);
			if (err)
				dev_err(ce->dev, "Fail to set %s clk speed to %lu hz\n",
					ce->variant->ce_clks[i].name,
					ce->variant->ce_clks[i].freq);
		}
		if (ce->variant->ce_clks[i].max_freq > 0 &&
		    cr > ce->variant->ce_clks[i].max_freq)
			dev_warn(ce->dev, "Frequency for %s (%lu hz) is higher than datasheet's recommendation (%lu hz)",
				 ce->variant->ce_clks[i].name, cr,
				 ce->variant->ce_clks[i].max_freq);
	}
	return 0;
}
883
/*
 * sun8i_ce_register_algs() - register every ce_algs[] entry the variant
 * supports with the crypto API
 *
 * Entries whose algorithm or block mode is not supported by this variant
 * (or whose registration fails) get .ce = NULL, which marks them as
 * inactive for the debugfs and unregister paths.
 * Returns 0, or the first skcipher/ahash registration error.
 */
static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
{
	int ce_method, err, id;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		ce_algs[i].ce = ce;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			/* check both the cipher and the block mode */
			id = ce_algs[i].ce_algo_id;
			ce_method = ce->variant->alg_cipher[id];
			if (ce_method == CE_ID_NOTSUPP) {
				dev_dbg(ce->dev,
					"DEBUG: Algo of %s not supported\n",
					ce_algs[i].alg.skcipher.base.base.cra_name);
				ce_algs[i].ce = NULL;
				break;
			}
			id = ce_algs[i].ce_blockmode;
			ce_method = ce->variant->op_mode[id];
			if (ce_method == CE_ID_NOTSUPP) {
				dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n",
					ce_algs[i].alg.skcipher.base.base.cra_name);
				ce_algs[i].ce = NULL;
				break;
			}
			dev_info(ce->dev, "Register %s\n",
				 ce_algs[i].alg.skcipher.base.base.cra_name);
			err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
			if (err) {
				dev_err(ce->dev, "ERROR: Fail to register %s\n",
					ce_algs[i].alg.skcipher.base.base.cra_name);
				ce_algs[i].ce = NULL;
				return err;
			}
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			id = ce_algs[i].ce_algo_id;
			ce_method = ce->variant->alg_hash[id];
			if (ce_method == CE_ID_NOTSUPP) {
				dev_info(ce->dev,
					 "DEBUG: Algo of %s not supported\n",
					 ce_algs[i].alg.hash.base.halg.base.cra_name);
				ce_algs[i].ce = NULL;
				break;
			}
			dev_info(ce->dev, "Register %s\n",
				 ce_algs[i].alg.hash.base.halg.base.cra_name);
			err = crypto_engine_register_ahash(&ce_algs[i].alg.hash);
			if (err) {
				dev_err(ce->dev, "ERROR: Fail to register %s\n",
					ce_algs[i].alg.hash.base.halg.base.cra_name);
				ce_algs[i].ce = NULL;
				return err;
			}
			break;
		case CRYPTO_ALG_TYPE_RNG:
			if (ce->variant->prng == CE_ID_NOTSUPP) {
				dev_info(ce->dev,
					 "DEBUG: Algo of %s not supported\n",
					 ce_algs[i].alg.rng.base.cra_name);
				ce_algs[i].ce = NULL;
				break;
			}
			dev_info(ce->dev, "Register %s\n",
				 ce_algs[i].alg.rng.base.cra_name);
			/* an RNG registration failure is logged but not fatal */
			err = crypto_register_rng(&ce_algs[i].alg.rng);
			if (err) {
				dev_err(ce->dev, "Fail to register %s\n",
					ce_algs[i].alg.rng.base.cra_name);
				ce_algs[i].ce = NULL;
			}
			break;
		default:
			ce_algs[i].ce = NULL;
			dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
		}
	}
	return 0;
}
964
sun8i_ce_unregister_algs(struct sun8i_ce_dev * ce)965 static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
966 {
967 unsigned int i;
968
969 for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
970 if (!ce_algs[i].ce)
971 continue;
972 switch (ce_algs[i].type) {
973 case CRYPTO_ALG_TYPE_SKCIPHER:
974 dev_info(ce->dev, "Unregister %d %s\n", i,
975 ce_algs[i].alg.skcipher.base.base.cra_name);
976 crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
977 break;
978 case CRYPTO_ALG_TYPE_AHASH:
979 dev_info(ce->dev, "Unregister %d %s\n", i,
980 ce_algs[i].alg.hash.base.halg.base.cra_name);
981 crypto_engine_unregister_ahash(&ce_algs[i].alg.hash);
982 break;
983 case CRYPTO_ALG_TYPE_RNG:
984 dev_info(ce->dev, "Unregister %d %s\n", i,
985 ce_algs[i].alg.rng.base.cra_name);
986 crypto_unregister_rng(&ce_algs[i].alg.rng);
987 break;
988 }
989 }
990 }
991
/*
 * sun8i_ce_probe() - bind the driver to a Crypto Engine device node
 *
 * Order of initialization: variant match data, MMIO, clocks, IRQ lookup,
 * reset line, channel list, runtime PM, IRQ handler, algorithm
 * registration, then optional TRNG/hwrng and debugfs. The error labels
 * unwind in reverse order of what succeeded.
 */
static int sun8i_ce_probe(struct platform_device *pdev)
{
	struct sun8i_ce_dev *ce;
	int err, irq;
	u32 v;

	ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	ce->dev = &pdev->dev;
	platform_set_drvdata(pdev, ce);

	/* per-SoC capability table, selected via the compatible string */
	ce->variant = of_device_get_match_data(&pdev->dev);
	if (!ce->variant) {
		dev_err(&pdev->dev, "Missing Crypto Engine variant\n");
		return -EINVAL;
	}

	ce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ce->base))
		return PTR_ERR(ce->base);

	err = sun8i_ce_get_clks(ce);
	if (err)
		return err;

	/* Get Non Secure IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ce->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(ce->reset))
		return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
				     "No reset control found\n");

	mutex_init(&ce->mlock);
	mutex_init(&ce->rnglock);

	err = sun8i_ce_allocate_chanlist(ce);
	if (err)
		return err;

	err = sun8i_ce_pm_init(ce);
	if (err)
		goto error_pm;

	err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0,
			       "sun8i-ce-ns", ce);
	if (err) {
		dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
		goto error_irq;
	}

	err = sun8i_ce_register_algs(ce);
	if (err)
		goto error_alg;

	/* power up briefly to read the die ID and register the hwrng */
	err = pm_runtime_resume_and_get(ce->dev);
	if (err < 0)
		goto error_alg;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
	sun8i_ce_hwrng_register(ce);
#endif

	v = readl(ce->base + CE_CTR);
	v >>= CE_DIE_ID_SHIFT;
	v &= CE_DIE_ID_MASK;
	dev_info(&pdev->dev, "CryptoEngine Die ID %x\n", v);

	pm_runtime_put_sync(ce->dev);

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct dentry *dbgfs_dir __maybe_unused;
		struct dentry *dbgfs_stats __maybe_unused;

		/* Ignore error of debugfs */
		dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
		dbgfs_stats = debugfs_create_file("stats", 0444,
						  dbgfs_dir, ce,
						  &sun8i_ce_debugfs_fops);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
		ce->dbgfs_dir = dbgfs_dir;
		ce->dbgfs_stats = dbgfs_stats;
#endif
	}

	return 0;
error_alg:
	sun8i_ce_unregister_algs(ce);
error_irq:
	sun8i_ce_pm_exit(ce);
error_pm:
	sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
	return err;
}
1091
/*
 * sun8i_ce_remove() - unbind the driver, reversing sun8i_ce_probe()
 */
static void sun8i_ce_remove(struct platform_device *pdev)
{
	struct sun8i_ce_dev *ce = platform_get_drvdata(pdev);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
	sun8i_ce_hwrng_unregister(ce);
#endif

	sun8i_ce_unregister_algs(ce);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	debugfs_remove_recursive(ce->dbgfs_dir);
#endif

	/* free all MAXFLOW channels */
	sun8i_ce_free_chanlist(ce, MAXFLOW - 1);

	sun8i_ce_pm_exit(ce);
}
1110
/* Device-tree compatible strings mapped to their per-SoC variant tables */
static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
	{ .compatible = "allwinner,sun8i-h3-crypto",
	  .data = &ce_h3_variant },
	{ .compatible = "allwinner,sun8i-r40-crypto",
	  .data = &ce_r40_variant },
	{ .compatible = "allwinner,sun20i-d1-crypto",
	  .data = &ce_d1_variant },
	{ .compatible = "allwinner,sun50i-a64-crypto",
	  .data = &ce_a64_variant },
	{ .compatible = "allwinner,sun50i-h5-crypto",
	  .data = &ce_h5_variant },
	{ .compatible = "allwinner,sun50i-h6-crypto",
	  .data = &ce_h6_variant },
	{ .compatible = "allwinner,sun50i-h616-crypto",
	  .data = &ce_h616_variant },
	{}
};
MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);
1129
/* Platform driver glue: probe/remove callbacks, PM ops and OF matching */
static struct platform_driver sun8i_ce_driver = {
	.probe		 = sun8i_ce_probe,
	.remove_new	 = sun8i_ce_remove,
	.driver		 = {
		.name		= "sun8i-ce",
		.pm		= &sun8i_ce_pm_ops,
		.of_match_table	= sun8i_ce_crypto_of_match_table,
	},
};
1139
/* Standard module registration boilerplate */
module_platform_driver(sun8i_ce_driver);

MODULE_DESCRIPTION("Allwinner Crypto Engine cryptographic offloader");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
1145