// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 * Copyright (C) 2014-2017 Axis Communications AB
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>

/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX	32

#define PDMA_OUT_CFG		0x0000
#define PDMA_OUT_BUF_CFG	0x0004
#define PDMA_OUT_CMD		0x0008
#define PDMA_OUT_DESCRQ_PUSH	0x0010
#define PDMA_OUT_DESCRQ_STAT	0x0014

#define A6_PDMA_IN_CFG		0x0028
#define A6_PDMA_IN_BUF_CFG	0x002c
#define A6_PDMA_IN_CMD		0x0030
#define A6_PDMA_IN_STATQ_PUSH	0x0038
#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
#define A6_PDMA_IN_DESCRQ_STAT	0x0048
#define A6_PDMA_INTR_MASK	0x0068
#define A6_PDMA_ACK_INTR	0x006c
#define A6_PDMA_MASKED_INTR	0x0074

#define A7_PDMA_IN_CFG		0x002c
#define A7_PDMA_IN_BUF_CFG	0x0030
#define A7_PDMA_IN_CMD		0x0034
#define A7_PDMA_IN_STATQ_PUSH	0x003c
#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
#define A7_PDMA_IN_DESCRQ_STAT	0x004C
#define A7_PDMA_INTR_MASK	0x006c
#define A7_PDMA_ACK_INTR	0x0070
#define A7_PDMA_MASKED_INTR	0x0078

#define PDMA_OUT_CFG_EN				BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)

#define PDMA_OUT_CMD_START			BIT(0)
#define A6_PDMA_OUT_CMD_STOP			BIT(3)
#define A7_PDMA_OUT_CMD_STOP			BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define PDMA_IN_CFG_EN				BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)

#define PDMA_IN_CMD_START			BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
#define A6_PDMA_IN_CMD_STOP			BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
#define A7_PDMA_IN_CMD_STOP			BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)

#define A6_CRY_MD_OPER		GENMASK(19, 16)

#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)

#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR	BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)

#define A7_CRY_MD_OPER		GENMASK(11, 8)

#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)

#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR	BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)

/* DMA metadata constants */
#define regk_crypto_aes_cbc	0x00000002
#define regk_crypto_aes_ctr	0x00000003
#define regk_crypto_aes_ecb	0x00000001
#define regk_crypto_aes_gcm	0x00000004
#define regk_crypto_aes_xts	0x00000005
#define regk_crypto_cache	0x00000002
#define a6_regk_crypto_dlkey	0x0000000a
#define a7_regk_crypto_dlkey	0x0000000e
#define regk_crypto_ext		0x00000001
#define regk_crypto_hmac_sha1	0x00000007
#define regk_crypto_hmac_sha256	0x00000009
#define regk_crypto_init	0x00000000
#define regk_crypto_key_128	0x00000000
#define regk_crypto_key_192	0x00000001
#define regk_crypto_key_256	0x00000002
#define regk_crypto_null	0x00000000
#define regk_crypto_sha1	0x00000006
#define regk_crypto_sha256	0x00000008

/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr data;
		struct pdma_short_descr shrt;
	};
};

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
};

/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT	64

#define MODULE_NAME		"Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1	1
#define ARTPEC6_CRYPTO_HASH_SHA256	2

/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5

/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata word inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *     |      |        |       |         |
 *     |      |        |       |         |
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */
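
/*
 * A sketch of the out-channel packet sequence for a plain cipher request,
 * as built by artpec6_crypto_prepare_crypto() below:
 *
 *   packet 1: [KEY_MD][KEY]             key download, ends with .eop
 *   packet 2: [CIPHER_MD][IV][data...]  metadata, IV and payload, ends
 *                                       with .eop
 */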

struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes of data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};

enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
	void *zero_buffer;
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};

struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA256_BLOCK_SIZE];
	char partial_buffer_out[SHA256_BLOCK_SIZE];
	char key_buffer[SHA256_BLOCK_SIZE];
	char pad_buffer[SHA256_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};

struct artpec6_hash_export_state {
	char partial_buffer[SHA256_BLOCK_SIZE];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};

struct artpec6_hashalg_context {
	char hmac_key[SHA256_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};

struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_sync_skcipher *fallback;
};

struct artpec6_crypto_aead_hw_ctx {
	__be64 aad_length_bits;
	__be64 text_length_bits;
	__u8 J0[AES_BLOCK_SIZE];
};

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};

/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};

static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);

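/*
 * Cursor for walking a scatterlist one chunk at a time: @sg is the
 * current entry and @offset the number of bytes already consumed from it.
 */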
struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}

	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}

static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg,
				     1,
				     b->buf,
				     b->length,
				     b->offset);

		list_del(&b->list);
		kfree(b);
	}
}

static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}

static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EBUSY;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
		ret = -EINPROGRESS;
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}

static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}

static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}

static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}

/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length exceeds 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}
	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;
	memcpy(d->shrt.data, dst, len);
	return 0;
}

static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				       struct page *page, size_t offset,
				       size_t size,
				       enum dma_data_direction dir,
				       dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	*dma_addr_out = 0;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}

static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					   dma_addr_out);
}

static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
					    sizeof(dma->in[0]) * dma->in_cnt,
					    DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
					    sizeof(dma->out[0]) * dma->out_cnt,
					    DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be written.
	 */
	return artpec6_crypto_dma_map_single(common,
					     dma->stat,
					     sizeof(dma->stat[0]) * dma->in_cnt,
					     DMA_BIDIRECTIONAL,
					     &dma->stat_dma_addr);
}

static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}

/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is less than 7 bytes then
 *             a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						    DMA_TO_DEVICE,
						    &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}

/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 *
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
				   dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 *
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   True if this is the last data buffer in the request (i.e. an
 *          interrupt is needed)
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			      void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					    DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}

static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
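	/* Over-allocate by two cache lines so that a window of
	 * ARTPEC_CACHE_LINE_MAX bytes, aligned to ARTPEC_CACHE_LINE_MAX,
	 * always fits after the struct, wherever kzalloc places it.
	 */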
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}

static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}

static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire cache line is owned by the DMA buffer, and this also
		 * holds when coherent DMA is used.
		 */
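		/* Example: with 32-byte lines, a chunk starting at ...0x10
		 * first bounces 16 bytes; mapping then resumes at the
		 * aligned ...0x20 boundary on the next iteration.
		 */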
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_FROM_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}

static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
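			/* Handle a 1-3 byte unaligned head by copying it
			 * into a short descriptor (the data is embedded in
			 * the descriptor itself), so the DMA mapped portion
			 * starts 4-byte aligned.
			 */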
			char buf[3];

			chunk = min_t(size_t, chunk, (4-(addr&3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_TO_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								  dma_addr,
								  chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}


/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor is empty or has overflown
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
		       MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;

	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}

/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
		       MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;
	return 0;
}

/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total length of the hashed data in bytes
 * @bitcount: The total length of the hashed data in bits
 *
 * @return The total number of padding bytes written to @dst
 */
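/*
 * Worked example (SHA-256, 3 bytes hashed): mod = 64, target = 55 after
 * the 0x80 byte is accounted for, diff = 3, so pad_bytes = 52. The pad
 * is 0x80, 52 zero bytes and an 8 byte big-endian bit count: 61 bytes,
 * bringing the message up to a 64 byte multiple.
 */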
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}

static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
				      struct crypto_async_request *parent,
				      void (*complete)(struct crypto_async_request *req),
				      struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}

static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
	common->dma = NULL;
	return 0;
}

/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);
	int ret;

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}


	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;

	/*
	 * The hardware uses only the last 32-bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fall back if the counter is going to
	 * overflow.
	 */
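	/* e.g. counter = 0xffffffff with nblks = 2: the 32-bit sum wraps to
	 * 1, which is smaller than counter, so the check below triggers.
	 */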
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, nblks);

		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
						  ctx->key_length);
		if (ret)
			return ret;

		{
			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}
		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}

static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}

/*
 * AEAD functions
 */
static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}

static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
				       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32)
		return -EINVAL;

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}

static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);

	/* Upload HMAC key, must be the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		/* Copy and pad up the key */
		memcpy(req_ctx->key_buffer, ctx->hmac_key,
		       ctx->hmac_key_length);
		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
		       blocksize - ctx->hmac_key_length);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}

	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}

	/* Set up metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);

		if (error)
			return error;
	}

	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will write at least
			 * some bytes to the HW. Empty this partial buffer
			 * before tackling the SG lists.
			 */
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);

			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			/* Reset partial buffer */
			done_bytes += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		artpec6_crypto_walk_init(&walk, areq->src);

		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
							   ready_bytes -
							   done_bytes);
		if (error)
			return error;

		if (walk.sg) {
			size_t sg_skip = ready_bytes - done_bytes;
			size_t sg_rem = areq->nbytes - sg_skip;

			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req_ctx->partial_buffer +
					   req_ctx->partial_bytes,
					   sg_rem, sg_skip);

			req_ctx->partial_bytes += sg_rem;
		}

		req_ctx->digcnt += ready_bytes;
		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
	}

	/* Finalize */
	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
		size_t hash_pad_len;
		u64 digest_bits;
		u32 oper;

		if (variant == ARTPEC6_CRYPTO)
			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
		else
			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);

		/* Write out the partial buffer if present */
		if (req_ctx->partial_bytes) {
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);
			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			req_ctx->digcnt += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

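		/* For HMAC the inner hash has already consumed one full key
		 * block, so the padded length must include one extra
		 * blocksize worth of bits.
		 */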
		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
			digest_bits = 8 * (req_ctx->digcnt + blocksize);
		else
			digest_bits = 8 * req_ctx->digcnt;

		/* Add the hash pad */
		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
					       req_ctx->digcnt, digest_bits);
		error = artpec6_crypto_setup_out_descr(common,
						      req_ctx->pad_buffer,
						      hash_pad_len, false,
						      true);
		req_ctx->digcnt = 0;

		if (error)
			return error;

		/* Descriptor for the final result */
		error = artpec6_crypto_setup_in_descr(common, areq->result,
						      digestsize,
						      true);
		if (error)
			return error;

	} else { /* This is not the final operation for this request */
		if (!run_hw)
			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;

		/* Save the result to the context */
		error = artpec6_crypto_setup_in_descr(common,
						      req_ctx->digeststate,
						      contextsize, false);
		if (error)
			return error;
		/* fall through */
	}

	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
				 HASH_FLAG_FINALIZE);

	error = artpec6_crypto_terminate_in_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_terminate_out_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_dma_map_descs(common);
	if (error)
		return error;

	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
}


static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm,
				    sizeof(struct artpec6_crypto_request_context));
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;

	return 0;
}

static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback =
		crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
					   0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm,
				    sizeof(struct artpec6_crypto_request_context));
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;

	return 0;
}

static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm,
				    sizeof(struct artpec6_crypto_request_context));
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;

	return 0;
}

static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm,
				    sizeof(struct artpec6_crypto_request_context));
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;

	return 0;
}

static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
	artpec6_crypto_aes_exit(tfm);
}

static int
artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);

	switch (keylen) {
	case 16:
	case 24:
	case 32:
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

static int
artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);
	int ret;

	ret = xts_verify_key(cipher, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case 32:
	case 48:
	case 64:
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @req: The async request to process
1649 *
1650 * @return 0 if the dma job was successfully prepared
1651 * <0 on error
1652 *
1653 * This function sets up the PDMA descriptors for a block cipher request.
1654 *
1655 * The required padding is added for AES-CTR using a statically defined
1656 * buffer.
1657 *
1658 * The PDMA descriptor list will be as follows:
1659 *
1660 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
1661 * IN: <CIPHER_MD><data_0>...[data_n]<intr>
1662 *
1663 */
artpec6_crypto_prepare_crypto(struct skcipher_request * areq)1664 static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
1665 {
1666 int ret;
1667 struct artpec6_crypto_walk walk;
1668 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1669 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
1670 struct artpec6_crypto_request_context *req_ctx = NULL;
1671 size_t iv_len = crypto_skcipher_ivsize(cipher);
1672 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1673 enum artpec6_crypto_variant variant = ac->variant;
1674 struct artpec6_crypto_req_common *common;
1675 bool cipher_decr = false;
1676 size_t cipher_klen;
1677 u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
1678 u32 oper;
1679
1680 req_ctx = skcipher_request_ctx(areq);
1681 common = &req_ctx->common;
1682
1683 artpec6_crypto_init_dma_operation(common);
1684
1685 if (variant == ARTPEC6_CRYPTO)
1686 ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
1687 else
1688 ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
1689
1690 ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
1691 sizeof(ctx->key_md), false, false);
1692 if (ret)
1693 return ret;
1694
1695 ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
1696 ctx->key_length, true, false);
1697 if (ret)
1698 return ret;
1699
1700 req_ctx->cipher_md = 0;
1701
1702 if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
1703 cipher_klen = ctx->key_length/2;
1704 else
1705 cipher_klen = ctx->key_length;
1706
1707 /* Metadata */
1708 switch (cipher_klen) {
1709 case 16:
1710 cipher_len = regk_crypto_key_128;
1711 break;
1712 case 24:
1713 cipher_len = regk_crypto_key_192;
1714 break;
1715 case 32:
1716 cipher_len = regk_crypto_key_256;
1717 break;
1718 default:
1719 pr_err("%s: Invalid key length %zu!\n",
1720 MODULE_NAME, ctx->key_length);
1721 return -EINVAL;
1722 }
1723
1724 switch (ctx->crypto_type) {
1725 case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
1726 oper = regk_crypto_aes_ecb;
1727 cipher_decr = req_ctx->decrypt;
1728 break;
1729
1730 case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1731 oper = regk_crypto_aes_cbc;
1732 cipher_decr = req_ctx->decrypt;
1733 break;
1734
1735 case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
1736 oper = regk_crypto_aes_ctr;
1737 cipher_decr = false;
1738 break;
1739
1740 case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
1741 oper = regk_crypto_aes_xts;
1742 cipher_decr = req_ctx->decrypt;
1743
1744 if (variant == ARTPEC6_CRYPTO)
1745 req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
1746 else
1747 req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
1748 break;
1749
1750 default:
1751 pr_err("%s: Invalid cipher mode %d!\n",
1752 MODULE_NAME, ctx->crypto_type);
1753 return -EINVAL;
1754 }
1755
1756 if (variant == ARTPEC6_CRYPTO) {
1757 req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
1758 req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
1759 cipher_len);
1760 if (cipher_decr)
1761 req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
1762 } else {
1763 req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
1764 req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
1765 cipher_len);
1766 if (cipher_decr)
1767 req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
1768 }
1769
1770 ret = artpec6_crypto_setup_out_descr(common,
1771 &req_ctx->cipher_md,
1772 sizeof(req_ctx->cipher_md),
1773 false, false);
1774 if (ret)
1775 return ret;
1776
1777 ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
1778 if (ret)
1779 return ret;
1780
1781 if (iv_len) {
1782 ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
1783 false, false);
1784 if (ret)
1785 return ret;
1786 }
1787 /* Data out */
1788 artpec6_crypto_walk_init(&walk, areq->src);
1789 ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
1790 if (ret)
1791 return ret;
1792
1793 /* Data in */
1794 artpec6_crypto_walk_init(&walk, areq->dst);
1795 ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
1796 if (ret)
1797 return ret;
1798
1799 /* CTR-mode padding required by the HW. */
1800 if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
1801 ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
1802 size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
1803 areq->cryptlen;
1804
1805 if (pad) {
1806 ret = artpec6_crypto_setup_out_descr(common,
1807 ac->pad_buffer,
1808 pad, false, false);
1809 if (ret)
1810 return ret;
1811
1812 ret = artpec6_crypto_setup_in_descr(common,
1813 ac->pad_buffer, pad,
1814 false);
1815 if (ret)
1816 return ret;
1817 }
1818 }
1819
1820 ret = artpec6_crypto_terminate_out_descrs(common);
1821 if (ret)
1822 return ret;
1823
1824 ret = artpec6_crypto_terminate_in_descrs(common);
1825 if (ret)
1826 return ret;
1827
1828 return artpec6_crypto_dma_map_descs(common);
1829 }
1830
artpec6_crypto_prepare_aead(struct aead_request * areq)1831 static int artpec6_crypto_prepare_aead(struct aead_request *areq)
1832 {
1833 size_t count;
1834 int ret;
1835 size_t input_length;
1836 struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
1837 struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
1838 struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
1839 struct artpec6_crypto_req_common *common = &req_ctx->common;
1840 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1841 enum artpec6_crypto_variant variant = ac->variant;
1842 u32 md_cipher_len;
1843
1844 artpec6_crypto_init_dma_operation(common);
1845
1846 /* Key */
1847 if (variant == ARTPEC6_CRYPTO) {
1848 ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
1849 a6_regk_crypto_dlkey);
1850 } else {
1851 ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
1852 a7_regk_crypto_dlkey);
1853 }
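/* The metadata word selects a key-download operation (as the "dlkey"
 * register naming suggests); the raw key bytes follow in the next
 * out descriptor.
 */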
1854 ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
1855 sizeof(ctx->key_md), false, false);
1856 if (ret)
1857 return ret;
1858
1859 ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
1860 ctx->key_length, true, false);
1861 if (ret)
1862 return ret;
1863
1864 req_ctx->cipher_md = 0;
1865
1866 switch (ctx->key_length) {
1867 case 16:
1868 md_cipher_len = regk_crypto_key_128;
1869 break;
1870 case 24:
1871 md_cipher_len = regk_crypto_key_192;
1872 break;
1873 case 32:
1874 md_cipher_len = regk_crypto_key_256;
1875 break;
1876 default:
1877 return -EINVAL;
1878 }
1879
1880 if (variant == ARTPEC6_CRYPTO) {
1881 req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
1882 regk_crypto_aes_gcm);
1883 req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
1884 md_cipher_len);
1885 if (req_ctx->decrypt)
1886 req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
1887 } else {
1888 req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
1889 regk_crypto_aes_gcm);
1890 req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
1891 md_cipher_len);
1892 if (req_ctx->decrypt)
1893 req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
1894 }
1895
1896 ret = artpec6_crypto_setup_out_descr(common,
1897 (void *) &req_ctx->cipher_md,
1898 sizeof(req_ctx->cipher_md), false,
1899 false);
1900 if (ret)
1901 return ret;
1902
1903 ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
1904 if (ret)
1905 return ret;
1906
1907 /* For the decryption, cryptlen includes the tag. */
1908 input_length = areq->cryptlen;
1909 if (req_ctx->decrypt)
1910 input_length -= crypto_aead_authsize(cipher);
1911
1912 /* Prepare the context buffer */
1913 req_ctx->hw_ctx.aad_length_bits =
1914 __cpu_to_be64(8 * areq->assoclen);
1915 
1916 req_ctx->hw_ctx.text_length_bits =
1917 __cpu_to_be64(8 * input_length);
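/* These are the big-endian bit lengths len(A) and len(C) that form
 * GCM's final GHASH length block (NIST SP 800-38D).
 */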
1918
1919 memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
1920 // The HW omits the initial increment of the counter field.
1921 memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
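/* For 96-bit IVs GCM defines J0 = IV || 0^31 || 1, so the counter
 * word is pre-set to 1 here on behalf of the hardware.
 */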
1922
1923 ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
1924 sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
1925 if (ret)
1926 return ret;
1927
1928 {
1929 struct artpec6_crypto_walk walk;
1930
1931 artpec6_crypto_walk_init(&walk, areq->src);
1932
1933 /* Associated data */
1934 count = areq->assoclen;
1935 ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
1936 if (ret)
1937 return ret;
1938
1939 if (!IS_ALIGNED(areq->assoclen, 16)) {
1940 size_t assoc_pad = 16 - (areq->assoclen % 16);
1941 /* The HW mandates zero padding here */
1942 ret = artpec6_crypto_setup_out_descr(common,
1943 ac->zero_buffer,
1944 assoc_pad, false,
1945 false);
1946 if (ret)
1947 return ret;
1948 }
1949
1950 /* Data to crypto */
1951 count = input_length;
1952 ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
1953 if (ret)
1954 return ret;
1955
1956 if (!IS_ALIGNED(input_length, 16)) {
1957 size_t crypto_pad = 16 - (input_length % 16);
1958 /* The HW mandates zero padding here */
1959 ret = artpec6_crypto_setup_out_descr(common,
1960 ac->zero_buffer,
1961 crypto_pad,
1962 false,
1963 false);
1964 if (ret)
1965 return ret;
1966 }
1967 }
1968
1969 /* Data from crypto */
1970 {
1971 struct artpec6_crypto_walk walk;
1972 size_t output_len = areq->cryptlen;
1973
1974 if (req_ctx->decrypt)
1975 output_len -= crypto_aead_authsize(cipher);
1976
1977 artpec6_crypto_walk_init(&walk, areq->dst);
1978
1979 /* skip associated data in the output */
1980 count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
1981 if (count)
1982 return -EINVAL;
1983
1984 count = output_len;
1985 ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
1986 if (ret)
1987 return ret;
1988
1989 /* Put padding between the cryptotext and the auth tag */
1990 if (!IS_ALIGNED(output_len, 16)) {
1991 size_t crypto_pad = 16 - (output_len % 16);
1992
1993 ret = artpec6_crypto_setup_in_descr(common,
1994 ac->pad_buffer,
1995 crypto_pad, false);
1996 if (ret)
1997 return ret;
1998 }
1999
2000 /* The authentication tag shall follow immediately after
2001 * the output ciphertext. For decryption it is put in a context
2002 * buffer for later comparison against the input tag.
2003 */
2004
2005 if (req_ctx->decrypt) {
2006 ret = artpec6_crypto_setup_in_descr(common,
2007 req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
2008 if (ret)
2009 return ret;
2010
2011 } else {
2012 /* For encryption the requested tag size may be smaller
2013 * than the hardware's generated tag.
2014 */
2015 size_t authsize = crypto_aead_authsize(cipher);
2016
2017 ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
2018 authsize);
2019 if (ret)
2020 return ret;
2021
2022 if (authsize < AES_BLOCK_SIZE) {
2023 count = AES_BLOCK_SIZE - authsize;
2024 ret = artpec6_crypto_setup_in_descr(common,
2025 ac->pad_buffer,
2026 count, false);
2027 if (ret)
2028 return ret;
2029 }
2030 }
2031
2032 }
2033
2034 ret = artpec6_crypto_terminate_in_descrs(common);
2035 if (ret)
2036 return ret;
2037
2038 ret = artpec6_crypto_terminate_out_descrs(common);
2039 if (ret)
2040 return ret;
2041
2042 return artpec6_crypto_dma_map_descs(common);
2043 }
2044
2045 static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
2046 struct list_head *completions)
2047 {
2048 struct artpec6_crypto_req_common *req;
2049
2050 while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
2051 req = list_first_entry(&ac->queue,
2052 struct artpec6_crypto_req_common,
2053 list);
2054 list_move_tail(&req->list, &ac->pending);
2055 artpec6_crypto_start_dma(req);
2056
2057 list_add_tail(&req->complete_in_progress, completions);
2058 }
2059
2060 /*
2061 * In some cases, the hardware can raise an in_eop_flush interrupt
2062 * before actually updating the status, so we have a timer which will
2063 * recheck the status on timeout. Since the cases are expected to be
2064 * very rare, we use a relatively large timeout value. There should be
2065 * no noticeable negative effect if we timeout spuriously.
2066 */
2067 if (ac->pending_count)
2068 mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
2069 else
2070 del_timer(&ac->timer);
2071 }
2072
2073 static void artpec6_crypto_timeout(struct timer_list *t)
2074 {
2075 struct artpec6_crypto *ac = from_timer(ac, t, timer);
2076
2077 dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
2078
2079 tasklet_schedule(&ac->task);
2080 }
2081
2082 static void artpec6_crypto_task(unsigned long data)
2083 {
2084 struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
2085 struct artpec6_crypto_req_common *req;
2086 struct artpec6_crypto_req_common *n;
2087 struct list_head complete_done;
2088 struct list_head complete_in_progress;
2089
2090 INIT_LIST_HEAD(&complete_done);
2091 INIT_LIST_HEAD(&complete_in_progress);
2092
2093 if (list_empty(&ac->pending)) {
2094 pr_debug("Spurious IRQ\n");
2095 return;
2096 }
2097
2098 spin_lock(&ac->queue_lock);
2099
2100 list_for_each_entry_safe(req, n, &ac->pending, list) {
2101 struct artpec6_crypto_dma_descriptors *dma = req->dma;
2102 u32 stat;
2103 dma_addr_t stataddr;
2104
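/* Status words are 4 bytes each; only the final word of this job
 * needs to be synced before it is inspected.
 */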
2105 stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
2106 dma_sync_single_for_cpu(artpec6_crypto_dev,
2107 stataddr,
2108 4,
2109 DMA_BIDIRECTIONAL);
2110
2111 stat = req->dma->stat[req->dma->in_cnt-1];
2112
2113 /* A non-zero final status descriptor indicates
2114 * this job has finished.
2115 */
2116 pr_debug("Request %p status is %X\n", req, stat);
2117 if (!stat)
2118 break;
2119
2120 /* Allow testing of timeout handling with fault injection */
2121 #ifdef CONFIG_FAULT_INJECTION
2122 if (should_fail(&artpec6_crypto_fail_status_read, 1))
2123 continue;
2124 #endif
2125
2126 pr_debug("Completing request %p\n", req);
2127
2128 list_move_tail(&req->list, &complete_done);
2129
2130 ac->pending_count--;
2131 }
2132
2133 artpec6_crypto_process_queue(ac, &complete_in_progress);
2134
2135 spin_unlock(&ac->queue_lock);
2136
2137 /* Perform the completion callbacks without holding the queue lock
2138 * to allow new request submissions from the callbacks.
2139 */
2140 list_for_each_entry_safe(req, n, &complete_done, list) {
2141 artpec6_crypto_dma_unmap_all(req);
2142 artpec6_crypto_copy_bounce_buffers(req);
2143 artpec6_crypto_common_destroy(req);
2144
2145 req->complete(req->req);
2146 }
2147
2148 list_for_each_entry_safe(req, n, &complete_in_progress,
2149 complete_in_progress) {
2150 crypto_request_complete(req->req, -EINPROGRESS);
2151 }
2152 }
2153
2154 static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
2155 {
2156 crypto_request_complete(req, 0);
2157 }
2158
2159 static void
2160 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
2161 {
2162 struct skcipher_request *cipher_req = container_of(req,
2163 struct skcipher_request, base);
2164
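/* CBC chaining: the IV for a follow-on request is the last ciphertext
 * block, which on decryption is found in the source buffer.
 */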
2165 scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
2166 cipher_req->cryptlen - AES_BLOCK_SIZE,
2167 AES_BLOCK_SIZE, 0);
2168 skcipher_request_complete(cipher_req, 0);
2169 }
2170
2171 static void
2172 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
2173 {
2174 struct skcipher_request *cipher_req = container_of(req,
2175 struct skcipher_request, base);
2176
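/* On encryption the last ciphertext block is read back from the
 * destination buffer instead.
 */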
2177 scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
2178 cipher_req->cryptlen - AES_BLOCK_SIZE,
2179 AES_BLOCK_SIZE, 0);
2180 skcipher_request_complete(cipher_req, 0);
2181 }
2182
2183 static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
2184 {
2185 int result = 0;
2186
2187 /* Verify GCM hashtag. */
2188 struct aead_request *areq = container_of(req,
2189 struct aead_request, base);
2190 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
2191 struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
2192
2193 if (req_ctx->decrypt) {
2194 u8 input_tag[AES_BLOCK_SIZE];
2195 unsigned int authsize = crypto_aead_authsize(aead);
2196
2197 sg_pcopy_to_buffer(areq->src,
2198 sg_nents(areq->src),
2199 input_tag,
2200 authsize,
2201 areq->assoclen + areq->cryptlen -
2202 authsize);
2203
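/* crypto_memneq() compares in constant time, so the tag check does
 * not leak information through timing.
 */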
2204 if (crypto_memneq(req_ctx->decryption_tag,
2205 input_tag,
2206 authsize)) {
2207 pr_debug("***EBADMSG:\n");
2208 print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
2209 input_tag, authsize, true);
2210 print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
2211 req_ctx->decryption_tag,
2212 authsize, true);
2213
2214 result = -EBADMSG;
2215 }
2216 }
2217
2218 aead_request_complete(areq, result);
2219 }
2220
2221 static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
2222 {
2223 crypto_request_complete(req, 0);
2224 }
2225
2226
2227 /*------------------- Hash functions -----------------------------------------*/
2228 static int
2229 artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
2230 const u8 *key, unsigned int keylen)
2231 {
2232 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
2233 size_t blocksize;
2234 int ret;
2235
2236 if (!keylen) {
2237 pr_err("Invalid length (%u) of HMAC key\n",
2238 keylen);
2239 return -EINVAL;
2240 }
2241
2242 memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2243
2244 blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2245
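/* As in RFC 2104, keys longer than the block size are first digested
 * down to the digest size; shorter keys are used as-is with the zero
 * padding prepared above.
 */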
2246 if (keylen > blocksize) {
2247 tfm_ctx->hmac_key_length = blocksize;
2248
2249 ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
2250 tfm_ctx->hmac_key);
2251 if (ret)
2252 return ret;
2253 } else {
2254 memcpy(tfm_ctx->hmac_key, key, keylen);
2255 tfm_ctx->hmac_key_length = keylen;
2256 }
2257
2258 return 0;
2259 }
2260
2261 static int
2262 artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
2263 {
2264 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2265 enum artpec6_crypto_variant variant = ac->variant;
2266 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2267 u32 oper;
2268
2269 memset(req_ctx, 0, sizeof(*req_ctx));
2270
2271 req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
2272 if (hmac)
2273 req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
2274
2275 switch (type) {
2276 case ARTPEC6_CRYPTO_HASH_SHA1:
2277 oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
2278 break;
2279 case ARTPEC6_CRYPTO_HASH_SHA256:
2280 oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
2281 break;
2282 default:
2283 pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
2284 return -EINVAL;
2285 }
2286
2287 if (variant == ARTPEC6_CRYPTO)
2288 req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
2289 else
2290 req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
2291
2292 return 0;
2293 }
2294
2295 static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
2296 {
2297 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2298 int ret;
2299
2300 if (!req_ctx->common.dma) {
2301 ret = artpec6_crypto_common_init(&req_ctx->common,
2302 &req->base,
2303 artpec6_crypto_complete_hash,
2304 NULL, 0);
2305
2306 if (ret)
2307 return ret;
2308 }
2309
2310 ret = artpec6_crypto_prepare_hash(req);
2311 switch (ret) {
2312 case ARTPEC6_CRYPTO_PREPARE_HASH_START:
2313 ret = artpec6_crypto_submit(&req_ctx->common);
2314 break;
2315
2316 case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
2317 ret = 0;
2318 fallthrough;
2319
2320 default:
2321 artpec6_crypto_common_destroy(&req_ctx->common);
2322 break;
2323 }
2324
2325 return ret;
2326 }
2327
2328 static int artpec6_crypto_hash_final(struct ahash_request *req)
2329 {
2330 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2331
2332 req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
2333
2334 return artpec6_crypto_prepare_submit_hash(req);
2335 }
2336
2337 static int artpec6_crypto_hash_update(struct ahash_request *req)
2338 {
2339 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2340
2341 req_ctx->hash_flags |= HASH_FLAG_UPDATE;
2342
2343 return artpec6_crypto_prepare_submit_hash(req);
2344 }
2345
2346 static int artpec6_crypto_sha1_init(struct ahash_request *req)
2347 {
2348 return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
2349 }
2350
2351 static int artpec6_crypto_sha1_digest(struct ahash_request *req)
2352 {
2353 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2354
2355 artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
2356
2357 req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2358
2359 return artpec6_crypto_prepare_submit_hash(req);
2360 }
2361
2362 static int artpec6_crypto_sha256_init(struct ahash_request *req)
2363 {
2364 return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
2365 }
2366
2367 static int artpec6_crypto_sha256_digest(struct ahash_request *req)
2368 {
2369 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2370
2371 artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
2372 req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2373
2374 return artpec6_crypto_prepare_submit_hash(req);
2375 }
2376
2377 static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
2378 {
2379 return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
2380 }
2381
2382 static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
2383 {
2384 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2385
2386 artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
2387 req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2388
2389 return artpec6_crypto_prepare_submit_hash(req);
2390 }
2391
2392 static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
2393 const char *base_hash_name)
2394 {
2395 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2396
2397 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2398 sizeof(struct artpec6_hash_request_context));
2399 memset(tfm_ctx, 0, sizeof(*tfm_ctx));
2400
2401 if (base_hash_name) {
2402 struct crypto_shash *child;
2403
2404 child = crypto_alloc_shash(base_hash_name, 0,
2405 CRYPTO_ALG_NEED_FALLBACK);
2406
2407 if (IS_ERR(child))
2408 return PTR_ERR(child);
2409
2410 tfm_ctx->child_hash = child;
2411 }
2412
2413 return 0;
2414 }
2415
2416 static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
2417 {
2418 return artpec6_crypto_ahash_init_common(tfm, NULL);
2419 }
2420
2421 static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
2422 {
2423 return artpec6_crypto_ahash_init_common(tfm, "sha256");
2424 }
2425
2426 static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
2427 {
2428 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2429
2430 if (tfm_ctx->child_hash)
2431 crypto_free_shash(tfm_ctx->child_hash);
2432
2433 memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2434 tfm_ctx->hmac_key_length = 0;
2435 }
2436
2437 static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
2438 {
2439 const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
2440 struct artpec6_hash_export_state *state = out;
2441 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2442 enum artpec6_crypto_variant variant = ac->variant;
2443
2444 BUILD_BUG_ON(sizeof(state->partial_buffer) !=
2445 sizeof(ctx->partial_buffer));
2446 BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
2447
2448 state->digcnt = ctx->digcnt;
2449 state->partial_bytes = ctx->partial_bytes;
2450 state->hash_flags = ctx->hash_flags;
2451
2452 if (variant == ARTPEC6_CRYPTO)
2453 state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
2454 else
2455 state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
2456
2457 memcpy(state->partial_buffer, ctx->partial_buffer,
2458 sizeof(state->partial_buffer));
2459 memcpy(state->digeststate, ctx->digeststate,
2460 sizeof(state->digeststate));
2461
2462 return 0;
2463 }
2464
2465 static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
2466 {
2467 struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
2468 const struct artpec6_hash_export_state *state = in;
2469 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2470 enum artpec6_crypto_variant variant = ac->variant;
2471
2472 memset(ctx, 0, sizeof(*ctx));
2473
2474 ctx->digcnt = state->digcnt;
2475 ctx->partial_bytes = state->partial_bytes;
2476 ctx->hash_flags = state->hash_flags;
2477
2478 if (variant == ARTPEC6_CRYPTO)
2479 ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
2480 else
2481 ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
2482
2483 memcpy(ctx->partial_buffer, state->partial_buffer,
2484 sizeof(state->partial_buffer));
2485 memcpy(ctx->digeststate, state->digeststate,
2486 sizeof(state->digeststate));
2487
2488 return 0;
2489 }
2490
2491 static int init_crypto_hw(struct artpec6_crypto *ac)
2492 {
2493 enum artpec6_crypto_variant variant = ac->variant;
2494 void __iomem *base = ac->base;
2495 u32 out_descr_buf_size;
2496 u32 out_data_buf_size;
2497 u32 in_data_buf_size;
2498 u32 in_descr_buf_size;
2499 u32 in_stat_buf_size;
2500 u32 in, out;
2501
2502 /*
2503 * The PDMA unit contains 1984 bytes of internal memory for the OUT
2504 * channels and 1024 bytes for the IN channel. This is an elastic
2505 * memory used to internally store the descriptors and data. The values
2506 * are specified in 64 byte increments. TrustZone buffers are not
2507 * used at this stage.
2508 */
2509 out_data_buf_size = 16; /* 1024 bytes for data */
2510 out_descr_buf_size = 15; /* 960 bytes for descriptors */
2511 in_data_buf_size = 8; /* 512 bytes for data */
2512 in_descr_buf_size = 4; /* 256 bytes for descriptors */
2513 in_stat_buf_size = 4; /* 256 bytes for stat descrs */
2514
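/* OUT: (16 + 15) * 64 = 1984 bytes and IN: (8 + 4 + 4) * 64 = 1024
 * bytes, exactly filling the internal memories checked below.
 */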
2515 BUILD_BUG_ON_MSG((out_data_buf_size
2516 + out_descr_buf_size) * 64 > 1984,
2517 "Invalid OUT configuration");
2518
2519 BUILD_BUG_ON_MSG((in_data_buf_size
2520 + in_descr_buf_size
2521 + in_stat_buf_size) * 64 > 1024,
2522 "Invalid IN configuration");
2523
2524 in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
2525 FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
2526 FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
2527
2528 out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
2529 FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
2530
2531 writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
2532 writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
2533
2534 if (variant == ARTPEC6_CRYPTO) {
2535 writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
2536 writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
2537 writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
2538 A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
2539 base + A6_PDMA_INTR_MASK);
2540 } else {
2541 writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
2542 writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
2543 writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
2544 A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
2545 base + A7_PDMA_INTR_MASK);
2546 }
2547
2548 return 0;
2549 }
2550
2551 static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
2552 {
2553 enum artpec6_crypto_variant variant = ac->variant;
2554 void __iomem *base = ac->base;
2555
2556 if (variant == ARTPEC6_CRYPTO) {
2557 writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
2558 writel_relaxed(0, base + A6_PDMA_IN_CFG);
2559 writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
2560 } else {
2561 writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
2562 writel_relaxed(0, base + A7_PDMA_IN_CFG);
2563 writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
2564 }
2565
2566 writel_relaxed(0, base + PDMA_OUT_CFG);
2568 }
2569
2570 static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
2571 {
2572 struct artpec6_crypto *ac = dev_id;
2573 enum artpec6_crypto_variant variant = ac->variant;
2574 void __iomem *base = ac->base;
2575 u32 mask_in_data, mask_in_eop_flush;
2576 u32 in_cmd_flush_stat, in_cmd_reg;
2577 u32 ack_intr_reg;
2578 u32 ack = 0;
2579 u32 intr;
2580
2581 if (variant == ARTPEC6_CRYPTO) {
2582 intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
2583 mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
2584 mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
2585 in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
2586 in_cmd_reg = A6_PDMA_IN_CMD;
2587 ack_intr_reg = A6_PDMA_ACK_INTR;
2588 } else {
2589 intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
2590 mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
2591 mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
2592 in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
2593 in_cmd_reg = A7_PDMA_IN_CMD;
2594 ack_intr_reg = A7_PDMA_ACK_INTR;
2595 }
2596
2597 /* We get two interrupt notifications from each job.
2598 * The in_data means all data was sent to memory and then
2599 * we request a status flush command to write the per-job
2600 * status to its status vector. This ensures that the
2601 * tasklet can detect exactly how many submitted jobs
2602 * have finished.
2603 */
2604 if (intr & mask_in_data)
2605 ack |= mask_in_data;
2606
2607 if (intr & mask_in_eop_flush)
2608 ack |= mask_in_eop_flush;
2609 else
2610 writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
2611
2612 writel_relaxed(ack, base + ack_intr_reg);
2613
2614 if (intr & mask_in_eop_flush)
2615 tasklet_schedule(&ac->task);
2616
2617 return IRQ_HANDLED;
2618 }
2619
2620 /*------------------- Algorithm definitions ----------------------------------*/
2621
2622 /* Hashes */
2623 static struct ahash_alg hash_algos[] = {
2624 /* SHA-1 */
2625 {
2626 .init = artpec6_crypto_sha1_init,
2627 .update = artpec6_crypto_hash_update,
2628 .final = artpec6_crypto_hash_final,
2629 .digest = artpec6_crypto_sha1_digest,
2630 .import = artpec6_crypto_hash_import,
2631 .export = artpec6_crypto_hash_export,
2632 .halg.digestsize = SHA1_DIGEST_SIZE,
2633 .halg.statesize = sizeof(struct artpec6_hash_export_state),
2634 .halg.base = {
2635 .cra_name = "sha1",
2636 .cra_driver_name = "artpec-sha1",
2637 .cra_priority = 300,
2638 .cra_flags = CRYPTO_ALG_ASYNC |
2639 CRYPTO_ALG_ALLOCATES_MEMORY,
2640 .cra_blocksize = SHA1_BLOCK_SIZE,
2641 .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2642 .cra_module = THIS_MODULE,
2643 .cra_init = artpec6_crypto_ahash_init,
2644 .cra_exit = artpec6_crypto_ahash_exit,
2645 }
2646 },
2647 /* SHA-256 */
2648 {
2649 .init = artpec6_crypto_sha256_init,
2650 .update = artpec6_crypto_hash_update,
2651 .final = artpec6_crypto_hash_final,
2652 .digest = artpec6_crypto_sha256_digest,
2653 .import = artpec6_crypto_hash_import,
2654 .export = artpec6_crypto_hash_export,
2655 .halg.digestsize = SHA256_DIGEST_SIZE,
2656 .halg.statesize = sizeof(struct artpec6_hash_export_state),
2657 .halg.base = {
2658 .cra_name = "sha256",
2659 .cra_driver_name = "artpec-sha256",
2660 .cra_priority = 300,
2661 .cra_flags = CRYPTO_ALG_ASYNC |
2662 CRYPTO_ALG_ALLOCATES_MEMORY,
2663 .cra_blocksize = SHA256_BLOCK_SIZE,
2664 .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2665 .cra_module = THIS_MODULE,
2666 .cra_init = artpec6_crypto_ahash_init,
2667 .cra_exit = artpec6_crypto_ahash_exit,
2668 }
2669 },
2670 /* HMAC SHA-256 */
2671 {
2672 .init = artpec6_crypto_hmac_sha256_init,
2673 .update = artpec6_crypto_hash_update,
2674 .final = artpec6_crypto_hash_final,
2675 .digest = artpec6_crypto_hmac_sha256_digest,
2676 .import = artpec6_crypto_hash_import,
2677 .export = artpec6_crypto_hash_export,
2678 .setkey = artpec6_crypto_hash_set_key,
2679 .halg.digestsize = SHA256_DIGEST_SIZE,
2680 .halg.statesize = sizeof(struct artpec6_hash_export_state),
2681 .halg.base = {
2682 .cra_name = "hmac(sha256)",
2683 .cra_driver_name = "artpec-hmac-sha256",
2684 .cra_priority = 300,
2685 .cra_flags = CRYPTO_ALG_ASYNC |
2686 CRYPTO_ALG_ALLOCATES_MEMORY,
2687 .cra_blocksize = SHA256_BLOCK_SIZE,
2688 .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2689 .cra_module = THIS_MODULE,
2690 .cra_init = artpec6_crypto_ahash_init_hmac_sha256,
2691 .cra_exit = artpec6_crypto_ahash_exit,
2692 }
2693 },
2694 };
2695
2696 /* Crypto */
2697 static struct skcipher_alg crypto_algos[] = {
2698 /* AES - ECB */
2699 {
2700 .base = {
2701 .cra_name = "ecb(aes)",
2702 .cra_driver_name = "artpec6-ecb-aes",
2703 .cra_priority = 300,
2704 .cra_flags = CRYPTO_ALG_ASYNC |
2705 CRYPTO_ALG_ALLOCATES_MEMORY,
2706 .cra_blocksize = AES_BLOCK_SIZE,
2707 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2708 .cra_alignmask = 3,
2709 .cra_module = THIS_MODULE,
2710 },
2711 .min_keysize = AES_MIN_KEY_SIZE,
2712 .max_keysize = AES_MAX_KEY_SIZE,
2713 .setkey = artpec6_crypto_cipher_set_key,
2714 .encrypt = artpec6_crypto_encrypt,
2715 .decrypt = artpec6_crypto_decrypt,
2716 .init = artpec6_crypto_aes_ecb_init,
2717 .exit = artpec6_crypto_aes_exit,
2718 },
2719 /* AES - CTR */
2720 {
2721 .base = {
2722 .cra_name = "ctr(aes)",
2723 .cra_driver_name = "artpec6-ctr-aes",
2724 .cra_priority = 300,
2725 .cra_flags = CRYPTO_ALG_ASYNC |
2726 CRYPTO_ALG_ALLOCATES_MEMORY |
2727 CRYPTO_ALG_NEED_FALLBACK,
2728 .cra_blocksize = 1,
2729 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2730 .cra_alignmask = 3,
2731 .cra_module = THIS_MODULE,
2732 },
2733 .min_keysize = AES_MIN_KEY_SIZE,
2734 .max_keysize = AES_MAX_KEY_SIZE,
2735 .ivsize = AES_BLOCK_SIZE,
2736 .setkey = artpec6_crypto_cipher_set_key,
2737 .encrypt = artpec6_crypto_ctr_encrypt,
2738 .decrypt = artpec6_crypto_ctr_decrypt,
2739 .init = artpec6_crypto_aes_ctr_init,
2740 .exit = artpec6_crypto_aes_ctr_exit,
2741 },
2742 /* AES - CBC */
2743 {
2744 .base = {
2745 .cra_name = "cbc(aes)",
2746 .cra_driver_name = "artpec6-cbc-aes",
2747 .cra_priority = 300,
2748 .cra_flags = CRYPTO_ALG_ASYNC |
2749 CRYPTO_ALG_ALLOCATES_MEMORY,
2750 .cra_blocksize = AES_BLOCK_SIZE,
2751 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2752 .cra_alignmask = 3,
2753 .cra_module = THIS_MODULE,
2754 },
2755 .min_keysize = AES_MIN_KEY_SIZE,
2756 .max_keysize = AES_MAX_KEY_SIZE,
2757 .ivsize = AES_BLOCK_SIZE,
2758 .setkey = artpec6_crypto_cipher_set_key,
2759 .encrypt = artpec6_crypto_encrypt,
2760 .decrypt = artpec6_crypto_decrypt,
2761 .init = artpec6_crypto_aes_cbc_init,
2762 .exit = artpec6_crypto_aes_exit
2763 },
2764 /* AES - XTS */
2765 {
2766 .base = {
2767 .cra_name = "xts(aes)",
2768 .cra_driver_name = "artpec6-xts-aes",
2769 .cra_priority = 300,
2770 .cra_flags = CRYPTO_ALG_ASYNC |
2771 CRYPTO_ALG_ALLOCATES_MEMORY,
2772 .cra_blocksize = 1,
2773 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2774 .cra_alignmask = 3,
2775 .cra_module = THIS_MODULE,
2776 },
2777 .min_keysize = 2*AES_MIN_KEY_SIZE,
2778 .max_keysize = 2*AES_MAX_KEY_SIZE,
2779 .ivsize = 16,
2780 .setkey = artpec6_crypto_xts_set_key,
2781 .encrypt = artpec6_crypto_encrypt,
2782 .decrypt = artpec6_crypto_decrypt,
2783 .init = artpec6_crypto_aes_xts_init,
2784 .exit = artpec6_crypto_aes_exit,
2785 },
2786 };
2787
2788 static struct aead_alg aead_algos[] = {
2789 {
2790 .init = artpec6_crypto_aead_init,
2791 .setkey = artpec6_crypto_aead_set_key,
2792 .encrypt = artpec6_crypto_aead_encrypt,
2793 .decrypt = artpec6_crypto_aead_decrypt,
2794 .ivsize = GCM_AES_IV_SIZE,
2795 .maxauthsize = AES_BLOCK_SIZE,
2796
2797 .base = {
2798 .cra_name = "gcm(aes)",
2799 .cra_driver_name = "artpec-gcm-aes",
2800 .cra_priority = 300,
2801 .cra_flags = CRYPTO_ALG_ASYNC |
2802 CRYPTO_ALG_ALLOCATES_MEMORY |
2803 CRYPTO_ALG_KERN_DRIVER_ONLY,
2804 .cra_blocksize = 1,
2805 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2806 .cra_alignmask = 3,
2807 .cra_module = THIS_MODULE,
2808 },
2809 }
2810 };
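/*
 * Illustrative sketch only, not part of the driver: a kernel user would
 * reach these algorithms through the generic crypto API, e.g. for GCM
 * (error handling elided, "key" is a hypothetical caller buffer):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, 16);	// AES-128
 *	crypto_aead_setauthsize(tfm, 16);	// full 16-byte tag
 *	...set up an aead_request and call crypto_aead_encrypt()...
 *	crypto_free_aead(tfm);
 */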
2811
2812 #ifdef CONFIG_DEBUG_FS
2813
2814 static struct dentry *dbgfs_root;
2815
2816 static void artpec6_crypto_init_debugfs(void)
2817 {
2818 dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
2819
2820 #ifdef CONFIG_FAULT_INJECTION
2821 fault_create_debugfs_attr("fail_status_read", dbgfs_root,
2822 &artpec6_crypto_fail_status_read);
2823
2824 fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
2825 &artpec6_crypto_fail_dma_array_full);
2826 #endif
2827 }
2828
2829 static void artpec6_crypto_free_debugfs(void)
2830 {
2831 debugfs_remove_recursive(dbgfs_root);
2832 dbgfs_root = NULL;
2833 }
2834 #endif
2835
2836 static const struct of_device_id artpec6_crypto_of_match[] = {
2837 { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
2838 { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
2839 {}
2840 };
2841 MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
2842
2843 static int artpec6_crypto_probe(struct platform_device *pdev)
2844 {
2845 const struct of_device_id *match;
2846 enum artpec6_crypto_variant variant;
2847 struct artpec6_crypto *ac;
2848 struct device *dev = &pdev->dev;
2849 void __iomem *base;
2850 int irq;
2851 int err;
2852
2853 if (artpec6_crypto_dev)
2854 return -ENODEV;
2855
2856 match = of_match_node(artpec6_crypto_of_match, dev->of_node);
2857 if (!match)
2858 return -EINVAL;
2859
2860 variant = (enum artpec6_crypto_variant)match->data;
2861
2862 base = devm_platform_ioremap_resource(pdev, 0);
2863 if (IS_ERR(base))
2864 return PTR_ERR(base);
2865
2866 irq = platform_get_irq(pdev, 0);
2867 if (irq < 0)
2868 return -ENODEV;
2869
2870 ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
2871 GFP_KERNEL);
2872 if (!ac)
2873 return -ENOMEM;
2874
2875 platform_set_drvdata(pdev, ac);
2876 ac->variant = variant;
2877
2878 spin_lock_init(&ac->queue_lock);
2879 INIT_LIST_HEAD(&ac->queue);
2880 INIT_LIST_HEAD(&ac->pending);
2881 timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
2882
2883 ac->base = base;
2884
2885 ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
2886 sizeof(struct artpec6_crypto_dma_descriptors),
2887 64,
2888 0,
2889 NULL);
2890 if (!ac->dma_cache)
2891 return -ENOMEM;
2892
2893 #ifdef CONFIG_DEBUG_FS
2894 artpec6_crypto_init_debugfs();
2895 #endif
2896
2897 tasklet_init(&ac->task, artpec6_crypto_task,
2898 (unsigned long)ac);
2899
2900 ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
2901 GFP_KERNEL);
2902 if (!ac->pad_buffer)
2903 return -ENOMEM;
2904 ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
2905
2906 ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
2907 GFP_KERNEL);
2908 if (!ac->zero_buffer)
2909 return -ENOMEM;
2910 ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
2911
2912 err = init_crypto_hw(ac);
2913 if (err)
2914 goto free_cache;
2915
2916 err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
2917 "artpec6-crypto", ac);
2918 if (err)
2919 goto disable_hw;
2920
2921 artpec6_crypto_dev = &pdev->dev;
2922
2923 err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
2924 if (err) {
2925 dev_err(dev, "Failed to register ahashes\n");
2926 goto disable_hw;
2927 }
2928
2929 err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
2930 if (err) {
2931 dev_err(dev, "Failed to register ciphers\n");
2932 goto unregister_ahashes;
2933 }
2934
2935 err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
2936 if (err) {
2937 dev_err(dev, "Failed to register aeads\n");
2938 goto unregister_algs;
2939 }
2940
2941 return 0;
2942
2943 unregister_algs:
2944 crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
2945 unregister_ahashes:
2946 crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
2947 disable_hw:
2948 artpec6_crypto_disable_hw(ac);
2949 free_cache:
2950 kmem_cache_destroy(ac->dma_cache);
2951 return err;
2952 }
2953
2954 static void artpec6_crypto_remove(struct platform_device *pdev)
2955 {
2956 struct artpec6_crypto *ac = platform_get_drvdata(pdev);
2957 int irq = platform_get_irq(pdev, 0);
2958
2959 crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
2960 crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
2961 crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
2962
2963 tasklet_disable(&ac->task);
2964 devm_free_irq(&pdev->dev, irq, ac);
2965 tasklet_kill(&ac->task);
2966 del_timer_sync(&ac->timer);
2967
2968 artpec6_crypto_disable_hw(ac);
2969
2970 kmem_cache_destroy(ac->dma_cache);
2971 #ifdef CONFIG_DEBUG_FS
2972 artpec6_crypto_free_debugfs();
2973 #endif
2974 }
2975
2976 static struct platform_driver artpec6_crypto_driver = {
2977 .probe = artpec6_crypto_probe,
2978 .remove_new = artpec6_crypto_remove,
2979 .driver = {
2980 .name = "artpec6-crypto",
2981 .of_match_table = artpec6_crypto_of_match,
2982 },
2983 };
2984
2985 module_platform_driver(artpec6_crypto_driver);
2986
2987 MODULE_AUTHOR("Axis Communications AB");
2988 MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
2989 MODULE_LICENSE("GPL");
2990