// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"

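/*
 * Optimized PIO path: used when every source and destination SG entry
 * has a length and offset that are multiples of 4, so data can be
 * pushed to and pulled from the FIFOs as whole 32-bit words without
 * any bounce buffer.
 */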
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

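	/*
	 * For CBC decryption the last ciphertext block is the IV of the
	 * next request, so save it from the source buffer before it is
	 * consumed; it is restored into areq->iv once the operation ends.
	 */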
	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
					 areq->cryptlen - ivsize, ivsize, 0);
	}

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
		algt->stat_opti++;
		algt->stat_bytes += areq->cryptlen;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
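	/*
	 * PIO loop: push 32-bit words into the RX FIFO and drain the TX
	 * FIFO, using the FCSR register to learn how much space/data is
	 * available on each side.
	 */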
	do {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			todo = min(rx_cnt, ileft);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo) {
				ileft -= todo;
				writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
				oi += todo * 4;
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			oo = 0;
			po += mo.length;
		}
		sg_miter_stop(&mo);
	} while (oleft);

	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, ctx->backup_iv, ivsize);
			memzero_explicit(ctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}

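/*
 * Delegate the request to the software fallback transform allocated at
 * init time; used when the hardware cannot handle the request (e.g. a
 * cryptlen that is not a multiple of the block size).
 */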
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	int err;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
		algt->stat_fb++;
	}

	skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(&ctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&ctx->fallback_req);

	return err;
}

/* Generic function that supports SGs whose sizes are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	unsigned int oi, oo;	/* offset for in and out */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;

	/*
	 * If we only have SGs whose lengths and offsets are multiples of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length | in_sg->offset) & 3u)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length | out_sg->offset) & 3u)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);

	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
					 areq->cryptlen - ivsize, ivsize, 0);
	}

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt->stat_req++;
		algt->stat_bytes += areq->cryptlen;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

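	/*
	 * Byte-oriented PIO loop: when an SG entry is not 4-byte aligned,
	 * data is staged through the bounce buffers ss->buf (input) and
	 * ss->bufo (output) so the FIFOs are still accessed as whole
	 * 32-bit words.
	 */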
	while (oleft) {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG.
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so we need to
				 * linearize into buf; todo is then in bytes.
				 * After that copy, once buf holds a multiple
				 * of 4 bytes we must be able to write it all
				 * in one pass, which is why we min() with
				 * rx_cnt.
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(ss->buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, ss->buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

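		/* no output words ready yet: poll the FIFO status again */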
		if (!tx_cnt)
			continue;
		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		/* todo is in 4-byte words */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);

		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				po += mo.length;
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; read as much as possible
			 * in order to empty the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy? No more than
				 * what remains in the SG entry, and no more
				 * than what remains in the buffer; no need to
				 * test against oleft.
				 */
				todo = min_t(size_t,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, ss->bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					po += mo.length;
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
		sg_miter_stop(&mo);
	}
	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, ctx->backup_iv, ivsize);
			memzero_explicit(ctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}

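/*
 * Each helper below only composes the control word for the request:
 * algorithm | chaining mode | enable bit | direction | key size
 * (e.g. SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | op->keymode)
 * and hands it to sun4i_ss_cipher_poll().
 */
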
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

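/*
 * Allocate the software fallback transform, size the request context so
 * it can carry the fallback request, and hold a runtime PM reference on
 * the device for the lifetime of the tfm.
 */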
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	int err;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	err = pm_runtime_resume_and_get(op->ss->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put(op->ss->dev);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

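	/* keep the fallback tfm's request flags and key in sync with ours */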
	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
645