// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-cipher.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for AES ciphers with 128/192/256-bit keysizes in
 * CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ss.h"

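/*
 * Check whether the request can be processed by the hardware or must go
 * through the software fallback: the SS only handles 16-byte multiples,
 * at most 8 SG entries per side with 16-byte aligned offsets, and needs
 * source and destination scatterlists with matching lengths.
 */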
static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	struct scatterlist *sg;
	unsigned int todo, len;

	if (areq->cryptlen == 0 || areq->cryptlen % 16) {
		algt->stat_fb_len++;
		return true;
	}

	if (sg_nents_for_len(areq->src, areq->cryptlen) > 8 ||
		sg_nents_for_len(areq->dst, areq->cryptlen) > 8) {
		algt->stat_fb_sgnum++;
		return true;
	}

	len = areq->cryptlen;
	sg = areq->src;
	while (sg) {
		todo = min(len, sg->length);
		if ((todo % 16) != 0) {
			algt->stat_fb_sglen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, 16)) {
			algt->stat_fb_align++;
			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}
	len = areq->cryptlen;
	sg = areq->dst;
	while (sg) {
		todo = min(len, sg->length);
		if ((todo % 16) != 0) {
			algt->stat_fb_sglen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, 16)) {
			algt->stat_fb_align++;
			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}

	/* The SS needs the same number of SGs (with the same lengths) for source and destination */
	in_sg = areq->src;
	out_sg = areq->dst;
	while (in_sg && out_sg) {
		if (in_sg->length != out_sg->length)
			return true;
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
	}
	if (in_sg || out_sg)
		return true;
	return false;
}

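/*
 * Run the request through the pre-allocated fallback skcipher, keeping
 * the caller's completion callback and request flags.
 */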
static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ss_alg_template *algt;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif
	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

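/*
 * For CBC, prepare one IV per source chunk: the first IV is the request
 * IV; when decrypting, each following chunk is chained off the last
 * ciphertext block of the previous one, so those blocks are copied out
 * (and the final one saved in sf->biv) before DMA mapping, since an
 * in-place operation will overwrite the source.
 */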
static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct scatterlist *sg = areq->src;
	unsigned int todo, offset;
	unsigned int len = areq->cryptlen;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
	int i = 0;
	dma_addr_t a;
	int err;

	rctx->ivlen = ivsize;
	if (rctx->op_dir & SS_DECRYPTION) {
		offset = areq->cryptlen - ivsize;
		scatterwalk_map_and_copy(sf->biv, areq->src, offset,
					 ivsize, 0);
	}

	/* we need to copy all IVs from the source in case DMA is bi-directional */
	while (sg && len) {
		if (sg_dma_len(sg) == 0) {
			sg = sg_next(sg);
			continue;
		}
		if (i == 0)
			memcpy(sf->iv[0], areq->iv, ivsize);
		a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ss->dev, a)) {
			memzero_explicit(sf->iv[i], ivsize);
			dev_err(ss->dev, "Cannot DMA MAP IV\n");
			err = -EFAULT;
			goto dma_iv_error;
		}
		rctx->p_iv[i] = a;
		/* we only need to set up all the other IVs when decrypting */
		if (rctx->op_dir & SS_ENCRYPTION)
			return 0;
		todo = min(len, sg_dma_len(sg));
		len -= todo;
		i++;
		if (i < MAX_SG) {
			offset = sg->length - ivsize;
			scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0);
		}
		rctx->niv = i;
		sg = sg_next(sg);
	}

	return 0;
dma_iv_error:
	i--;
	while (i >= 0) {
		dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
		memzero_explicit(sf->iv[i], ivsize);
		i--;
	}
	return err;
}

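/*
 * Map the key, IVs and source/destination scatterlists for DMA, fill in
 * the per-flow task descriptors (t_src/t_dst), run the hardware
 * operation, then unmap everything and write back the output IV.
 */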
static int sun8i_ss_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int nsgs = sg_nents_for_len(areq->src, areq->cryptlen);
	int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen);
	int i;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);

	dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode];
	rctx->method = ss->variant->alg_cipher[algt->ss_algo_id];
	rctx->keylen = op->keylen;

	rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, rctx->p_key)) {
		dev_err(ss->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		err = sun8i_ss_setup_ivs(areq);
		if (err)
			goto theend_key;
	}
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->src;
	while (i < nr_sgs && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgs_next;
		rctx->t_src[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_src[i].len = todo / 4;
		dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgs_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->dst;
	while (i < nr_sgd && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgd_next;
		rctx->t_dst[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_dst[i].len = todo / 4;
		dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgd_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE);
		dma_unmap_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		for (i = 0; i < rctx->niv; i++) {
			dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
			memzero_explicit(sf->iv[i], ivsize);
		}

		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & SS_DECRYPTION) {
			memcpy(areq->iv, sf->biv, ivsize);
			memzero_explicit(sf->biv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
					ivsize, 0);
		}
	}

theend_key:
	dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE);

theend:

	return err;
}

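/*
 * crypto_engine callback: execute the request on the hardware and
 * finalize it, with bottom halves disabled around the completion.
 */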
static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = sun8i_ss_cipher(breq);
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

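/*
 * Entry point for decryption: fall back to software when the request is
 * not hardware-friendly, otherwise pick a flow and queue the request on
 * its crypto engine.
 */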
int sun8i_ss_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
	rctx->op_dir = SS_DECRYPTION;

	if (sun8i_ss_need_fallback(areq))
		return sun8i_ss_cipher_fallback(areq);

	e = sun8i_ss_get_engine_number(op->ss);
	engine = op->ss->flows[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

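/* Entry point for encryption; same flow selection as sun8i_ss_skdecrypt(). */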
int sun8i_ss_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
	rctx->op_dir = SS_ENCRYPTION;

	if (sun8i_ss_need_fallback(areq))
		return sun8i_ss_cipher_fallback(areq);

	e = sun8i_ss_get_engine_number(op->ss);
	engine = op->ss->flows[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

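/*
 * Initialize the transform: allocate the software fallback, size the
 * request context to also hold the fallback request, set up the
 * crypto_engine callbacks and take a runtime PM reference.
 */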
int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
	op->ss = algt->ss;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	memcpy(algt->fbname,
	       crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
	       CRYPTO_MAX_ALG_NAME);

	op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	err = pm_runtime_resume_and_get(op->ss->dev);
	if (err < 0) {
		dev_err(op->ss->dev, "pm error %d\n", err);
		goto error_pm;
	}

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

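/* Release the key, the fallback transform and the runtime PM reference. */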
void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync(op->ss->dev);
}

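/*
 * Set the AES key: only 128/192/256-bit keys are accepted; keep a
 * DMA-able copy for the hardware and forward the key to the fallback.
 */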
int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

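/* Set the DES3 key, which must be exactly 3 * DES_KEY_SIZE bytes. */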
int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
		return -EINVAL;
	}

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}