// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * key sizes in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"

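/*
 * Check whether the CE hardware can handle this request directly, or
 * whether it must go through the software fallback: too many SG
 * entries, a cryptlen shorter than the IV or not a multiple of 16
 * bytes, or SG entries whose length/offset are not word-aligned all
 * force the fallback.
 */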
static bool sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;

	/* The task descriptor holds at most MAX_SG source/destination entries */
	if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
		return true;

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
		return true;

	if (areq->cryptlen == 0 || areq->cryptlen % 16)
		return true;

	/* Every SG entry must be word-sized and word-aligned */
	sg = areq->src;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}

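/*
 * Run the request on the software fallback transform, forwarding the
 * original parameters, callback and direction unchanged.
 */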
static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

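/*
 * Build the CE task descriptor for the request: set the algorithm,
 * direction and key-size control bits, DMA-map the key, a bounce
 * buffer holding the IV and the source/destination scatterlists, and
 * fill in the per-SG source/destination entries. For decryption the
 * last ciphertext block is saved first, since it becomes the next IV.
 * On failure, everything mapped or allocated so far is released.
 */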
static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	u32 common, sym;
	int flow, i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif

	flow = rctx->flow;

	chan = &ce->chanlist[flow];

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need the length in bytes, in words otherwise */
	if (ce->variant->cipher_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}

	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;

	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = cpu_to_le32(rctx->addr_key);

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		rctx->ivlen = ivsize;
		rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
		if (!rctx->bounce_iv) {
			err = -ENOMEM;
			goto theend_key;
		}
		if (rctx->op_dir & CE_DECRYPTION) {
			rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!rctx->backup_iv) {
				err = -ENOMEM;
				/* Free the bounce buffer here: theend_key skips the IV cleanup */
				kfree(rctx->bounce_iv);
				rctx->bounce_iv = NULL;
				goto theend_key;
			}
			/* Save the last ciphertext block: it becomes the next IV */
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
		memcpy(rctx->bounce_iv, areq->iv, ivsize);
		rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
			/* Clear the handle so the error path does not unmap a failed mapping */
			rctx->addr_iv = 0;
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = cpu_to_le32(rctx->addr_iv);
	}

	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	chan->timeout = areq->cryptlen;
	rctx->nr_sgs = nr_sgs;
	rctx->nr_sgd = nr_sgd;
	return 0;

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
				     DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
			     DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (rctx->addr_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			kfree_sensitive(rctx->backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		kfree(rctx->bounce_iv);
	}

theend_key:
	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

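/*
 * Submit the task prepared by sun8i_ce_cipher_prepare() to the flow's
 * hardware channel and complete the request through the crypto engine.
 */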
static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
	int flow, err;

	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
	crypto_finalize_skcipher_request(engine, breq, err);
	return 0;
}

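/*
 * Release the DMA mappings taken in sun8i_ce_cipher_prepare() and
 * write the chaining IV back into areq->iv: the saved last ciphertext
 * block for decryption, the last block of the produced ciphertext for
 * encryption.
 */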
static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	unsigned int ivsize, offset;
	int nr_sgs = rctx->nr_sgs;
	int nr_sgd = rctx->nr_sgd;
	int flow;

	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;
	ivsize = crypto_skcipher_ivsize(tfm);

	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (cet->t_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			kfree_sensitive(rctx->backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		kfree(rctx->bounce_iv);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

	return 0;
}

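/*
 * Entry point for decryption: pick the fallback when needed, otherwise
 * select a CE flow and queue the request on that flow's crypto engine.
 */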
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

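/*
 * Entry point for encryption: same flow selection and fallback logic
 * as sun8i_ce_skdecrypt(), with the direction set to encryption.
 */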
int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

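/*
 * Transform init: allocate the software fallback, size the request
 * context so it can carry the fallback's own request, register the
 * crypto engine callbacks and take a runtime PM reference on the
 * device.
 */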
int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
	op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
	op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

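/*
 * Transform exit: free the key material, release the fallback
 * transform and drop the runtime PM reference taken at init time.
 */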
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

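/*
 * Set the AES key: only 128/192/256 bit keys are accepted. A copy is
 * kept in DMA-able memory and the key is also propagated to the
 * fallback transform.
 */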
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
	case 192 / 8:
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	/* GFP_DMA: the key is DMA-mapped straight from this buffer in prepare */
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

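/*
 * Set the DES3 key: the key is verified first, then handled exactly
 * like the AES case (DMA-able copy plus fallback propagation).
 */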
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
477