// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for AES ciphers with 128, 192 and 256 bit keysize
 * in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"

static bool sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;

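	/*
	 * The hardware handles at most MAX_SG chunks per task and needs
	 * word-aligned chunks of word-multiple length, with a total length
	 * that is a non-zero multiple of the AES block size; anything else
	 * goes through the software fallback.
	 */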
	if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
		return true;

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
		return true;

	if (areq->cryptlen == 0 || areq->cryptlen % 16)
		return true;

	sg = areq->src;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}

static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
#endif
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif

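	/* Hand the request, unchanged, to the software fallback tfm. */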
	skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
	skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(subreq);
	else
		err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);
	return err;
}

static int sun8i_ce_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	dma_addr_t addr_iv = 0, addr_key = 0;
	void *backup_iv = NULL;
	u32 common, sym;
	int flow, i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif

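	/* Pick the channel of this flow and rebuild its task descriptor. */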
	flow = rctx->flow;

	chan = &ce->chanlist[flow];

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need the length in bytes, in words otherwise */
	if (ce->variant->has_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}

	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;

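	/*
	 * op->key was duplicated with GFP_DMA at setkey time, so it can be
	 * mapped directly for the device.
	 */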
	addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	cet->t_key = cpu_to_le32(addr_key);
	if (dma_mapping_error(ce->dev, addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}

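	/*
	 * The IV is bounced through a GFP_DMA buffer since areq->iv is not
	 * guaranteed to be DMA-able. For decryption, save the last
	 * ciphertext block first: it becomes the IV of the next chained
	 * request, and an in-place operation would overwrite it before it
	 * can be copied back to areq->iv.
	 */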
	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		chan->ivlen = ivsize;
		chan->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
		if (!chan->bounce_iv) {
			err = -ENOMEM;
			goto theend_key;
		}
		if (rctx->op_dir & CE_DECRYPTION) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				/* the theend_key path does not free bounce_iv */
				kfree(chan->bounce_iv);
				goto theend_key;
			}
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
		memcpy(chan->bounce_iv, areq->iv, ivsize);
		addr_iv = dma_map_single(ce->dev, chan->bounce_iv, chan->ivlen,
					 DMA_TO_DEVICE);
		cet->t_iv = cpu_to_le32(addr_iv);
		if (dma_mapping_error(ce->dev, addr_iv)) {
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
	}

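	/* An in-place request is mapped once, bidirectionally. */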
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

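	/*
	 * Fill the task descriptor with the DMA address of each mapped
	 * chunk; the hardware expects lengths in 32-bit words.
	 */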
	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, todo / 4, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, todo / 4, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

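	/*
	 * chan->timeout is used by sun8i_ce_run_task() as the completion
	 * wait time, so scale it with the request size.
	 */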
	chan->timeout = areq->cryptlen;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		if (nr_sgd > 0)
			dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (addr_iv)
			dma_unmap_single(ce->dev, addr_iv, chan->ivlen,
					 DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, backup_iv, ivsize);
			kzfree(backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		kfree(chan->bounce_iv);
	}

theend_key:
	dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = sun8i_ce_cipher(breq);
	crypto_finalize_skcipher_request(engine, breq, err);

	return 0;
}

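/*
 * Entry points called by the crypto API: requests the hardware cannot
 * handle are served synchronously by the fallback, everything else is
 * queued to the crypto engine of the selected flow.
 */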
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	op->ce = algt->ce;

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);

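	/*
	 * Always allocate a software fallback: it serves the requests that
	 * sun8i_ce_cipher_need_fallback() rejects for the hardware.
	 */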
	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));

	op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	/* pm_runtime_get_sync() bumps the usage count even on failure */
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_sync_skcipher(op->fallback_tfm);
	return err;
}

void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	if (op->key) {
		memzero_explicit(op->key, op->keylen);
		kfree(op->key);
	}
	crypto_free_sync_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
	case 192 / 8:
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
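	/*
	 * Replace any previous key, wiping the old one first. The copy is
	 * allocated with GFP_DMA so it can be DMA-mapped for the CE.
	 */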
	if (op->key) {
		memzero_explicit(op->key, op->keylen);
		kfree(op->key);
	}
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	if (op->key) {
		memzero_explicit(op->key, op->keylen);
		kfree(op->key);
	}
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}