// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-prng.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file handles the PRNG found in the SS
 *
 * You can find a link to the datasheet in Documentation/arch/arm/sunxi.rst
 */
#include "sun8i-ss.h"
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pm_runtime.h>
#include <crypto/internal/rng.h>

int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
		       unsigned int slen)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);

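	/* Drop any previously stored seed whose length differs from the new one */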
	if (ctx->seed && ctx->slen != slen) {
		kfree_sensitive(ctx->seed);
		ctx->slen = 0;
		ctx->seed = NULL;
	}
	if (!ctx->seed)
		ctx->seed = kmalloc(slen, GFP_KERNEL);
	if (!ctx->seed)
		return -ENOMEM;

	memcpy(ctx->seed, seed, slen);
	ctx->slen = slen;

	return 0;
}

int sun8i_ss_prng_init(struct crypto_tfm *tfm)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(struct sun8i_ss_rng_tfm_ctx));
	return 0;
}

void sun8i_ss_prng_exit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	kfree_sensitive(ctx->seed);
	ctx->seed = NULL;
	ctx->slen = 0;
}

int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
			   unsigned int slen, u8 *dst, unsigned int dlen)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
	struct rng_alg *alg = crypto_rng_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	unsigned int todo_with_padding;
	struct sun8i_ss_dev *ss;
	dma_addr_t dma_iv, dma_dst;
	unsigned int todo;
	int err = 0;
	int flow;
	void *d;
	u32 v;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.rng);
	ss = algt->ss;

	if (ctx->slen == 0) {
		dev_err(ss->dev, "The PRNG is not seeded\n");
		return -EINVAL;
	}

	/* The SS does not give back an updated seed, so we need to produce a
	 * new one ourselves by requesting PRNG_SEED_SIZE extra bytes of data.
	 * We want dlen + seedsize rounded up to a multiple of PRNG_DATA_SIZE.
	 */
	todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;
	todo -= todo % PRNG_DATA_SIZE;

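	/* Pad the allocation up to the DMA cache alignment so the mapped
	 * buffer does not share a cache line with unrelated data.
	 */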
	todo_with_padding = ALIGN(todo, dma_get_cache_alignment());
	if (todo_with_padding < todo || todo < dlen)
		return -EOVERFLOW;

	d = kzalloc(todo_with_padding, GFP_KERNEL);
	if (!d)
		return -ENOMEM;

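	/* Pick one of the two SS flows; the same index selects the completion
	 * used below to wait for the end of the request.
	 */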
	flow = sun8i_ss_get_engine_number(ss);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
	algt->stat_bytes += todo;
#endif

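	/* Build the control word: PRNG algorithm, continue mode, start bit
	 * and the chosen flow.
	 */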
	v = SS_ALG_PRNG | SS_PRNG_CONTINUE | SS_START;
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, dma_iv)) {
		dev_err(ss->dev, "Cannot DMA MAP IV\n");
		err = -EFAULT;
		goto err_free;
	}

	dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, dma_dst)) {
		dev_err(ss->dev, "Cannot DMA MAP DST\n");
		err = -EFAULT;
		goto err_iv;
	}

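	/* Make sure the device is powered up before touching its registers */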
	err = pm_runtime_resume_and_get(ss->dev);
	if (err < 0)
		goto err_pm;
	err = 0;

	mutex_lock(&ss->mlock);
	writel(dma_iv, ss->base + SS_IV_ADR_REG);
	/* the PRNG acts badly (failing rngtest) without SS_KEY_ADR_REG set */
	writel(dma_iv, ss->base + SS_KEY_ADR_REG);
	writel(dma_dst, ss->base + SS_DST_ADR_REG);
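	/* the length register is expressed in 32-bit words, hence todo / 4 */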
	writel(todo / 4, ss->base + SS_LEN_ADR_REG);

	reinit_completion(&ss->flows[flow].complete);
	ss->flows[flow].status = 0;
	/* Be sure all data is written before enabling the task */
	wmb();

	writel(v, ss->base + SS_CTL_REG);

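	/* Wait for the flow completion interrupt; the timeout scales with the
	 * amount of data requested.
	 */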
	wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
						  msecs_to_jiffies(todo));
	if (ss->flows[flow].status == 0) {
		dev_err(ss->dev, "DMA timeout for PRNG (size=%u)\n", todo);
		err = -EFAULT;
	}
	/* Since cipher and hash go through the Linux crypto engine and we have
	 * one crypto engine per flow, we are sure they will issue only one
	 * request per flow at a time.
	 * Since the crypto engine waits for completion before submitting a new
	 * request, the mlock could be released just after the final writel.
	 * But the crypto engine cannot handle crypto_rng, so we need to be sure
	 * nothing else will use our flow.
	 * The easiest way is to hold mlock until the hardware has finished our
	 * request.
	 * We could have used a per-flow lock, but this would increase
	 * complexity.
	 * The drawback is that no request can be handled on the other flow
	 * in the meantime.
	 */
	mutex_unlock(&ss->mlock);

	pm_runtime_put(ss->dev);

err_pm:
	dma_unmap_single(ss->dev, dma_dst, todo, DMA_FROM_DEVICE);
err_iv:
	dma_unmap_single(ss->dev, dma_iv, ctx->slen, DMA_TO_DEVICE);

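	/* On success, hand the first dlen bytes to the caller and keep the
	 * extra generated bytes as the next seed.
	 */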
	if (!err) {
		memcpy(dst, d, dlen);
		/* Update seed */
		memcpy(ctx->seed, d + dlen, ctx->slen);
	}
err_free:
	kfree_sensitive(d);

	return err;
}