// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 */

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

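/* Size in bytes of the RNG output stored by a single job descriptor */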
#define CAAM_RNG_MAX_FIFO_STORE_SIZE	16

/*
 * Length of used descriptors, see caam_init_desc()
 */
#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ +				\
			   CAAM_CMD_SZ +				\
			   CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)
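
/*
 * Worked example (a sketch; actual sizes are defined in desc_constr.h and
 * regs.h): assuming CAAM_CMD_SZ == sizeof(u32) == 4 and a 64-bit pointer,
 * CAAM_PTR_SZ_MAX == 8, this comes to 4 + 4 + 4 + 8 = 20 bytes: one word
 * each for the HEADER, OPERATION and FIFO STORE commands, plus the store's
 * DMA address.
 */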

/* rng per-device context */
struct caam_rng_ctx {
	struct hwrng rng;		/* hw_random framework handle */
	struct device *jrdev;		/* job ring used to run RNG jobs */
	struct device *ctrldev;		/* CAAM controller (devres owner) */
	void *desc_async;		/* descriptor for the refill worker */
	void *desc_sync;		/* descriptor for blocking reads */
	struct work_struct worker;	/* background kfifo refill */
	struct kfifo fifo;		/* buffered bytes for !wait reads */
};

struct caam_rng_job_ctx {
	struct completion *done;
	int *err;
};

static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
	return (struct caam_rng_ctx *)r->priv;
}

static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct caam_rng_job_ctx *jctx = context;

	if (err)
		*jctx->err = caam_jr_strstatus(jrdev, err);

	complete(jctx->done);
}

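/*
 * caam_init_desc() builds the three-command job descriptor counted by
 * CAAM_RNG_DESC_LEN. A rough sketch (exact word encodings live in desc.h):
 *
 *   HEADER                                          1 word
 *   OPERATION   ALGSEL=RNG, CLASS1, PR_ON           1 word
 *   FIFO STORE  type=RNGSTORE, len=16 -> dst_dma    1 word + pointer
 *
 * PR_ON requests prediction resistance, i.e. the RNG reseeds before
 * generating the block.
 */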
static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Generate random bytes: + 1 cmd_sz */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
			 OP_ALG_PR_ON);
	/* Store bytes: + 1 cmd_sz + caam_ptr_sz */
	append_fifo_store(desc, dst_dma,
			  CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);

	print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}

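/*
 * Run one RNG job synchronously: map @dst for DMA, enqueue the descriptor
 * and sleep until the job ring completion fires. Returns the number of
 * bytes stored (always CAAM_RNG_MAX_FIFO_STORE_SIZE) or a negative errno;
 * the err ?: (ret ?: len) chain below prefers the enqueue error, then the
 * job status, then the length.
 */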
static int caam_rng_read_one(struct device *jrdev,
			     void *dst, int len,
			     void *desc,
			     struct completion *done)
{
	dma_addr_t dst_dma;
	int err, ret = 0;
	struct caam_rng_job_ctx jctx = {
		.done = done,
		.err  = &ret,
	};

	/*
	 * The descriptor always stores one 16-byte block; the hw_random
	 * core guarantees read buffers of at least 32 bytes, so clamping
	 * to CAAM_RNG_MAX_FIFO_STORE_SIZE cannot overrun @dst.
	 */
	len = CAAM_RNG_MAX_FIFO_STORE_SIZE;

	dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map destination memory\n");
		return -ENOMEM;
	}

	init_completion(done);
	err = caam_jr_enqueue(jrdev,
			      caam_init_desc(desc, dst_dma),
			      caam_rng_done, &jctx);
	if (err == -EINPROGRESS) {
		wait_for_completion(done);
		err = 0;
	}

	dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);

	return err ?: (ret ?: len);
}

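/*
 * Refill the kfifo with one block of RNG output. kfifo_dma_in_prepare()
 * exposes the fifo's free space as a scatterlist; the job writes straight
 * into that buffer and kfifo_dma_in_finish() commits the bytes produced.
 */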
static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
	struct scatterlist sg[1];
	struct completion done;
	int len, nents;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
				     CAAM_RNG_MAX_FIFO_STORE_SIZE);
	if (!nents)
		return;

	len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
				sg[0].length,
				ctx->desc_async,
				&done);
	if (len < 0)
		return;

	kfifo_dma_in_finish(&ctx->fifo, len);
}

static void caam_rng_worker(struct work_struct *work)
{
	struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
						worker);
	caam_rng_fill_async(ctx);
}

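/*
 * hwrng .read callback. With @wait set, do a blocking job ring round trip;
 * otherwise hand out whatever the kfifo has buffered (possibly 0 bytes)
 * and kick the worker so the fifo is refilled for the next caller.
 */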
static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int out;

	if (wait) {
		struct completion done;

		return caam_rng_read_one(ctx->jrdev, dst, max,
					 ctx->desc_sync, &done);
	}

	out = kfifo_out(&ctx->fifo, dst, max);
	if (kfifo_is_empty(&ctx->fifo))
		schedule_work(&ctx->worker);

	return out;
}

static void caam_cleanup(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);

	flush_work(&ctx->worker);
	caam_jr_free(ctx->jrdev);
	kfifo_free(&ctx->fifo);
}

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
static inline void test_len(struct hwrng *rng, size_t len, bool wait)
{
	u8 *buf;
	int read_len;
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	struct device *dev = ctx->ctrldev;

	buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL);
	if (!buf)
		return;

	while (len > 0) {
		read_len = rng->read(rng, buf, len, wait);

		if (read_len < 0 || (read_len == 0 && wait)) {
			dev_err(dev, "RNG Read FAILED received %d bytes\n",
				read_len);
			kfree(buf);
			return;
		}

		print_hex_dump_debug("random bytes@: ",
				     DUMP_PREFIX_ADDRESS, 16, 4,
				     buf, read_len, 1);

		len = len - read_len;
	}

	kfree(buf);
}

static inline void test_mode_once(struct hwrng *rng, bool wait)
{
	test_len(rng, 32, wait);
	test_len(rng, 64, wait);
	test_len(rng, 128, wait);
}

static void self_test(struct hwrng *rng)
{
	pr_info("Executing RNG SELF-TEST with wait\n");
	test_mode_once(rng, true);
}
#endif

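/*
 * hwrng .init callback: allocate the two descriptor buffers and the kfifo
 * (sized to a multiple of the DMA cache alignment so the DMA'd block does
 * not share a cache line with other data), take a job ring, and pre-fill
 * the fifo so early non-blocking reads already have entropy available.
 */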
static int caam_init(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int err;

	ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				      GFP_KERNEL);
	if (!ctx->desc_sync)
		return -ENOMEM;

	ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				       GFP_KERNEL);
	if (!ctx->desc_async)
		return -ENOMEM;

	if (kfifo_alloc(&ctx->fifo, ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE,
					  dma_get_cache_alignment()),
			GFP_KERNEL))
		return -ENOMEM;

	INIT_WORK(&ctx->worker, caam_rng_worker);

	ctx->jrdev = caam_jr_alloc();
	err = PTR_ERR_OR_ZERO(ctx->jrdev);
	if (err) {
		kfifo_free(&ctx->fifo);
		pr_err("Job Ring Device allocation for transform failed\n");
		return err;
	}

	/*
	 * Fill async buffer to have early randomness data for
	 * hw_random
	 */
	caam_rng_fill_async(ctx);

	return 0;
}

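/*
 * caam_rng_init()/caam_rng_exit() are the entry points the rest of the
 * CAAM driver uses to (un)register the hwrng (prototypes in intern.h).
 * Everything allocated below is opened in a devres group keyed on
 * caam_rng_init, so caam_rng_exit() can tear it all down with a single
 * devres_release_group().
 *
 * Once registered, the device is reachable through the hw_random core;
 * for illustration only (paths per that framework, not this file):
 *
 *   $ cat /sys/class/misc/hw_random/rng_available
 *   rng-caam
 *   $ dd if=/dev/hwrng bs=16 count=1 | xxd
 */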
int caam_rng_init(struct device *ctrldev);

void caam_rng_exit(struct device *ctrldev)
{
	devres_release_group(ctrldev, caam_rng_init);
}

int caam_rng_init(struct device *ctrldev)
{
	struct caam_rng_ctx *ctx;
	u32 rng_inst;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int ret;

	/* Check for an instantiated RNG before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst)
		return 0;

	if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
		return -ENOMEM;

	ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ctrldev = ctrldev;

	ctx->rng.name    = "rng-caam";
	ctx->rng.init    = caam_init;
	ctx->rng.cleanup = caam_cleanup;
	ctx->rng.read    = caam_read;
	ctx->rng.priv    = (unsigned long)ctx;

	dev_info(ctrldev, "registering rng-caam\n");

	ret = devm_hwrng_register(ctrldev, &ctx->rng);
	if (ret) {
		caam_rng_exit(ctrldev);
		return ret;
	}

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
	self_test(&ctx->rng);
#endif

	devres_close_group(ctrldev, caam_rng_init);
	return 0;
}