xref: /linux/drivers/char/hw_random/cctrng.c (revision 56e0b6273ec8791ffe1c3cdc5d32fe5d001fd520)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/clk.h>
7 #include <linux/hw_random.h>
8 #include <linux/io.h>
9 #include <linux/platform_device.h>
10 #include <linux/pm_runtime.h>
11 #include <linux/interrupt.h>
12 #include <linux/irqreturn.h>
13 #include <linux/workqueue.h>
14 #include <linux/circ_buf.h>
15 #include <linux/completion.h>
16 #include <linux/of.h>
17 #include <linux/bitfield.h>
18 
19 #include "cctrng.h"
20 
/* Helpers for extracting a named bit-field from a CC register value.
 * Each field is described by <name>_BIT_SHIFT / <name>_BIT_SIZE macros
 * (declared in cctrng.h).
 */
#define CC_REG_LOW(name)  (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name)  GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

/* Extract field <fld_name> of register <reg_name> from raw value reg_val */
#define CC_REG_FLD_GET(reg_name, fld_name, reg_val)     \
	(FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))

/* max polls of NVM_IS_IDLE while waiting for HW reset completion */
#define CC_HW_RESET_LOOP_COUNT 10
/* runtime-PM autosuspend delay, in milliseconds */
#define CC_TRNG_SUSPEND_TIMEOUT 3000

/* data circular buffer in words must be:
 *  - of a power-of-2 size (limitation of circ_buf.h macros)
 *  - at least 6, the size generated in the EHR according to HW implementation
 */
#define CCTRNG_DATA_BUF_WORDS 32

/* The timeout for the TRNG operation should be calculated with the formula:
 * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
 * while:
 *  - SAMPLE_CNT is input value from the characterisation process
 *  - all the rest are constants
 */
#define EHR_NUM 1
#define VN_COEFF 4
#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
#define SCALE_VALUE 2
#define CCTRNG_TIMEOUT(smpl_cnt) \
	(EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
49 
/* Driver private state; one instance per probed device */
struct cctrng_drvdata {
	struct platform_device *pdev;
	void __iomem *cc_base;	/* mapped CC register space */
	struct clk *clk;	/* optional device clock */
	struct hwrng rng;
	u32 active_rosc;	/* index of the ring oscillator currently in use */
	/* Sampling interval for each ring oscillator:
	 * count of ring oscillator cycles between consecutive bits sampling.
	 * Value of 0 indicates non-valid rosc
	 */
	u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];

	/* collected entropy; accessed through the circ indices below */
	u32 data_buf[CCTRNG_DATA_BUF_WORDS];
	struct circ_buf circ;	/* circ.buf points at data_buf */
	struct work_struct compwork;	/* EHR-completion deferred work */
	struct work_struct startwork;	/* HW (re)start deferred work */

	/* pending_hw - 1 when HW is pending, 0 when it is idle */
	atomic_t pending_hw;

	/* protects against multiple concurrent consumers of data_buf */
	spinlock_t read_lock;
};
73 
74 
75 /* functions for write/read CC registers */
76 static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
77 {
78 	iowrite32(val, (drvdata->cc_base + reg));
79 }
80 static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
81 {
82 	return ioread32(drvdata->cc_base + reg);
83 }
84 
85 
/* Take a runtime-PM reference on @dev and resume it if needed.
 * Returns 0 on success, negative error code otherwise.
 */
static int cc_trng_pm_get(struct device *dev)
{
	int rc;

	rc = pm_runtime_get_sync(dev);
	if (rc < 0) {
		/* pm_runtime_get_sync() bumps the usage counter even on
		 * failure; drop it here so the counter stays balanced.
		 */
		pm_runtime_put_noidle(dev);
		return rc;
	}

	/* pm_runtime_get_sync() can return 1 as a valid return code */
	return 0;
}
95 
/* Drop the runtime-PM reference and let the device autosuspend */
static void cc_trng_pm_put_suspend(struct device *dev)
{
	int err;

	pm_runtime_mark_last_busy(dev);
	err = pm_runtime_put_autosuspend(dev);
	if (err)
		dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", err);
}
105 
/* Configure runtime-PM autosuspend and mark the device active.
 * Runtime PM stays disabled until cc_trng_pm_go() is called.
 */
static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
{
	struct device *dev = &(drvdata->pdev->dev);

	/* must be before the enabling to avoid redundant suspending */
	pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	/* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
	return pm_runtime_set_active(dev);
}
116 
/* Enable runtime PM; called once probe has fully initialized the HW */
static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
{
	struct device *dev = &(drvdata->pdev->dev);

	/* enable the PM module*/
	pm_runtime_enable(dev);
}
124 
/* Disable runtime PM; used on remove and on probe error paths */
static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
{
	struct device *dev = &(drvdata->pdev->dev);

	pm_runtime_disable(dev);
}
131 
132 
133 static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
134 {
135 	struct device *dev = &(drvdata->pdev->dev);
136 	struct device_node *np = drvdata->pdev->dev.of_node;
137 	int rc;
138 	int i;
139 	/* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
140 	int ret = -EINVAL;
141 
142 	rc = of_property_read_u32_array(np, "arm,rosc-ratio",
143 					drvdata->smpl_ratio,
144 					CC_TRNG_NUM_OF_ROSCS);
145 	if (rc) {
146 		/* arm,rosc-ratio was not found in device tree */
147 		return rc;
148 	}
149 
150 	/* verify that at least one rosc has (sampling ratio > 0) */
151 	for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
152 		dev_dbg(dev, "rosc %d sampling ratio %u",
153 			i, drvdata->smpl_ratio[i]);
154 
155 		if (drvdata->smpl_ratio[i] > 0)
156 			ret = 0;
157 	}
158 
159 	return ret;
160 }
161 
162 static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
163 {
164 	struct device *dev = &(drvdata->pdev->dev);
165 
166 	dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
167 	drvdata->active_rosc += 1;
168 
169 	while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
170 		if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
171 			return 0;
172 
173 		drvdata->active_rosc += 1;
174 	}
175 	return -EINVAL;
176 }
177 
178 
179 static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
180 {
181 	u32 max_cycles;
182 
183 	/* Set watchdog threshold to maximal allowed time (in CPU cycles) */
184 	max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
185 	cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);
186 
187 	/* enable the RND source */
188 	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);
189 
190 	/* unmask RNG interrupts */
191 	cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
192 }
193 
194 
195 /* increase circular data buffer index (head/tail) */
196 static inline void circ_idx_inc(int *idx, int bytes)
197 {
198 	*idx += (bytes + 3) >> 2;
199 	*idx &= (CCTRNG_DATA_BUF_WORDS - 1);
200 }
201 
202 static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
203 {
204 	return CIRC_SPACE(drvdata->circ.head,
205 			  drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
206 
207 }
208 
209 static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
210 {
211 	/* current implementation ignores "wait" */
212 
213 	struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
214 	struct device *dev = &(drvdata->pdev->dev);
215 	u32 *buf = (u32 *)drvdata->circ.buf;
216 	size_t copied = 0;
217 	size_t cnt_w;
218 	size_t size;
219 	size_t left;
220 
221 	if (!spin_trylock(&drvdata->read_lock)) {
222 		/* concurrent consumers from data_buf cannot be served */
223 		dev_dbg_ratelimited(dev, "unable to hold lock\n");
224 		return 0;
225 	}
226 
227 	/* copy till end of data buffer (without wrap back) */
228 	cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
229 				drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
230 	size = min((cnt_w<<2), max);
231 	memcpy(data, &(buf[drvdata->circ.tail]), size);
232 	copied = size;
233 	circ_idx_inc(&drvdata->circ.tail, size);
234 	/* copy rest of data in data buffer */
235 	left = max - copied;
236 	if (left > 0) {
237 		cnt_w = CIRC_CNT(drvdata->circ.head,
238 				 drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
239 		size = min((cnt_w<<2), left);
240 		memcpy(data, &(buf[drvdata->circ.tail]), size);
241 		copied += size;
242 		circ_idx_inc(&drvdata->circ.tail, size);
243 	}
244 
245 	spin_unlock(&drvdata->read_lock);
246 
247 	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
248 		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
249 			/* re-check space in buffer to avoid potential race */
250 			if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
251 				/* increment device's usage counter */
252 				int rc = cc_trng_pm_get(dev);
253 
254 				if (rc) {
255 					dev_err(dev,
256 						"cc_trng_pm_get returned %x\n",
257 						rc);
258 					return rc;
259 				}
260 
261 				/* schedule execution of deferred work handler
262 				 * for filling of data buffer
263 				 */
264 				schedule_work(&drvdata->startwork);
265 			} else {
266 				atomic_set(&drvdata->pending_hw, 0);
267 			}
268 		}
269 	}
270 
271 	return copied;
272 }
273 
/* Reset the TRNG HW and (re)start entropy collection on the active rosc:
 * software reset, program the sampling ratio, clear stale interrupts,
 * select the rosc and finally enable the RND source.
 */
static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
{
	u32 tmp_smpl_cnt = 0;
	struct device *dev = &(drvdata->pdev->dev);

	dev_dbg(dev, "cctrng hw trigger.\n");

	/* enable the HW RND clock */
	cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

	/* do software reset */
	cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
	/* in order to verify that the reset has completed,
	 * the sample count need to be verified
	 */
	do {
		/* enable the HW RND clock   */
		cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

		/* set sampling ratio (rng_clocks) between consecutive bits */
		cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
			   drvdata->smpl_ratio[drvdata->active_rosc]);

		/* read the sampling ratio  */
		tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);

	} while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);
	/* NOTE(review): the loop above spins until the write sticks, with no
	 * bounded timeout - assumes the HW always completes its reset.
	 */

	/* disable the RND source for setting new parameters in HW */
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);

	/* select the active ring oscillator */
	cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);

	/* Debug Control register: set to 0 - no bypasses */
	cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);

	cc_trng_enable_rnd_source(drvdata);
}
314 
/* Deferred work for the EHR-completion interrupt: drain one EHR worth of
 * entropy into the circular buffer, handle HW error conditions (possibly
 * switching to the next rosc), then either re-arm the HW or release the
 * runtime-PM reference so the device can autosuspend.
 */
void cc_trng_compwork_handler(struct work_struct *w)
{
	u32 isr = 0;
	u32 ehr_valid = 0;
	struct cctrng_drvdata *drvdata =
			container_of(w, struct cctrng_drvdata, compwork);
	struct device *dev = &(drvdata->pdev->dev);
	int i;

	/* stop DMA and the RNG source */
	cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* read RNG_ISR and check for errors */
	isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
	ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
	dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);

#ifdef CONFIG_CRYPTO_FIPS
	if (CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr) && fips_enabled) {
		fips_fail_notify();
		/* FIPS error is fatal */
		panic("Got HW CRNGT error while fips is enabled!\n");
	}
#endif

	/* Clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);


	if (!ehr_valid) {
		/* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
		if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
				CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
			dev_dbg(dev, "cctrng autocorr/timeout error.\n");
			goto next_rosc;
		}

		/* in case of VN error, ignore it */
	}

	/* read EHR data from registers */
	for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
		/* calc word ptr in data_buf */
		u32 *buf = (u32 *)drvdata->circ.buf;

		buf[drvdata->circ.head] = cc_ioread(drvdata,
				CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));

		/* EHR_DATA registers are cleared on read. In case 0 value was
		 * returned, restart the entropy collection.
		 */
		if (buf[drvdata->circ.head] == 0) {
			dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
				drvdata->active_rosc);
			goto next_rosc;
		}

		circ_idx_inc(&drvdata->circ.head, 1<<2);
	}

	/* mark the HW idle before deciding whether to re-arm it */
	atomic_set(&drvdata->pending_hw, 0);

	/* continue to fill data buffer if needed */
	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
			/* Re-enable rnd source */
			cc_trng_enable_rnd_source(drvdata);
			return;
		}
	}

	/* nothing more to collect: drop the PM reference taken when this
	 * collection round was triggered
	 */
	cc_trng_pm_put_suspend(dev);

	dev_dbg(dev, "compwork handler done\n");
	return;

next_rosc:
	if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
			(cc_trng_change_rosc(drvdata) == 0)) {
		/* trigger trng hw with next rosc */
		cc_trng_hw_trigger(drvdata);
	} else {
		/* no valid rosc left (or no room): go idle and autosuspend */
		atomic_set(&drvdata->pending_hw, 0);
		cc_trng_pm_put_suspend(dev);
	}
}
402 
/* Interrupt handler (IRQ line may be shared): acknowledge the host-level
 * interrupt, mask RNG interrupts and defer the heavy lifting to
 * cc_trng_compwork_handler().
 */
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
	struct device *dev = &(drvdata->pdev->dev);
	u32 irr;

	/* if driver suspended return, probably shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);

	/* RNG interrupt - most probable */
	if (irr & CC_HOST_RNG_IRQ_MASK) {
		/* Mask RNG interrupts - will be unmasked in deferred work */
		cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);

		/* We clear RNG interrupt here,
		 * to avoid it from firing as we'll unmask RNG interrupts.
		 */
		cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
			   CC_HOST_RNG_IRQ_MASK);

		irr &= ~CC_HOST_RNG_IRQ_MASK;

		/* schedule execution of deferred work handler */
		schedule_work(&drvdata->compwork);
	}

	if (irr) {
		dev_dbg_ratelimited(dev,
				"IRR includes unknown cause bits (0x%08X)\n",
				irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}
449 
/* Deferred work: start a collection round from the first rosc.
 * Scheduled from cctrng_read() after it claimed pending_hw and took a
 * runtime-PM reference.
 */
void cc_trng_startwork_handler(struct work_struct *w)
{
	struct cctrng_drvdata *drvdata =
			container_of(w, struct cctrng_drvdata, startwork);

	/* always restart the rosc search from index 0 */
	drvdata->active_rosc = 0;
	cc_trng_hw_trigger(drvdata);
}
458 
459 
460 static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
461 {
462 	struct clk *clk;
463 	struct device *dev = &(drvdata->pdev->dev);
464 	int rc = 0;
465 
466 	clk = devm_clk_get_optional(dev, NULL);
467 	if (IS_ERR(clk)) {
468 		if (PTR_ERR(clk) != -EPROBE_DEFER)
469 			dev_err(dev, "Error getting clock: %pe\n", clk);
470 		return PTR_ERR(clk);
471 	}
472 	drvdata->clk = clk;
473 
474 	rc = clk_prepare_enable(drvdata->clk);
475 	if (rc) {
476 		dev_err(dev, "Failed to enable clock\n");
477 		return rc;
478 	}
479 
480 	return 0;
481 }
482 
/* Disable and unprepare the device clock acquired in cc_trng_clk_init() */
static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
{
	clk_disable_unprepare(drvdata->clk);
}
487 
488 
/* Probe: map the CC register space, parse DT sampling ratios, set up the
 * clock, IRQ and runtime PM, register the hwrng device and kick off the
 * first entropy collection round.
 */
static int cctrng_probe(struct platform_device *pdev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cctrng_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int rc = 0;
	u32 val;
	int irq;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!drvdata->rng.name)
		return -ENOMEM;

	drvdata->rng.read = cctrng_read;
	drvdata->rng.priv = (unsigned long)drvdata;
	drvdata->rng.quality = CC_TRNG_QUALITY;

	platform_set_drvdata(pdev, drvdata);
	drvdata->pdev = pdev;

	/* the circ_buf indices operate over data_buf */
	drvdata->circ.buf = (char *)drvdata->data_buf;

	/* Get device resources */
	/* First CC registers space */
	req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Map registers space */
	drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
	if (IS_ERR(drvdata->cc_base)) {
		dev_err(dev, "Failed to ioremap registers");
		return PTR_ERR(drvdata->cc_base);
	}

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, drvdata->cc_base);

	/* Then IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Failed getting IRQ resource\n");
		return irq;
	}

	/* parse sampling rate from device tree */
	rc = cc_trng_parse_sampling_ratio(drvdata);
	if (rc) {
		dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
		return rc;
	}

	rc = cc_trng_clk_init(drvdata);
	if (rc) {
		dev_err(dev, "cc_trng_clk_init failed\n");
		return rc;
	}

	INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
	INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
	spin_lock_init(&drvdata->read_lock);

	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n", irq);
		goto post_clk_err;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);

	/* unmask HOST RNG interrupt */
	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
		   ~CC_HOST_RNG_IRQ_MASK);

	/* init PM */
	rc = cc_trng_pm_init(drvdata);
	if (rc) {
		dev_err(dev, "cc_trng_pm_init failed\n");
		goto post_clk_err;
	}

	/* increment device's usage counter */
	rc = cc_trng_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
		goto post_pm_err;
	}

	/* set pending_hw to verify that HW won't be triggered from read */
	atomic_set(&drvdata->pending_hw, 1);

	/* registration of the hwrng device */
	rc = hwrng_register(&drvdata->rng);
	if (rc) {
		dev_err(dev, "Could not register hwrng device.\n");
		goto post_pm_err;
	}

	/* trigger HW to start generate data */
	drvdata->active_rosc = 0;
	cc_trng_hw_trigger(drvdata);

	/* All set, we can allow auto-suspend */
	cc_trng_pm_go(drvdata);

	dev_info(dev, "ARM cctrng device initialized\n");

	return 0;

post_pm_err:
	cc_trng_pm_fini(drvdata);

post_clk_err:
	cc_trng_clk_fini(drvdata);

	return rc;
}
615 
/* Remove: unregister the hwrng, tear down runtime PM and the clock.
 * IRQ and memory mappings are devm-managed and released automatically.
 */
static int cctrng_remove(struct platform_device *pdev)
{
	struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	dev_dbg(dev, "Releasing cctrng resources...\n");

	hwrng_unregister(&drvdata->rng);

	cc_trng_pm_fini(drvdata);

	cc_trng_clk_fini(drvdata);

	dev_info(dev, "ARM cctrng device terminated\n");

	return 0;
}
633 
/* Suspend callback: allow the CC to power down, then gate its clock */
static int __maybe_unused cctrng_suspend(struct device *dev)
{
	struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);

	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
	cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
			POWER_DOWN_ENABLE);

	clk_disable_unprepare(drvdata->clk);

	return 0;
}
646 
647 static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
648 {
649 	unsigned int val;
650 	unsigned int i;
651 
652 	for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
653 		/* in cc7x3 NVM_IS_IDLE indicates that CC reset is
654 		 *  completed and device is fully functional
655 		 */
656 		val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
657 		if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
658 			/* hw indicate reset completed */
659 			return true;
660 		}
661 		/* allow scheduling other process on the processor */
662 		schedule();
663 	}
664 	/* reset not completed */
665 	return false;
666 }
667 
668 static int __maybe_unused cctrng_resume(struct device *dev)
669 {
670 	struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
671 	int rc;
672 
673 	dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
674 	/* Enables the device source clk */
675 	rc = clk_prepare_enable(drvdata->clk);
676 	if (rc) {
677 		dev_err(dev, "failed getting clock back on. We're toast.\n");
678 		return rc;
679 	}
680 
681 	/* wait for Cryptocell reset completion */
682 	if (!cctrng_wait_for_reset_completion(drvdata)) {
683 		dev_err(dev, "Cryptocell reset not completed");
684 		return -EBUSY;
685 	}
686 
687 	/* unmask HOST RNG interrupt */
688 	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
689 		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
690 		   ~CC_HOST_RNG_IRQ_MASK);
691 
692 	cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
693 		   POWER_DOWN_DISABLE);
694 
695 	return 0;
696 }
697 
/* same callbacks serve both system sleep and runtime PM */
static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);

/* devices handled by this driver: CryptoCell 713 and 703 TRNG */
static const struct of_device_id arm_cctrng_dt_match[] = {
	{ .compatible = "arm,cryptocell-713-trng", },
	{ .compatible = "arm,cryptocell-703-trng", },
	{},
};
MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);

static struct platform_driver cctrng_driver = {
	.driver = {
		.name = "cctrng",
		.of_match_table = arm_cctrng_dt_match,
		.pm = &cctrng_pm,
	},
	.probe = cctrng_probe,
	.remove = cctrng_remove,
};
716 
/* Module entry: validate buffer-size invariants and register the driver */
static int __init cctrng_mod_init(void)
{
	/* Compile time assertion checks */
	BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);	/* must hold one EHR */
	BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);

	return platform_driver_register(&cctrng_driver);
}
module_init(cctrng_mod_init);
726 
/* Module exit: unregister the platform driver */
static void __exit cctrng_mod_exit(void)
{
	platform_driver_unregister(&cctrng_driver);
}
module_exit(cctrng_mod_exit);

/* Module description */
MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");
737