xref: /linux/drivers/crypto/caam/ctrl.c (revision 313ea293e9c4d1eabcaddd2c0800f083b03c2a2e)
1 /*
2  * CAAM control-plane driver backend
3  * Controller-level driver, kernel property detection, initialization
4  *
5  * Copyright 2008-2012 Freescale Semiconductor, Inc.
6  */
7 
8 #include "compat.h"
9 #include "regs.h"
10 #include "intern.h"
11 #include "jr.h"
12 #include "desc_constr.h"
13 #include "error.h"
14 #include "ctrl.h"
15 
/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 *
 * @desc:   buffer the job descriptor is built into
 * @handle: RNG4 state handle index to instantiate
 * @do_sk:  when non-zero and @handle is 0, also generate the secure keys
 *          (JDKEK, TDKEK, TDSK) - possible only once per POR
 */
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
	u32 *jump_cmd, op_flags;

	init_job_desc(desc, 0);

	/* Handle index is encoded in the AAI field of the operation */
	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;

	/* INIT RNG in non-test mode */
	append_operation(desc, op_flags);

	if (!handle && do_sk) {
		/*
		 * For SH0, Secure Keys must be generated as well
		 */

		/* wait for done */
		jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
		set_jump_tgt_here(desc, jump_cmd);

		/*
		 * load 1 to clear written reg:
		 * resets the done interrupt and returns the RNG to idle.
		 */
		append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

		/* Initialize State Handle  */
		append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
				 OP_ALG_AAI_RNG4_SK);
	}

	/* Halt the DECO when the descriptor completes */
	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
54 
55 /* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
56 static void build_deinstantiation_desc(u32 *desc, int handle)
57 {
58 	init_job_desc(desc, 0);
59 
60 	/* Uninstantiate State Handle 0 */
61 	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
62 			 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
63 
64 	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
65 }
66 
/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *			  the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @desc - descriptor to execute (length in words read via desc_len())
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *	   - -ENODEV if the DECO couldn't be acquired
 *	   - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
					u32 *status)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_full __iomem *topregs;
	unsigned int timeout = 100000;
	u32 deco_dbg_reg, flags;
	int i;

	/* Set the bit to request direct access to DECO0 */
	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
	setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);

	/* Busy-wait until the hardware grants us DECO0 (DEN0 set) */
	while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
								 --timeout)
		cpu_relax();

	if (!timeout) {
		dev_err(ctrldev, "failed to acquire DECO 0\n");
		/* Drop the request bit so the DECO isn't left reserved */
		clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
		return -ENODEV;
	}

	/* Copy the descriptor, word by word, into DECO0's buffer */
	for (i = 0; i < desc_len(desc); i++)
		wr_reg32(&topregs->deco.descbuf[i], *(desc + i));

	flags = DECO_JQCR_WHL;
	/*
	 * If the descriptor length is longer than 4 words, then the
	 * FOUR bit in JRCTRL register must be set.
	 */
	if (desc_len(desc) >= 4)
		flags |= DECO_JQCR_FOUR;

	/* Instruct the DECO to execute it */
	wr_reg32(&topregs->deco.jr_ctl_hi, flags);

	timeout = 10000000;
	do {
		deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
		/*
		 * If an error occurred in the descriptor, then
		 * the DECO status field will be set to 0x0D
		 */
		if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
		    DESC_DBG_DECO_STAT_HOST_ERR)
			break;
		cpu_relax();
	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

	/* Report the (masked) operation status back to the caller */
	*status = rd_reg32(&topregs->deco.op_status_hi) &
		  DECO_OP_STATUS_HI_ERR_MASK;

	/* Mark the DECO as free */
	clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);

	if (!timeout)
		return -EAGAIN;

	return 0;
}
138 
139 /*
140  * instantiate_rng - builds and executes a descriptor on DECO0,
141  *		     which initializes the RNG block.
142  * @ctrldev - pointer to device
143  * @state_handle_mask - bitmask containing the instantiation status
144  *			for the RNG4 state handles which exist in
145  *			the RNG4 block: 1 if it's been instantiated
146  *			by an external entry, 0 otherwise.
147  * @gen_sk  - generate data to be loaded into the JDKEK, TDKEK and TDSK;
148  *	      Caution: this can be done only once; if the keys need to be
149  *	      regenerated, a POR is required
150  *
151  * Return: - 0 if no error occurred
152  *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
153  *	   - -ENODEV if DECO0 couldn't be acquired
154  *	   - -EAGAIN if an error occurred when executing the descriptor
155  *	      f.i. there was a RNG hardware error due to not "good enough"
156  *	      entropy being aquired.
157  */
158 static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
159 			   int gen_sk)
160 {
161 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
162 	struct caam_full __iomem *topregs;
163 	struct rng4tst __iomem *r4tst;
164 	u32 *desc, status, rdsta_val;
165 	int ret = 0, sh_idx;
166 
167 	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
168 	r4tst = &topregs->ctrl.r4tst[0];
169 
170 	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
171 	if (!desc)
172 		return -ENOMEM;
173 
174 	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
175 		/*
176 		 * If the corresponding bit is set, this state handle
177 		 * was initialized by somebody else, so it's left alone.
178 		 */
179 		if ((1 << sh_idx) & state_handle_mask)
180 			continue;
181 
182 		/* Create the descriptor for instantiating RNG State Handle */
183 		build_instantiation_desc(desc, sh_idx, gen_sk);
184 
185 		/* Try to run it through DECO0 */
186 		ret = run_descriptor_deco0(ctrldev, desc, &status);
187 
188 		/*
189 		 * If ret is not 0, or descriptor status is not 0, then
190 		 * something went wrong. No need to try the next state
191 		 * handle (if available), bail out here.
192 		 * Also, if for some reason, the State Handle didn't get
193 		 * instantiated although the descriptor has finished
194 		 * without any error (HW optimizations for later
195 		 * CAAM eras), then try again.
196 		 */
197 		rdsta_val =
198 			rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
199 		if (status || !(rdsta_val & (1 << sh_idx)))
200 			ret = -EAGAIN;
201 		if (ret)
202 			break;
203 
204 		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
205 		/* Clear the contents before recreating the descriptor */
206 		memset(desc, 0x00, CAAM_CMD_SZ * 7);
207 	}
208 
209 	kfree(desc);
210 
211 	return ret;
212 }
213 
214 /*
215  * deinstantiate_rng - builds and executes a descriptor on DECO0,
216  *		       which deinitializes the RNG block.
217  * @ctrldev - pointer to device
218  * @state_handle_mask - bitmask containing the instantiation status
219  *			for the RNG4 state handles which exist in
220  *			the RNG4 block: 1 if it's been instantiated
221  *
222  * Return: - 0 if no error occurred
223  *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
224  *	   - -ENODEV if DECO0 couldn't be acquired
225  *	   - -EAGAIN if an error occurred when executing the descriptor
226  */
227 static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
228 {
229 	u32 *desc, status;
230 	int sh_idx, ret = 0;
231 
232 	desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
233 	if (!desc)
234 		return -ENOMEM;
235 
236 	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
237 		/*
238 		 * If the corresponding bit is set, then it means the state
239 		 * handle was initialized by us, and thus it needs to be
240 		 * deintialized as well
241 		 */
242 		if ((1 << sh_idx) & state_handle_mask) {
243 			/*
244 			 * Create the descriptor for deinstantating this state
245 			 * handle
246 			 */
247 			build_deinstantiation_desc(desc, sh_idx);
248 
249 			/* Try to run it through DECO0 */
250 			ret = run_descriptor_deco0(ctrldev, desc, &status);
251 
252 			if (ret || status) {
253 				dev_err(ctrldev,
254 					"Failed to deinstantiate RNG4 SH%d\n",
255 					sh_idx);
256 				break;
257 			}
258 			dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
259 		}
260 	}
261 
262 	kfree(desc);
263 
264 	return ret;
265 }
266 
/*
 * caam_remove - controller teardown, in reverse order of probe:
 * unregister the job-ring child platform devices, deinstantiate the
 * RNG4 state handles this driver owns, remove debugfs entries, unmap
 * the register page and free the private data. Always returns 0.
 */
static int caam_remove(struct platform_device *pdev)
{
	struct device *ctrldev;
	struct caam_drv_private *ctrlpriv;
	struct caam_full __iomem *topregs;
	int ring, ret = 0;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);
	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;

	/* Remove platform devices for JobRs */
	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
		if (ctrlpriv->jrpdev[ring])
			of_device_unregister(ctrlpriv->jrpdev[ring]);
	}

	/* De-initialize RNG state handles initialized by this driver. */
	/* Return value intentionally ignored: nothing to do on failure here */
	if (ctrlpriv->rng4_sh_init)
		deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);

	/* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

	/* Unmap controller region */
	iounmap(&topregs->ctrl);

	kfree(ctrlpriv->jrpdev);
	kfree(ctrlpriv);

	return ret;
}
301 
/*
 * kick_trng - sets the various parameters for enabling the initialization
 *	       of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
	struct device *ctrldev = &pdev->dev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_full __iomem *topregs;
	struct rng4tst __iomem *r4tst;
	u32 val;

	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
	r4tst = &topregs->ctrl.r4tst[0];

	/* put RNG4 into program mode before updating TRNG parameters */
	setbits32(&r4tst->rtmctl, RTMCTL_PRGM);

	/*
	 * Performance-wise, it does not make sense to
	 * set the delay to a value that is lower
	 * than the last one that worked (i.e. the state handles
	 * were instantiated properly). Thus, instead of wasting
	 * time trying to set the values controlling the sample
	 * frequency, the function simply returns.
	 */
	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
	      >> RTSDCTL_ENT_DLY_SHIFT;
	if (ent_delay <= val) {
		/* put RNG4 into run mode */
		clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
		return;
	}

	/* Program the new entropy delay, preserving the other RTSDCTL bits */
	val = rd_reg32(&r4tst->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
	wr_reg32(&r4tst->rtsdctl, val);
	/* min. freq. count, equal to 1/4 of the entropy sample length */
	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
	/* max. freq. count, equal to 8 times the entropy sample length */
	wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
	/* put RNG4 into run mode */
	clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
}
349 
350 /**
351  * caam_get_era() - Return the ERA of the SEC on SoC, based
352  * on the SEC_VID register.
353  * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
354  * @caam_id - the value of the SEC_VID register
355  **/
356 int caam_get_era(u64 caam_id)
357 {
358 	struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
359 	static const struct {
360 		u16 ip_id;
361 		u8 maj_rev;
362 		u8 era;
363 	} caam_eras[] = {
364 		{0x0A10, 1, 1},
365 		{0x0A10, 2, 2},
366 		{0x0A12, 1, 3},
367 		{0x0A14, 1, 3},
368 		{0x0A14, 2, 4},
369 		{0x0A16, 1, 4},
370 		{0x0A11, 1, 4}
371 	};
372 	int i;
373 
374 	for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
375 		if (caam_eras[i].ip_id == sec_vid->ip_id &&
376 			caam_eras[i].maj_rev == sec_vid->maj_rev)
377 				return caam_eras[i].era;
378 
379 	return -ENOTSUPP;
380 }
381 EXPORT_SYMBOL(caam_get_era);
382 
383 /* Probe routine for CAAM top (controller) level */
384 static int caam_probe(struct platform_device *pdev)
385 {
386 	int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
387 	u64 caam_id;
388 	struct device *dev;
389 	struct device_node *nprop, *np;
390 	struct caam_ctrl __iomem *ctrl;
391 	struct caam_full __iomem *topregs;
392 	struct caam_drv_private *ctrlpriv;
393 #ifdef CONFIG_DEBUG_FS
394 	struct caam_perfmon *perfmon;
395 #endif
396 	u64 cha_vid;
397 
398 	ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
399 	if (!ctrlpriv)
400 		return -ENOMEM;
401 
402 	dev = &pdev->dev;
403 	dev_set_drvdata(dev, ctrlpriv);
404 	ctrlpriv->pdev = pdev;
405 	nprop = pdev->dev.of_node;
406 
407 	/* Get configuration properties from device tree */
408 	/* First, get register page */
409 	ctrl = of_iomap(nprop, 0);
410 	if (ctrl == NULL) {
411 		dev_err(dev, "caam: of_iomap() failed\n");
412 		return -ENOMEM;
413 	}
414 	ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
415 
416 	/* topregs used to derive pointers to CAAM sub-blocks only */
417 	topregs = (struct caam_full __iomem *)ctrl;
418 
419 	/* Get the IRQ of the controller (for security violations only) */
420 	ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
421 
422 	/*
423 	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
424 	 * long pointers in master configuration register
425 	 */
426 	setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
427 		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
428 
429 	if (sizeof(dma_addr_t) == sizeof(u64))
430 		if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
431 			dma_set_mask(dev, DMA_BIT_MASK(40));
432 		else
433 			dma_set_mask(dev, DMA_BIT_MASK(36));
434 	else
435 		dma_set_mask(dev, DMA_BIT_MASK(32));
436 
437 	/*
438 	 * Detect and enable JobRs
439 	 * First, find out how many ring spec'ed, allocate references
440 	 * for all, then go probe each one.
441 	 */
442 	rspec = 0;
443 	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
444 		rspec++;
445 	if (!rspec) {
446 		/* for backward compatible with device trees */
447 		for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
448 			rspec++;
449 	}
450 
451 	ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
452 								GFP_KERNEL);
453 	if (ctrlpriv->jrpdev == NULL) {
454 		iounmap(&topregs->ctrl);
455 		return -ENOMEM;
456 	}
457 
458 	ring = 0;
459 	ctrlpriv->total_jobrs = 0;
460 	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
461 		ctrlpriv->jrpdev[ring] =
462 				of_platform_device_create(np, NULL, dev);
463 		if (!ctrlpriv->jrpdev[ring]) {
464 			pr_warn("JR%d Platform device creation error\n", ring);
465 			continue;
466 		}
467 		ctrlpriv->total_jobrs++;
468 		ring++;
469 	}
470 	if (!ring) {
471 		for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
472 			ctrlpriv->jrpdev[ring] =
473 				of_platform_device_create(np, NULL, dev);
474 			if (!ctrlpriv->jrpdev[ring]) {
475 				pr_warn("JR%d Platform device creation error\n",
476 					ring);
477 				continue;
478 			}
479 			ctrlpriv->total_jobrs++;
480 			ring++;
481 		}
482 	}
483 
484 	/* Check to see if QI present. If so, enable */
485 	ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
486 				  CTPR_QI_MASK);
487 	if (ctrlpriv->qi_present) {
488 		ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
489 		/* This is all that's required to physically enable QI */
490 		wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
491 	}
492 
493 	/* If no QI and no rings specified, quit and go home */
494 	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
495 		dev_err(dev, "no queues configured, terminating\n");
496 		caam_remove(pdev);
497 		return -ENOMEM;
498 	}
499 
500 	cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
501 
502 	/*
503 	 * If SEC has RNG version >= 4 and RNG state handle has not been
504 	 * already instantiated, do RNG instantiation
505 	 */
506 	if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
507 		ctrlpriv->rng4_sh_init =
508 			rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
509 		/*
510 		 * If the secure keys (TDKEK, JDKEK, TDSK), were already
511 		 * generated, signal this to the function that is instantiating
512 		 * the state handles. An error would occur if RNG4 attempts
513 		 * to regenerate these keys before the next POR.
514 		 */
515 		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
516 		ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
517 		do {
518 			int inst_handles =
519 				rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
520 								RDSTA_IFMASK;
521 			/*
522 			 * If either SH were instantiated by somebody else
523 			 * (e.g. u-boot) then it is assumed that the entropy
524 			 * parameters are properly set and thus the function
525 			 * setting these (kick_trng(...)) is skipped.
526 			 * Also, if a handle was instantiated, do not change
527 			 * the TRNG parameters.
528 			 */
529 			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
530 				kick_trng(pdev, ent_delay);
531 				ent_delay += 400;
532 			}
533 			/*
534 			 * if instantiate_rng(...) fails, the loop will rerun
535 			 * and the kick_trng(...) function will modfiy the
536 			 * upper and lower limits of the entropy sampling
537 			 * interval, leading to a sucessful initialization of
538 			 * the RNG.
539 			 */
540 			ret = instantiate_rng(dev, inst_handles,
541 					      gen_sk);
542 		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
543 		if (ret) {
544 			dev_err(dev, "failed to instantiate RNG");
545 			caam_remove(pdev);
546 			return ret;
547 		}
548 		/*
549 		 * Set handles init'ed by this module as the complement of the
550 		 * already initialized ones
551 		 */
552 		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
553 
554 		/* Enable RDB bit so that RNG works faster */
555 		setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
556 	}
557 
558 	/* NOTE: RTIC detection ought to go here, around Si time */
559 
560 	caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
561 
562 	/* Report "alive" for developer to see */
563 	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
564 		 caam_get_era(caam_id));
565 	dev_info(dev, "job rings = %d, qi = %d\n",
566 		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
567 
568 #ifdef CONFIG_DEBUG_FS
569 	/*
570 	 * FIXME: needs better naming distinction, as some amalgamation of
571 	 * "caam" and nprop->full_name. The OF name isn't distinctive,
572 	 * but does separate instances
573 	 */
574 	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
575 
576 	ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
577 	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
578 
579 	/* Controller-level - performance monitor counters */
580 	ctrlpriv->ctl_rq_dequeued =
581 		debugfs_create_u64("rq_dequeued",
582 				   S_IRUSR | S_IRGRP | S_IROTH,
583 				   ctrlpriv->ctl, &perfmon->req_dequeued);
584 	ctrlpriv->ctl_ob_enc_req =
585 		debugfs_create_u64("ob_rq_encrypted",
586 				   S_IRUSR | S_IRGRP | S_IROTH,
587 				   ctrlpriv->ctl, &perfmon->ob_enc_req);
588 	ctrlpriv->ctl_ib_dec_req =
589 		debugfs_create_u64("ib_rq_decrypted",
590 				   S_IRUSR | S_IRGRP | S_IROTH,
591 				   ctrlpriv->ctl, &perfmon->ib_dec_req);
592 	ctrlpriv->ctl_ob_enc_bytes =
593 		debugfs_create_u64("ob_bytes_encrypted",
594 				   S_IRUSR | S_IRGRP | S_IROTH,
595 				   ctrlpriv->ctl, &perfmon->ob_enc_bytes);
596 	ctrlpriv->ctl_ob_prot_bytes =
597 		debugfs_create_u64("ob_bytes_protected",
598 				   S_IRUSR | S_IRGRP | S_IROTH,
599 				   ctrlpriv->ctl, &perfmon->ob_prot_bytes);
600 	ctrlpriv->ctl_ib_dec_bytes =
601 		debugfs_create_u64("ib_bytes_decrypted",
602 				   S_IRUSR | S_IRGRP | S_IROTH,
603 				   ctrlpriv->ctl, &perfmon->ib_dec_bytes);
604 	ctrlpriv->ctl_ib_valid_bytes =
605 		debugfs_create_u64("ib_bytes_validated",
606 				   S_IRUSR | S_IRGRP | S_IROTH,
607 				   ctrlpriv->ctl, &perfmon->ib_valid_bytes);
608 
609 	/* Controller level - global status values */
610 	ctrlpriv->ctl_faultaddr =
611 		debugfs_create_u64("fault_addr",
612 				   S_IRUSR | S_IRGRP | S_IROTH,
613 				   ctrlpriv->ctl, &perfmon->faultaddr);
614 	ctrlpriv->ctl_faultdetail =
615 		debugfs_create_u32("fault_detail",
616 				   S_IRUSR | S_IRGRP | S_IROTH,
617 				   ctrlpriv->ctl, &perfmon->faultdetail);
618 	ctrlpriv->ctl_faultstatus =
619 		debugfs_create_u32("fault_status",
620 				   S_IRUSR | S_IRGRP | S_IROTH,
621 				   ctrlpriv->ctl, &perfmon->status);
622 
623 	/* Internal covering keys (useful in non-secure mode only) */
624 	ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
625 	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
626 	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
627 						S_IRUSR |
628 						S_IRGRP | S_IROTH,
629 						ctrlpriv->ctl,
630 						&ctrlpriv->ctl_kek_wrap);
631 
632 	ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
633 	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
634 	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
635 						 S_IRUSR |
636 						 S_IRGRP | S_IROTH,
637 						 ctrlpriv->ctl,
638 						 &ctrlpriv->ctl_tkek_wrap);
639 
640 	ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
641 	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
642 	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
643 						 S_IRUSR |
644 						 S_IRGRP | S_IROTH,
645 						 ctrlpriv->ctl,
646 						 &ctrlpriv->ctl_tdsk_wrap);
647 #endif
648 	return 0;
649 }
650 
651 static struct of_device_id caam_match[] = {
652 	{
653 		.compatible = "fsl,sec-v4.0",
654 	},
655 	{
656 		.compatible = "fsl,sec4.0",
657 	},
658 	{},
659 };
660 MODULE_DEVICE_TABLE(of, caam_match);
661 
/* Platform driver glue: binds caam_probe/caam_remove to the match table */
static struct platform_driver caam_driver = {
	.driver = {
		.name = "caam",
		.owner = THIS_MODULE,
		.of_match_table = caam_match,
	},
	.probe       = caam_probe,
	.remove      = caam_remove,
};

/* Registers the driver at module init and unregisters it at exit */
module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
677