// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

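/*
 * Initialize the transform record cache (TRC): take the cache into
 * reset, link every cache record into one free chain, wipe the hash
 * table, then program the free-chain head/tail pointers and the record
 * and hash table sizes, which differ between the EIP197b and EIP197d.
 */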
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

	if (priv->version == EIP197B) {
		cs_rc_max = EIP197B_CS_RC_MAX;
		cs_ht_wc = EIP197B_CS_HT_WC;
		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
	} else {
		cs_rc_max = EIP197D_CS_RC_MAX;
		cs_ht_wc = EIP197D_CS_HT_WC;
		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
	}

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking the record
	 * cache into reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
	for (i = 0; i < cs_rc_max; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}

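/*
 * Download one firmware image into a processing engine: hold the engine
 * in reset so its program RAM is accessible, enable program-memory
 * access, copy the big-endian image word by word, then revoke access
 * and release the reset.
 */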
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, int pe, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

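/*
 * Request and load the IFPP and IPUE firmware images needed by the
 * EIP197 (none are needed on other versions). Images are looked up
 * under "inside-secure/<variant>/", with a fallback to the legacy flat
 * location for the EIP197b. Each engine's scratchpad is cleared before
 * the images are written.
 */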
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[31], *dir = NULL;
	int i, j, ret = 0, pe;
	u32 val;

	switch (priv->version) {
	case EIP197B:
		dir = "eip197b";
		break;
	case EIP197D:
		dir = "eip197d";
		break;
	default:
		/* No firmware is required */
		return 0;
	}

	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = request_firmware(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (priv->version != EIP197B)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
			if (ret) {
				dev_err(priv->dev,
					"Failed to request firmware %s (%d)\n",
					fw_name[i], ret);
				goto release_fw;
			}
		}
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Clear the scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

		eip197_write_firmware(priv, fw[FW_IFPP], pe,
				      EIP197_PE_ICE_FPP_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

		eip197_write_firmware(priv, fw[FW_IPUE], pe,
				      EIP197_PE_ICE_PUE_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
	}

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}

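/*
 * Program the command descriptor rings. Bits 25-27 of EIP197_HIA_OPTIONS
 * hold the host interface data width; descriptor sizes are rounded up to
 * that width when computing the engine's fetch size and count.
 */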
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

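/*
 * Program the result descriptor rings: same layout as the command rings,
 * plus result/control buffer write-back and the per-ring interrupt
 * enable.
 */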
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

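/*
 * One-time engine bring-up: set byte swapping from the detected
 * endianness, reset and configure the DFE/DSE threads of each
 * processing engine, prepare all command and result descriptor rings
 * and, on EIP197 variants, initialize the record cache and load the
 * firmware before arming the rings.
 */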
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret, pe;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	/* For EIP197 set maximum number of TX commands to 2^5 = 32 */
	if (priv->version == EIP197B || priv->version == EIP197D)
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* Reset HIA input interface arbiter */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Take the DFE threads out of the reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97, but disabling
		 * it impacts performance.
		 */
		if (priv->version == EIP197B || priv->version == EIP197D)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Take the DSE threads out of the reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
		      EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197B || priv->version == EIP197D) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

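/*
 * Drain the software queue of a ring: hand each request to its context's
 * send() helper while counting the command and result descriptors
 * produced, then advertise them to the CDR and RDR. A request that could
 * not be sent for lack of ring resources is stashed and retried on the
 * next dequeue call.
 */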
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of
	 * resources, process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

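/*
 * Decode the error bits of a result descriptor: bits 1, 2, 5, 6 and 14
 * (mask 0x4066) are fatal; bits 0, 3, 4 and 7 flag blocksize, length or
 * overflow problems with the input; bit 9 reports an authentication
 * failure.
 */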
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely((!rdesc->descriptor_overflow) &&
		   (!rdesc->buffer_overflow) &&
		   (!rdesc->result_data.error_code)))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (rdesc->result_data.error_code & 0x4066) {
		/* Fatal error (bits 1, 2, 5, 6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give these priority over authentication failures:
		 * blocksize, length and overflow errors mean something
		 * is wrong with the input!
		 */
		return -EINVAL;
	} else if (rdesc->result_data.error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

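/*
 * Queue a command/result descriptor pair asking the engine to invalidate
 * the transform record it may have cached for the context at @ctxr_dma,
 * so that a stale record (e.g. after a key change) is never reused.
 */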
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}

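/*
 * Threaded IRQ bottom half: pop the processed packets off the RDR,
 * complete their requests, acknowledge the handled descriptors, and loop
 * if the hardware packet counter saturated.
 */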
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, go back and
	 * process the remaining requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
};

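/*
 * Register with the crypto API every algorithm supported by this engine
 * version, unwinding the registrations already made if one fails.
 */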
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (!(safexcel_algs[j]->engines & priv->version))
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

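/*
 * Derive the driver configuration from EIP197_HIA_OPTIONS: the number of
 * processing engines, the number of rings (capped by the max_rings
 * module parameter), and the command/result descriptor sizes with their
 * offsets aligned to the host data width.
 */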
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		mask = EIP197_N_PES_MASK;
		break;
	default:
		mask = EIP97_N_PES_MASK;
	}
	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
		break;
	case EIP97IES:
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
		break;
	}
}

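/*
 * Platform probe: map the registers, enable the optional core and "reg"
 * clocks, create the DMA pool for context records, then set up each ring
 * (descriptors, IRQ, workqueue) before initializing the hardware and
 * registering the algorithms.
 */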
static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	if (priv->version == EIP197B || priv->version == EIP197D)
		priv->flags |= EIP197_TRC_CACHE;

	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	safexcel_configure(priv);

	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_reg_clk;

		priv->ring[i].rdr_req = devm_kcalloc(dev,
			EIP197_DEFAULT_RING_SIZE,
			sizeof(priv->ring[i].rdr_req),
			GFP_KERNEL);
		if (!priv->ring[i].rdr_req) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_reg_clk;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_reg_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_reg_clk;
	}

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	/* Both calls are no-ops if the optional clock was not found */
	clk_disable_unprepare(priv->reg_clk);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B,
	},
	{},
};

static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name	= "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");