xref: /linux/drivers/crypto/inside-secure/safexcel.c (revision 0ade34c37012ea5c516d9aa4d19a56e9f40a55ed)
1 /*
2  * Copyright (C) 2017 Marvell
3  *
4  * Antoine Tenart <antoine.tenart@free-electrons.com>
5  *
6  * This file is licensed under the terms of the GNU General Public
7  * License version 2. This program is licensed "as is" without any
8  * warranty of any kind, whether express or implied.
9  */
10 
11 #include <linux/clk.h>
12 #include <linux/device.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/dmapool.h>
15 #include <linux/firmware.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/of_platform.h>
19 #include <linux/of_irq.h>
20 #include <linux/platform_device.h>
21 #include <linux/workqueue.h>
22 
23 #include <crypto/internal/hash.h>
24 #include <crypto/internal/skcipher.h>
25 
26 #include "safexcel.h"
27 
28 static u32 max_rings = EIP197_MAX_RINGS;
29 module_param(max_rings, uint, 0644);
30 MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
31 
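/*
 * Initialize the EIP197 transform record cache (TRC): clear every cache
 * record, link the records into a free chain and configure the hash table
 * used by the classification engine.
 */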
32 static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
33 {
34 	u32 val, htable_offset;
35 	int i;
36 
37 	/* Enable the record cache memory access */
38 	val = readl(priv->base + EIP197_CS_RAM_CTRL);
39 	val &= ~EIP197_TRC_ENABLE_MASK;
40 	val |= EIP197_TRC_ENABLE_0;
41 	writel(val, priv->base + EIP197_CS_RAM_CTRL);
42 
43 	/* Clear all ECC errors */
44 	writel(0, priv->base + EIP197_TRC_ECCCTRL);
45 
46 	/*
47 	 * Make sure the cache memory is accessible by taking the record cache
48 	 * into reset.
49 	 */
50 	val = readl(priv->base + EIP197_TRC_PARAMS);
51 	val |= EIP197_TRC_PARAMS_SW_RESET;
52 	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
53 	writel(val, priv->base + EIP197_TRC_PARAMS);
54 
55 	/* Clear all records */
56 	for (i = 0; i < EIP197_CS_RC_MAX; i++) {
57 		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
58 
59 		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
60 		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
61 		       priv->base + offset);
62 
63 		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
64 		if (i == 0)
65 			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
66 		else if (i == EIP197_CS_RC_MAX - 1)
67 			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
68 		writel(val, priv->base + offset + sizeof(u32));
69 	}
70 
71 	/* Clear the hash table entries */
72 	htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
73 	for (i = 0; i < 64; i++)
74 		writel(GENMASK(29, 0),
75 		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
76 
77 	/* Disable the record cache memory access */
78 	val = readl(priv->base + EIP197_CS_RAM_CTRL);
79 	val &= ~EIP197_TRC_ENABLE_MASK;
80 	writel(val, priv->base + EIP197_CS_RAM_CTRL);
81 
82 	/* Write head and tail pointers of the record free chain */
83 	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
84 	      EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
85 	writel(val, priv->base + EIP197_TRC_FREECHAIN);
86 
87 	/* Configure the record cache #1 */
88 	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
89 	      EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
90 	writel(val, priv->base + EIP197_TRC_PARAMS2);
91 
92 	/* Configure the record cache #2 */
93 	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
94 	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
95 	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
96 	writel(val, priv->base + EIP197_TRC_PARAMS);
97 }
98 
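/*
 * Load one firmware image into the classification engine program RAM: hold
 * the engine in reset, enable access to its program memory, copy the image
 * (stored as big-endian words) and release the reset again.
 */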
99 static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
100 				  const struct firmware *fw, u32 ctrl,
101 				  u32 prog_en)
102 {
103 	const u32 *data = (const u32 *)fw->data;
104 	u32 val;
105 	int i;
106 
107 	/* Reset the engine to make its program memory accessible */
108 	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
109 	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
110 	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
111 	       EIP197_PE(priv) + ctrl);
112 
113 	/* Enable access to the program memory */
114 	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
115 
116 	/* Write the firmware */
117 	for (i = 0; i < fw->size / sizeof(u32); i++)
118 		writel(be32_to_cpu(data[i]),
119 		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
120 
121 	/* Disable access to the program memory */
122 	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
123 
124 	/* Release engine from reset */
125 	val = readl(EIP197_PE(priv) + ctrl);
126 	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
127 	writel(val, EIP197_PE(priv) + ctrl);
128 }
129 
130 static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
131 {
132 	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
133 	const struct firmware *fw[FW_NB];
134 	int i, j, ret = 0;
135 	u32 val;
136 
137 	for (i = 0; i < FW_NB; i++) {
138 		ret = request_firmware(&fw[i], fw_name[i], priv->dev);
139 		if (ret) {
140 			dev_err(priv->dev,
141 				"Failed to request firmware %s (%d)\n",
142 				fw_name[i], ret);
143 			goto release_fw;
144 		}
145 	}
146 
147 	/* Clear the scratchpad memory */
148 	val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
149 	val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
150 	       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
151 	       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
152 	       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
153 	writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
154 
155 	memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
156 		  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
157 
158 	eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
159 			      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
160 
161 	eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
162 			      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
163 
164 release_fw:
165 	for (j = 0; j < i; j++)
166 		release_firmware(fw[j]);
167 
168 	return ret;
169 }
170 
171 static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
172 {
173 	u32 hdw, cd_size_rnd, val;
174 	int i;
175 
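	/*
	 * HIA_OPTIONS[27:25] encodes the host data width (log2, in 32-bit
	 * words); round the command descriptor size up to that width for the
	 * fetch configuration programmed below.
	 */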
176 	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
177 	hdw &= GENMASK(27, 25);
178 	hdw >>= 25;
179 
180 	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;
181 
182 	for (i = 0; i < priv->config.rings; i++) {
183 		/* ring base address */
184 		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
185 		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
186 		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
187 		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
188 
189 		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
190 		       priv->config.cd_size,
191 		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
192 		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
193 		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
194 		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
195 
196 		/* Configure DMA tx control */
197 		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
198 		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
199 		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
200 
201 		/* clear any pending interrupt */
202 		writel(GENMASK(5, 0),
203 		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
204 	}
205 
206 	return 0;
207 }
208 
209 static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
210 {
211 	u32 hdw, rd_size_rnd, val;
212 	int i;
213 
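	/* Same data width based rounding as for the command descriptors */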
214 	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
215 	hdw &= GENMASK(27, 25);
216 	hdw >>= 25;
217 
218 	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;
219 
220 	for (i = 0; i < priv->config.rings; i++) {
221 		/* ring base address */
222 		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
223 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
224 		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
225 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
226 
227 		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
228 		       priv->config.rd_size,
229 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
230 
231 		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
232 		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
233 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
234 
235 		/* Configure DMA tx control */
236 		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
237 		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
238 		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
239 		writel(val,
240 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
241 
242 		/* clear any pending interrupt */
243 		writel(GENMASK(7, 0),
244 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
245 
246 		/* enable ring interrupt */
247 		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
248 		val |= EIP197_RDR_IRQ(i);
249 		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
250 	}
251 
252 	return 0;
253 }
254 
255 static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
256 {
257 	u32 version, val;
258 	int i, ret;
259 
260 	/* Determine endianness and configure byte swap */
261 	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
262 	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
263 
264 	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
265 		val |= EIP197_MST_CTRL_BYTE_SWAP;
266 	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
267 		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
268 
269 	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
270 
271 	/* Configure wr/rd cache values */
272 	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
273 	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
274 	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
275 
276 	/* Interrupts reset */
277 
278 	/* Disable all global interrupts */
279 	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
280 
281 	/* Clear any pending interrupt */
282 	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
283 
284 	/* Data Fetch Engine configuration */
285 
286 	/* Reset all DFE threads */
287 	writel(EIP197_DxE_THR_CTRL_RESET_PE,
288 	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
289 
290 	if (priv->version == EIP197) {
291 		/* Reset HIA input interface arbiter */
292 		writel(EIP197_HIA_RA_PE_CTRL_RESET,
293 		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
294 	}
295 
296 	/* DMA transfer size to use */
297 	val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
298 	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
299 	val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
300 	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
301 	val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
302 	writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
303 
304 	/* Take the DFE threads out of the reset state */
305 	writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
306 
307 	/* Configure the processing engine thresholds */
308 	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
309 	       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
310 	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
311 	       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
312 
313 	if (priv->version == EIP197) {
314 		/* enable HIA input interface arbiter and rings */
315 		writel(EIP197_HIA_RA_PE_CTRL_EN |
316 		       GENMASK(priv->config.rings - 1, 0),
317 		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
318 	}
319 
320 	/* Data Store Engine configuration */
321 
322 	/* Reset all DSE threads */
323 	writel(EIP197_DxE_THR_CTRL_RESET_PE,
324 	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
325 
326 	/* Wait for all DSE threads to complete */
327 	while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
328 		GENMASK(15, 12)) != GENMASK(15, 12))
329 		;
330 
331 	/* DMA transfer size to use */
332 	val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
333 	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
334 	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
335 	val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
336 	/* FIXME: instability issues can occur for EIP97 but disabling this
337 	 * impacts performance.
338 	 */
339 	if (priv->version == EIP197)
340 		val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
341 	writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
342 
343 	/* Take the DSE threads out of the reset state */
344 	writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
345 
346 	/* Configure the processing engine thresholds */
347 	writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
348 	       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
349 
350 	/* Processing Engine configuration */
351 
352 	/* H/W capabilities selection */
353 	val = EIP197_FUNCTION_RSVD;
354 	val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
355 	val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
356 	val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
357 	val |= EIP197_ALG_SHA2;
358 	writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
359 
360 	/* Prepare the command descriptor rings */
361 	for (i = 0; i < priv->config.rings; i++) {
362 		/* Clear interrupts for this ring */
363 		writel(GENMASK(31, 0),
364 		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
365 
366 		/* Disable external triggering */
367 		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
368 
369 		/* Clear the pending prepared counter */
370 		writel(EIP197_xDR_PREP_CLR_COUNT,
371 		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
372 
373 		/* Clear the pending processed counter */
374 		writel(EIP197_xDR_PROC_CLR_COUNT,
375 		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
376 
377 		writel(0,
378 		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
379 		writel(0,
380 		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
381 
382 		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
383 		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
384 	}
385 
386 	/* Prepare the result descriptor rings */
387 	for (i = 0; i < priv->config.rings; i++) {
388 		/* Disable external triggering */
389 		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
390 
391 		/* Clear the pending prepared counter */
392 		writel(EIP197_xDR_PREP_CLR_COUNT,
393 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
394 
395 		/* Clear the pending processed counter */
396 		writel(EIP197_xDR_PROC_CLR_COUNT,
397 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
398 
399 		writel(0,
400 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
401 		writel(0,
402 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
403 
404 		/* Ring size */
405 		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
406 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
407 	}
408 
409 	/* Enable command descriptor rings */
410 	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
411 	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
412 
413 	/* Enable result descriptor rings */
414 	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
415 	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
416 
417 	/* Clear any HIA interrupt */
418 	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
419 
420 	if (priv->version == EIP197) {
421 		eip197_trc_cache_init(priv);
422 
423 		ret = eip197_load_firmwares(priv);
424 		if (ret)
425 			return ret;
426 	}
427 
428 	safexcel_hw_setup_cdesc_rings(priv);
429 	safexcel_hw_setup_rdesc_rings(priv);
430 
431 	return 0;
432 }
433 
434 /* Called with ring's lock taken */
435 static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
436 				      int ring, int reqs)
437 {
438 	int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
439 
440 	if (!coal)
441 		return 0;
442 
443 	/* Configure when we want an interrupt */
444 	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
445 	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
446 	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
447 
448 	return coal;
449 }
450 
451 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
452 {
453 	struct crypto_async_request *req, *backlog;
454 	struct safexcel_context *ctx;
455 	struct safexcel_request *request;
456 	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
457 
458 	/* If a request wasn't properly dequeued because of a lack of resources,
459 	 * process it first.
460 	 */
461 	req = priv->ring[ring].req;
462 	backlog = priv->ring[ring].backlog;
463 	if (req)
464 		goto handle_req;
465 
466 	while (true) {
467 		spin_lock_bh(&priv->ring[ring].queue_lock);
468 		backlog = crypto_get_backlog(&priv->ring[ring].queue);
469 		req = crypto_dequeue_request(&priv->ring[ring].queue);
470 		spin_unlock_bh(&priv->ring[ring].queue_lock);
471 
472 		if (!req) {
473 			priv->ring[ring].req = NULL;
474 			priv->ring[ring].backlog = NULL;
475 			goto finalize;
476 		}
477 
478 handle_req:
479 		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
480 		if (!request)
481 			goto request_failed;
482 
483 		ctx = crypto_tfm_ctx(req->tfm);
484 		ret = ctx->send(req, ring, request, &commands, &results);
485 		if (ret) {
486 			kfree(request);
487 			goto request_failed;
488 		}
489 
490 		if (backlog)
491 			backlog->complete(backlog, -EINPROGRESS);
492 
493 		spin_lock_bh(&priv->ring[ring].egress_lock);
494 		list_add_tail(&request->list, &priv->ring[ring].list);
495 		spin_unlock_bh(&priv->ring[ring].egress_lock);
496 
497 		cdesc += commands;
498 		rdesc += results;
499 		nreq++;
500 	}
501 
502 request_failed:
503 	/* Not enough resources to handle all the requests. Bail out and save
504 	 * the request and the backlog for the next dequeue call (per-ring).
505 	 */
506 	priv->ring[ring].req = req;
507 	priv->ring[ring].backlog = backlog;
508 
509 finalize:
510 	if (!nreq)
511 		return;
512 
513 	spin_lock_bh(&priv->ring[ring].egress_lock);
514 
515 	if (!priv->ring[ring].busy) {
516 		nreq -= safexcel_try_push_requests(priv, ring, nreq);
517 		if (nreq)
518 			priv->ring[ring].busy = true;
519 	}
520 
521 	priv->ring[ring].requests_left += nreq;
522 
523 	spin_unlock_bh(&priv->ring[ring].egress_lock);
524 
525 	/* let the RDR know we have pending descriptors */
526 	writel((rdesc * priv->config.rd_offset) << 2,
527 	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
528 
529 	/* let the CDR know we have pending descriptors */
530 	writel((cdesc * priv->config.cd_offset) << 2,
531 	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
532 }
533 
534 void safexcel_free_context(struct safexcel_crypto_priv *priv,
535 			   struct crypto_async_request *req,
536 			   int result_sz)
537 {
538 	struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
539 
540 	if (ctx->result_dma)
541 		dma_unmap_single(priv->dev, ctx->result_dma, result_sz,
542 				 DMA_FROM_DEVICE);
543 
544 	if (ctx->cache) {
545 		dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
546 				 DMA_TO_DEVICE);
547 		kfree(ctx->cache);
548 		ctx->cache = NULL;
549 		ctx->cache_sz = 0;
550 	}
551 }
552 
553 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
554 {
555 	struct safexcel_command_desc *cdesc;
556 
557 	/* Acknowledge the command descriptors */
558 	do {
559 		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
560 		if (IS_ERR(cdesc)) {
561 			dev_err(priv->dev,
562 				"Could not retrieve the command descriptor\n");
563 			return;
564 		}
565 	} while (!cdesc->last_seg);
566 }
567 
568 void safexcel_inv_complete(struct crypto_async_request *req, int error)
569 {
570 	struct safexcel_inv_result *result = req->data;
571 
572 	if (error == -EINPROGRESS)
573 		return;
574 
575 	result->error = error;
576 	complete(&result->completion);
577 }
578 
579 int safexcel_invalidate_cache(struct crypto_async_request *async,
580 			      struct safexcel_crypto_priv *priv,
581 			      dma_addr_t ctxr_dma, int ring,
582 			      struct safexcel_request *request)
583 {
584 	struct safexcel_command_desc *cdesc;
585 	struct safexcel_result_desc *rdesc;
586 	int ret = 0;
587 
588 	spin_lock_bh(&priv->ring[ring].egress_lock);
589 
590 	/* Prepare command descriptor */
591 	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
592 	if (IS_ERR(cdesc)) {
593 		ret = PTR_ERR(cdesc);
594 		goto unlock;
595 	}
596 
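	/* Turn the descriptor into a transform record invalidation token */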
597 	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
598 	cdesc->control_data.options = 0;
599 	cdesc->control_data.refresh = 0;
600 	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
601 
602 	/* Prepare result descriptor */
603 	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
604 
605 	if (IS_ERR(rdesc)) {
606 		ret = PTR_ERR(rdesc);
607 		goto cdesc_rollback;
608 	}
609 
610 	request->req = async;
611 	goto unlock;
612 
613 cdesc_rollback:
614 	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
615 
616 unlock:
617 	spin_unlock_bh(&priv->ring[ring].egress_lock);
618 	return ret;
619 }
620 
621 static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
622 						     int ring)
623 {
624 	struct safexcel_request *sreq;
625 	struct safexcel_context *ctx;
626 	int ret, i, nreq, ndesc, tot_descs, done;
627 	bool should_complete;
628 
629 handle_results:
630 	tot_descs = 0;
631 
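	/* Packets the RDR reports as processed since the last acknowledgement */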
632 	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
633 	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
634 	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
635 	if (!nreq)
636 		goto requests_left;
637 
638 	for (i = 0; i < nreq; i++) {
639 		spin_lock_bh(&priv->ring[ring].egress_lock);
640 		sreq = list_first_entry(&priv->ring[ring].list,
641 					struct safexcel_request, list);
642 		list_del(&sreq->list);
643 		spin_unlock_bh(&priv->ring[ring].egress_lock);
644 
645 		ctx = crypto_tfm_ctx(sreq->req->tfm);
646 		ndesc = ctx->handle_result(priv, ring, sreq->req,
647 					   &should_complete, &ret);
648 		if (ndesc < 0) {
649 			kfree(sreq);
650 			dev_err(priv->dev, "failed to handle result (%d)\n", ndesc);
651 			goto acknowledge;
652 		}
653 
654 		if (should_complete) {
655 			local_bh_disable();
656 			sreq->req->complete(sreq->req, ret);
657 			local_bh_enable();
658 		}
659 
660 		kfree(sreq);
661 		tot_descs += ndesc;
662 	}
663 
664 acknowledge:
665 	if (i) {
666 		writel(EIP197_xDR_PROC_xD_PKT(i) |
667 		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
668 		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
669 	}
670 
671 	/* If the number of requests overflowed the counter, try to process more
672 	 * requests.
673 	 */
674 	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
675 		goto handle_results;
676 
677 requests_left:
678 	spin_lock_bh(&priv->ring[ring].egress_lock);
679 
680 	done = safexcel_try_push_requests(priv, ring,
681 					  priv->ring[ring].requests_left);
682 
683 	priv->ring[ring].requests_left -= done;
684 	if (!done && !priv->ring[ring].requests_left)
685 		priv->ring[ring].busy = false;
686 
687 	spin_unlock_bh(&priv->ring[ring].egress_lock);
688 }
689 
690 static void safexcel_dequeue_work(struct work_struct *work)
691 {
692 	struct safexcel_work_data *data =
693 			container_of(work, struct safexcel_work_data, work);
694 
695 	safexcel_dequeue(data->priv, data->ring);
696 }
697 
698 struct safexcel_ring_irq_data {
699 	struct safexcel_crypto_priv *priv;
700 	int ring;
701 };
702 
703 static irqreturn_t safexcel_irq_ring(int irq, void *data)
704 {
705 	struct safexcel_ring_irq_data *irq_data = data;
706 	struct safexcel_crypto_priv *priv = irq_data->priv;
707 	int ring = irq_data->ring, rc = IRQ_NONE;
708 	u32 status, stat;
709 
710 	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
711 	if (!status)
712 		return rc;
713 
714 	/* RDR interrupts */
715 	if (status & EIP197_RDR_IRQ(ring)) {
716 		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
717 
718 		if (unlikely(stat & EIP197_xDR_ERR)) {
719 			/*
720 			 * Fatal error, the RDR is unusable and must be
721 			 * reinitialized. This should not happen under
722 			 * normal circumstances.
723 			 */
724 			dev_err(priv->dev, "RDR: fatal error.\n");
725 		} else if (likely(stat & EIP197_xDR_THRESH)) {
726 			rc = IRQ_WAKE_THREAD;
727 		}
728 
729 		/* ACK the interrupts */
730 		writel(stat & 0xff,
731 		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
732 	}
733 
734 	/* ACK the interrupts */
735 	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
736 
737 	return rc;
738 }
739 
740 static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
741 {
742 	struct safexcel_ring_irq_data *irq_data = data;
743 	struct safexcel_crypto_priv *priv = irq_data->priv;
744 	int ring = irq_data->ring;
745 
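	/*
	 * Complete the finished requests first, then schedule the dequeue
	 * worker so backlogged requests can be pushed to this ring.
	 */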
746 	safexcel_handle_result_descriptor(priv, ring);
747 
748 	queue_work(priv->ring[ring].workqueue,
749 		   &priv->ring[ring].work_data.work);
750 
751 	return IRQ_HANDLED;
752 }
753 
754 static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
755 				     irq_handler_t handler,
756 				     irq_handler_t threaded_handler,
757 				     struct safexcel_ring_irq_data *ring_irq_priv)
758 {
759 	int ret, irq = platform_get_irq_byname(pdev, name);
760 
761 	if (irq < 0) {
762 		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
763 		return irq;
764 	}
765 
766 	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
767 					threaded_handler, IRQF_ONESHOT,
768 					dev_name(&pdev->dev), ring_irq_priv);
769 	if (ret) {
770 		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
771 		return ret;
772 	}
773 
774 	return irq;
775 }
776 
777 static struct safexcel_alg_template *safexcel_algs[] = {
778 	&safexcel_alg_ecb_aes,
779 	&safexcel_alg_cbc_aes,
780 	&safexcel_alg_sha1,
781 	&safexcel_alg_sha224,
782 	&safexcel_alg_sha256,
783 	&safexcel_alg_hmac_sha1,
784 };
785 
786 static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
787 {
788 	int i, j, ret = 0;
789 
790 	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
791 		safexcel_algs[i]->priv = priv;
792 
793 		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
794 			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
795 		else
796 			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
797 
798 		if (ret)
799 			goto fail;
800 	}
801 
802 	return 0;
803 
804 fail:
805 	for (j = 0; j < i; j++) {
806 		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
807 			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
808 		else
809 			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
810 	}
811 
812 	return ret;
813 }
814 
815 static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
816 {
817 	int i;
818 
819 	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
820 		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
821 			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
822 		else
823 			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
824 	}
825 }
826 
827 static void safexcel_configure(struct safexcel_crypto_priv *priv)
828 {
829 	u32 val, mask;
830 
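	/* Alignment mask derived from the host data width in HIA_OPTIONS[27:25] */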
831 	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
832 	val = (val & GENMASK(27, 25)) >> 25;
833 	mask = BIT(val) - 1;
834 
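	/* HIA_OPTIONS[3:0]: number of rings, capped by the max_rings parameter */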
835 	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
836 	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
837 
838 	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
839 	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
840 
841 	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
842 	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
843 }
844 
845 static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
846 {
847 	struct safexcel_register_offsets *offsets = &priv->offsets;
848 
849 	if (priv->version == EIP197) {
850 		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
851 		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
852 		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
853 		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
854 		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
855 		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
856 		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
857 		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
858 		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
859 		offsets->pe		= EIP197_PE_BASE;
860 	} else {
861 		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
862 		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
863 		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
864 		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
865 		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
866 		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
867 		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
868 		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
869 		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
870 		offsets->pe		= EIP97_PE_BASE;
871 	}
872 }
873 
874 static int safexcel_probe(struct platform_device *pdev)
875 {
876 	struct device *dev = &pdev->dev;
877 	struct resource *res;
878 	struct safexcel_crypto_priv *priv;
879 	int i, ret;
880 
881 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
882 	if (!priv)
883 		return -ENOMEM;
884 
885 	priv->dev = dev;
886 	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
887 
888 	safexcel_init_register_offsets(priv);
889 
890 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
891 	priv->base = devm_ioremap_resource(dev, res);
892 	if (IS_ERR(priv->base)) {
893 		dev_err(dev, "failed to get resource\n");
894 		return PTR_ERR(priv->base);
895 	}
896 
897 	priv->clk = of_clk_get(dev->of_node, 0);
898 	if (!IS_ERR(priv->clk)) {
899 		ret = clk_prepare_enable(priv->clk);
900 		if (ret) {
901 			dev_err(dev, "unable to enable clk (%d)\n", ret);
902 			return ret;
903 		}
904 	} else {
905 		/* The clock isn't mandatory */
906 		if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
907 			return -EPROBE_DEFER;
908 	}
909 
910 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
911 	if (ret)
912 		goto err_clk;
913 
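	/* DMA pool for the per-request context records shared with the engine */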
914 	priv->context_pool = dmam_pool_create("safexcel-context", dev,
915 					      sizeof(struct safexcel_context_record),
916 					      1, 0);
917 	if (!priv->context_pool) {
918 		ret = -ENOMEM;
919 		goto err_clk;
920 	}
921 
922 	safexcel_configure(priv);
923 
924 	for (i = 0; i < priv->config.rings; i++) {
925 		char irq_name[6] = {0}; /* "ringX\0" */
926 		char wq_name[9] = {0}; /* "wq_ringX\0" */
927 		int irq;
928 		struct safexcel_ring_irq_data *ring_irq;
929 
930 		ret = safexcel_init_ring_descriptors(priv,
931 						     &priv->ring[i].cdr,
932 						     &priv->ring[i].rdr);
933 		if (ret)
934 			goto err_clk;
935 
936 		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
937 		if (!ring_irq) {
938 			ret = -ENOMEM;
939 			goto err_clk;
940 		}
941 
942 		ring_irq->priv = priv;
943 		ring_irq->ring = i;
944 
945 		snprintf(irq_name, 6, "ring%d", i);
946 		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
947 						safexcel_irq_ring_thread,
948 						ring_irq);
949 		if (irq < 0) {
950 			ret = irq;
951 			goto err_clk;
952 		}
953 
954 		priv->ring[i].work_data.priv = priv;
955 		priv->ring[i].work_data.ring = i;
956 		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
957 
958 		snprintf(wq_name, 9, "wq_ring%d", i);
959 		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
960 		if (!priv->ring[i].workqueue) {
961 			ret = -ENOMEM;
962 			goto err_clk;
963 		}
964 
965 		priv->ring[i].requests_left = 0;
966 		priv->ring[i].busy = false;
967 
968 		crypto_init_queue(&priv->ring[i].queue,
969 				  EIP197_DEFAULT_RING_SIZE);
970 
971 		INIT_LIST_HEAD(&priv->ring[i].list);
972 		spin_lock_init(&priv->ring[i].lock);
973 		spin_lock_init(&priv->ring[i].egress_lock);
974 		spin_lock_init(&priv->ring[i].queue_lock);
975 	}
976 
977 	platform_set_drvdata(pdev, priv);
978 	atomic_set(&priv->ring_used, 0);
979 
980 	ret = safexcel_hw_init(priv);
981 	if (ret) {
982 		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
983 		goto err_clk;
984 	}
985 
986 	ret = safexcel_register_algorithms(priv);
987 	if (ret) {
988 		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
989 		goto err_clk;
990 	}
991 
992 	return 0;
993 
994 err_clk:
995 	clk_disable_unprepare(priv->clk);
996 	return ret;
997 }
998 
1000 static int safexcel_remove(struct platform_device *pdev)
1001 {
1002 	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
1003 	int i;
1004 
1005 	safexcel_unregister_algorithms(priv);
1006 	clk_disable_unprepare(priv->clk);
1007 
1008 	for (i = 0; i < priv->config.rings; i++)
1009 		destroy_workqueue(priv->ring[i].workqueue);
1010 
1011 	return 0;
1012 }
1013 
1014 static const struct of_device_id safexcel_of_match_table[] = {
1015 	{
1016 		.compatible = "inside-secure,safexcel-eip97",
1017 		.data = (void *)EIP97,
1018 	},
1019 	{
1020 		.compatible = "inside-secure,safexcel-eip197",
1021 		.data = (void *)EIP197,
1022 	},
1023 	{},
1024 };
1025 
1027 static struct platform_driver crypto_safexcel = {
1028 	.probe		= safexcel_probe,
1029 	.remove		= safexcel_remove,
1030 	.driver		= {
1031 		.name	= "crypto-safexcel",
1032 		.of_match_table = safexcel_of_match_table,
1033 	},
1034 };
1035 module_platform_driver(crypto_safexcel);
1036 
1037 MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
1038 MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
1039 MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
1040 MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
1041 MODULE_LICENSE("GPL v2");
1042