// SPDX-License-Identifier: GPL-2.0
/*
 * amlogic-gxl-core.c - hardware cryptographic offloader for Amlogic GXL SoC
 *
 * Copyright (C) 2018-2019 Corentin Labbe <clabbe@baylibre.com>
 *
 * Core file which registers crypto algorithms supported by the hardware.
 */

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "amlogic-gxl.h"

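/*
 * Per-flow interrupt handler. Each flow owns one IRQ line and, judging by the
 * offsets used below, one status register at (0x4 + flow) << 2 from the MMIO
 * base. A non-zero status means the descriptor chain completed: acknowledge
 * it, flag the flow as done and wake the thread waiting on the completion.
 */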
static irqreturn_t meson_irq_handler(int irq, void *data)
{
	struct meson_dev *mc = (struct meson_dev *)data;
	int flow;
	u32 p;

	for (flow = 0; flow < MAXFLOW; flow++) {
		if (mc->irqs[flow] == irq) {
			p = readl(mc->base + ((0x04 + flow) << 2));
			if (p) {
				writel_relaxed(0xF, mc->base + ((0x4 + flow) << 2));
				mc->chanlist[flow].status = 1;
				complete(&mc->chanlist[flow].complete);
				return IRQ_HANDLED;
			}
			dev_err(mc->dev, "%s %d Got irq for flow %d but ctrl is empty\n", __func__, irq, flow);
		}
	}

	dev_err(mc->dev, "%s %d from unknown irq\n", __func__, irq);
	return IRQ_HANDLED;
}

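/*
 * Algorithms offloaded by this driver: AES in CBC and ECB mode, exposed as
 * asynchronous skciphers driven by crypto_engine. Both entries set
 * CRYPTO_ALG_NEED_FALLBACK so that requests the hardware path does not
 * handle can be passed to a software implementation.
 */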
static struct meson_alg_template mc_algs[] = {
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.blockmode = MESON_OPMODE_CBC,
	.alg.skcipher.base = {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-gxl",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = meson_cipher_init,
			.cra_exit = meson_cipher_exit,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= meson_aes_setkey,
		.encrypt	= meson_skencrypt,
		.decrypt	= meson_skdecrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = meson_handle_cipher_request,
	},
},
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.blockmode = MESON_OPMODE_ECB,
	.alg.skcipher.base = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-gxl",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = meson_cipher_init,
			.cra_exit = meson_cipher_exit,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= meson_aes_setkey,
		.encrypt	= meson_skencrypt,
		.decrypt	= meson_skdecrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = meson_handle_cipher_request,
	},
},
};
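
/*
 * Illustrative sketch only, not part of the driver: once registered, these
 * transforms are reachable through the generic skcipher API, and whether this
 * hardware implementation is picked for a given request depends on its
 * cra_priority relative to other "cbc(aes)"/"ecb(aes)" providers. key/keylen
 * below are caller-provided placeholders.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		crypto_skcipher_setkey(tfm, key, keylen);
 *		... build and submit an skcipher_request ...
 *		crypto_free_skcipher(tfm);
 *	}
 */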
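/*
 * debugfs "stats" dump: one line per flow with its request count, then one
 * line per registered algorithm with its request and fallback counters. The
 * counters are only maintained when CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG is
 * set; otherwise zeroes are printed.
 */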
static int meson_debugfs_show(struct seq_file *seq, void *v)
{
	struct meson_dev *mc __maybe_unused = seq->private;
	int i;

	for (i = 0; i < MAXFLOW; i++)
		seq_printf(seq, "Channel %d: nreq %lu\n", i,
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
			   mc->chanlist[i].stat_req);
#else
			   0ul);
#endif

	for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
		switch (mc_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			seq_printf(seq, "%s %s %lu %lu\n",
				   mc_algs[i].alg.skcipher.base.base.cra_driver_name,
				   mc_algs[i].alg.skcipher.base.base.cra_name,
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
				   mc_algs[i].stat_req, mc_algs[i].stat_fb);
#else
				   0ul, 0ul);
#endif
			break;
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(meson_debugfs);

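/*
 * Tear down flows [0..i]: stop each flow's crypto_engine and free its
 * DMA-coherent descriptor table. Also used by the probe error path, which is
 * why a partially initialised channel (NULL tl) is tolerated.
 */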
static void meson_free_chanlist(struct meson_dev *mc, int i)
{
	while (i >= 0) {
		crypto_engine_exit(mc->chanlist[i].engine);
		if (mc->chanlist[i].tl)
			dma_free_coherent(mc->dev, sizeof(struct meson_desc) * MAXDESC,
					  mc->chanlist[i].tl,
					  mc->chanlist[i].t_phy);
		i--;
	}
}

/*
 * Allocate the channel list: one crypto_engine and one DMA-coherent
 * descriptor table per flow.
 */
static int meson_allocate_chanlist(struct meson_dev *mc)
{
	int i, err;

	mc->chanlist = devm_kcalloc(mc->dev, MAXFLOW,
				    sizeof(struct meson_flow), GFP_KERNEL);
	if (!mc->chanlist)
		return -ENOMEM;

	for (i = 0; i < MAXFLOW; i++) {
		init_completion(&mc->chanlist[i].complete);

		mc->chanlist[i].engine = crypto_engine_alloc_init(mc->dev, true);
		if (!mc->chanlist[i].engine) {
			dev_err(mc->dev, "Cannot allocate engine\n");
			i--;
			err = -ENOMEM;
			goto error_engine;
		}
		err = crypto_engine_start(mc->chanlist[i].engine);
		if (err) {
			dev_err(mc->dev, "Cannot start engine\n");
			goto error_engine;
		}
		mc->chanlist[i].tl = dma_alloc_coherent(mc->dev,
							sizeof(struct meson_desc) * MAXDESC,
							&mc->chanlist[i].t_phy,
							GFP_KERNEL);
		if (!mc->chanlist[i].tl) {
			err = -ENOMEM;
			goto error_engine;
		}
	}
	return 0;
error_engine:
	meson_free_chanlist(mc, i);
	return err;
}

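/*
 * Register every template from mc_algs with the crypto API. The mc back
 * pointer doubles as a "registered" marker, so meson_unregister_algs() only
 * touches entries that were actually registered.
 */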
static int meson_register_algs(struct meson_dev *mc)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
		mc_algs[i].mc = mc;
		switch (mc_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			err = crypto_engine_register_skcipher(&mc_algs[i].alg.skcipher);
			if (err) {
				dev_err(mc->dev, "Fail to register %s\n",
					mc_algs[i].alg.skcipher.base.base.cra_name);
				mc_algs[i].mc = NULL;
				return err;
			}
			break;
		}
	}

	return 0;
}

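/* Unregister the algorithms that meson_register_algs() managed to register. */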
static void meson_unregister_algs(struct meson_dev *mc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
		if (!mc_algs[i].mc)
			continue;
		switch (mc_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			crypto_engine_unregister_skcipher(&mc_algs[i].alg.skcipher);
			break;
		}
	}
}

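/*
 * Probe: map the MMIO region, grab the bus clock ("blkmv") and the per-flow
 * IRQs, enable the clock, allocate one engine plus descriptor table per flow,
 * then register the algorithms and (optionally) the debugfs stats file.
 */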
static int meson_crypto_probe(struct platform_device *pdev)
{
	struct meson_dev *mc;
	int err, i;

	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	mc->dev = &pdev->dev;
	platform_set_drvdata(pdev, mc);

	mc->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mc->base)) {
		err = PTR_ERR(mc->base);
		dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err);
		return err;
	}
	mc->busclk = devm_clk_get(&pdev->dev, "blkmv");
	if (IS_ERR(mc->busclk)) {
		err = PTR_ERR(mc->busclk);
		dev_err(&pdev->dev, "Cannot get core clock err=%d\n", err);
		return err;
	}

	for (i = 0; i < MAXFLOW; i++) {
		mc->irqs[i] = platform_get_irq(pdev, i);
		if (mc->irqs[i] < 0)
			return mc->irqs[i];

		err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0,
				       "gxl-crypto", mc);
		if (err < 0) {
			dev_err(mc->dev, "Cannot request IRQ for flow %d\n", i);
			return err;
		}
	}

	err = clk_prepare_enable(mc->busclk);
	if (err != 0) {
		dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
		return err;
	}

	err = meson_allocate_chanlist(mc);
	if (err)
		goto error_flow;

	err = meson_register_algs(mc);
	if (err)
		goto error_alg;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG)) {
		struct dentry *dbgfs_dir;

		dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
		debugfs_create_file("stats", 0444, dbgfs_dir, mc, &meson_debugfs_fops);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
		mc->dbgfs_dir = dbgfs_dir;
#endif
	}

	return 0;
error_alg:
	meson_unregister_algs(mc);
error_flow:
	meson_free_chanlist(mc, MAXFLOW - 1);
	clk_disable_unprepare(mc->busclk);
	return err;
}

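/* Remove: undo everything probe set up, in reverse order. */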
static void meson_crypto_remove(struct platform_device *pdev)
{
	struct meson_dev *mc = platform_get_drvdata(pdev);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	debugfs_remove_recursive(mc->dbgfs_dir);
#endif

	meson_unregister_algs(mc);

	meson_free_chanlist(mc, MAXFLOW - 1);

	clk_disable_unprepare(mc->busclk);
}

static const struct of_device_id meson_crypto_of_match_table[] = {
	{ .compatible = "amlogic,gxl-crypto", },
	{}
};
MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table);

static struct platform_driver meson_crypto_driver = {
	.probe		 = meson_crypto_probe,
	.remove_new	 = meson_crypto_remove,
	.driver		 = {
		.name		   = "gxl-crypto",
		.of_match_table	= meson_crypto_of_match_table,
	},
};

module_platform_driver(meson_crypto_driver);

MODULE_DESCRIPTION("Amlogic GXL cryptographic offloader");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");