xref: /linux/drivers/spi/spi-ingenic.c (revision 257ca10c7317d4a424e48bb95d14ca53a1f1dd6f)
// SPDX-License-Identifier: GPL-2.0
/*
 * SPI bus driver for the Ingenic SoCs
 * Copyright (c) 2017-2021 Artur Rojek <contact@artur-rojek.eu>
 * Copyright (c) 2017-2021 Paul Cercueil <paul@crapouillou.net>
 * Copyright (c) 2022 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

#define REG_SSIDR	0x0
#define REG_SSICR0	0x4
#define REG_SSICR1	0x8
#define REG_SSISR	0xc
#define REG_SSIGR	0x18

#define REG_SSICR0_TENDIAN_LSB		BIT(19)
#define REG_SSICR0_RENDIAN_LSB		BIT(17)
#define REG_SSICR0_SSIE			BIT(15)
#define REG_SSICR0_LOOP			BIT(10)
#define REG_SSICR0_EACLRUN		BIT(7)
#define REG_SSICR0_FSEL			BIT(6)
#define REG_SSICR0_TFLUSH		BIT(2)
#define REG_SSICR0_RFLUSH		BIT(1)

#define REG_SSICR1_FRMHL_MASK		(BIT(31) | BIT(30))
#define REG_SSICR1_FRMHL		BIT(30)
#define REG_SSICR1_LFST			BIT(25)
#define REG_SSICR1_UNFIN		BIT(23)
#define REG_SSICR1_PHA			BIT(1)
#define REG_SSICR1_POL			BIT(0)

#define REG_SSISR_END			BIT(7)
#define REG_SSISR_BUSY			BIT(6)
#define REG_SSISR_TFF			BIT(5)
#define REG_SSISR_RFE			BIT(4)
#define REG_SSISR_RFHF			BIT(2)
#define REG_SSISR_UNDR			BIT(1)
#define REG_SSISR_OVER			BIT(0)

#define SPI_INGENIC_FIFO_SIZE		128u

struct jz_soc_info {
	u32 bits_per_word_mask;
	struct reg_field flen_field;
	bool has_trendian;

	unsigned int max_speed_hz;
	unsigned int max_native_cs;
};

struct ingenic_spi {
	const struct jz_soc_info *soc_info;
	struct clk *clk;
	struct resource *mem_res;

	struct regmap *map;
	struct regmap_field *flen_field;
};

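/*
 * Busy-poll the status register until the bits in @mask match @condition,
 * giving up after 10 ms.
 */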
static int spi_ingenic_wait(struct ingenic_spi *priv,
			    unsigned long mask,
			    bool condition)
{
	unsigned int val;

	return regmap_read_poll_timeout(priv->map, REG_SSISR, val,
					!!(val & mask) == condition,
					100, 10000);
}

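/*
 * Chip select handling: when deasserting CS at the end of a message, clear
 * the "transfer unfinished" flag, clear any underrun/overrun status and wait
 * for the transmission to end; when asserting CS, set the flag so the
 * controller keeps CS active between words. The FIFOs are flushed either way.
 */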
static void spi_ingenic_set_cs(struct spi_device *spi, bool disable)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(spi->controller);

	if (disable) {
		regmap_clear_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
		regmap_clear_bits(priv->map, REG_SSISR,
				  REG_SSISR_UNDR | REG_SSISR_OVER);

		spi_ingenic_wait(priv, REG_SSISR_END, true);
	} else {
		regmap_set_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
	}

	regmap_set_bits(priv->map, REG_SSICR0,
			REG_SSICR0_RFLUSH | REG_SSICR0_TFLUSH);
}

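/*
 * Program the per-transfer clock divider and frame length. The divider is
 * derived from clk / (2 * speed) and clamped to the 8-bit SSIGR range; the
 * FLEN field encodes the word size minus 2.
 */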
static void spi_ingenic_prepare_transfer(struct ingenic_spi *priv,
					 struct spi_device *spi,
					 struct spi_transfer *xfer)
{
	unsigned long clk_hz = clk_get_rate(priv->clk);
	u32 cdiv, speed_hz = xfer->speed_hz ?: spi->max_speed_hz,
	    bits_per_word = xfer->bits_per_word ?: spi->bits_per_word;

	cdiv = clk_hz / (speed_hz * 2);
	cdiv = clamp(cdiv, 1u, 0x100u) - 1;

	regmap_write(priv->map, REG_SSIGR, cdiv);

	regmap_field_write(priv->flen_field, bits_per_word - 2);
}

static void spi_ingenic_finalize_transfer(void *controller)
{
	spi_finalize_current_transfer(controller);
}

static struct dma_async_tx_descriptor *
spi_ingenic_prepare_dma(struct spi_controller *ctlr, struct dma_chan *chan,
			struct sg_table *sg, enum dma_transfer_direction dir,
			unsigned int bits)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	struct dma_slave_config cfg = {
		.direction = dir,
		.src_addr = priv->mem_res->start + REG_SSIDR,
		.dst_addr = priv->mem_res->start + REG_SSIDR,
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

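	/* Match the DMA word size and burst length to the transfer word size. */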
	if (bits > 16) {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.src_maxburst = cfg.dst_maxburst = 4;
	} else if (bits > 8) {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_maxburst = cfg.dst_maxburst = 2;
	} else {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.src_maxburst = cfg.dst_maxburst = 1;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ERR_PTR(ret);

	desc = dmaengine_prep_slave_sg(chan, sg->sgl, sg->nents, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return ERR_PTR(-ENOMEM);

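	/*
	 * Finalize the SPI transfer from the RX completion callback only:
	 * once all RX data has been received, the TX side is done as well.
	 */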
	if (dir == DMA_DEV_TO_MEM) {
		desc->callback = spi_ingenic_finalize_transfer;
		desc->callback_param = ctlr;
	}

	cookie = dmaengine_submit(desc);

	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_desc_free(desc);
		return ERR_PTR(ret);
	}

	return desc;
}

static int spi_ingenic_dma_tx(struct spi_controller *ctlr,
			      struct spi_transfer *xfer, unsigned int bits)
{
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;

	rx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_rx,
					  &xfer->rx_sg, DMA_DEV_TO_MEM, bits);
	if (IS_ERR(rx_desc))
		return PTR_ERR(rx_desc);

	tx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_tx,
					  &xfer->tx_sg, DMA_MEM_TO_DEV, bits);
	if (IS_ERR(tx_desc)) {
		dmaengine_terminate_async(ctlr->dma_rx);
		dmaengine_desc_free(rx_desc);
		return PTR_ERR(tx_desc);
	}

	dma_async_issue_pending(ctlr->dma_rx);
	dma_async_issue_pending(ctlr->dma_tx);

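	/*
	 * Return a positive value so the SPI core knows the transfer
	 * completes asynchronously, via spi_finalize_current_transfer().
	 */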
	return 1;
}

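/*
 * Generate the PIO transfer helpers for 8-, 16- and 32-bit words: prefill
 * the TX FIFO, then for every word wait for RX data, read it back and, if
 * more data remains, push the next TX word to keep the FIFO topped up.
 */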
#define SPI_INGENIC_TX(x)							\
static int spi_ingenic_tx##x(struct ingenic_spi *priv,				\
			     struct spi_transfer *xfer)				\
{										\
	unsigned int count = xfer->len / (x / 8);				\
	unsigned int prefill = min(count, SPI_INGENIC_FIFO_SIZE);		\
	const u##x *tx_buf = xfer->tx_buf;					\
	u##x *rx_buf = xfer->rx_buf;						\
	unsigned int i, val;							\
	int err;								\
										\
	/* Fill up the TX fifo */						\
	for (i = 0; i < prefill; i++) {						\
		val = tx_buf ? tx_buf[i] : 0;					\
										\
		regmap_write(priv->map, REG_SSIDR, val);			\
	}									\
										\
	for (i = 0; i < count; i++) {						\
		err = spi_ingenic_wait(priv, REG_SSISR_RFE, false);		\
		if (err)							\
			return err;						\
										\
		regmap_read(priv->map, REG_SSIDR, &val);			\
		if (rx_buf)							\
			rx_buf[i] = val;					\
										\
		if (i < count - prefill) {					\
			val = tx_buf ? tx_buf[i + prefill] : 0;			\
										\
			regmap_write(priv->map, REG_SSIDR, val);		\
		}								\
	}									\
										\
	return 0;								\
}
SPI_INGENIC_TX(8)
SPI_INGENIC_TX(16)
SPI_INGENIC_TX(32)
#undef SPI_INGENIC_TX

static int spi_ingenic_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word;
	bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer);

	spi_ingenic_prepare_transfer(priv, spi, xfer);

	if (ctlr->cur_msg_mapped && can_dma)
		return spi_ingenic_dma_tx(ctlr, xfer, bits);

	if (bits > 16)
		return spi_ingenic_tx32(priv, xfer);

	if (bits > 8)
		return spi_ingenic_tx16(priv, xfer);

	return spi_ingenic_tx8(priv, xfer);
}

static int spi_ingenic_prepare_message(struct spi_controller *ctlr,
				       struct spi_message *message)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	unsigned int cs = REG_SSICR1_FRMHL << spi_get_chipselect(spi, 0);
	unsigned int ssicr0_mask = REG_SSICR0_LOOP | REG_SSICR0_FSEL;
	unsigned int ssicr1_mask = REG_SSICR1_PHA | REG_SSICR1_POL | cs;
	unsigned int ssicr0 = 0, ssicr1 = 0;

	if (priv->soc_info->has_trendian) {
		ssicr0_mask |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;

		if (spi->mode & SPI_LSB_FIRST)
			ssicr0 |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
	} else {
		ssicr1_mask |= REG_SSICR1_LFST;

		if (spi->mode & SPI_LSB_FIRST)
			ssicr1 |= REG_SSICR1_LFST;
	}

	if (spi->mode & SPI_LOOP)
		ssicr0 |= REG_SSICR0_LOOP;
	if (spi_get_chipselect(spi, 0))
		ssicr0 |= REG_SSICR0_FSEL;

	if (spi->mode & SPI_CPHA)
		ssicr1 |= REG_SSICR1_PHA;
	if (spi->mode & SPI_CPOL)
		ssicr1 |= REG_SSICR1_POL;
	if (spi->mode & SPI_CS_HIGH)
		ssicr1 |= cs;

	regmap_update_bits(priv->map, REG_SSICR0, ssicr0_mask, ssicr0);
	regmap_update_bits(priv->map, REG_SSICR1, ssicr1_mask, ssicr1);

	return 0;
}

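/*
 * Bring the controller to a known state before a batch of messages: enable
 * the functional clock, reset the control/status registers and turn the SSI
 * block on. The counterpart below disables it again and gates the clock.
 */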
static int spi_ingenic_prepare_hardware(struct spi_controller *ctlr)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	regmap_write(priv->map, REG_SSICR0, REG_SSICR0_EACLRUN);
	regmap_write(priv->map, REG_SSICR1, 0);
	regmap_write(priv->map, REG_SSISR, 0);
	regmap_set_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);

	return 0;
}

static int spi_ingenic_unprepare_hardware(struct spi_controller *ctlr)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);

	regmap_clear_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);

	clk_disable_unprepare(priv->clk);

	return 0;
}

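/*
 * Only use DMA when the DMA engine either reports no scatter-gather burst
 * limit or can cover the whole transfer in max_sg_burst FIFO-sized chunks;
 * otherwise fall back to PIO.
 */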
static bool spi_ingenic_can_dma(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(ctlr->dma_tx, &caps);
	if (ret) {
		dev_err(&spi->dev, "Unable to get slave caps: %d\n", ret);
		return false;
	}

	return !caps.max_sg_burst ||
		xfer->len <= caps.max_sg_burst * SPI_INGENIC_FIFO_SIZE;
}

static int spi_ingenic_request_dma(struct spi_controller *ctlr,
				   struct device *dev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	ctlr->dma_tx = chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	ctlr->dma_rx = chan;

	ctlr->can_dma = spi_ingenic_can_dma;

	return 0;
}

static void spi_ingenic_release_dma(void *data)
{
	struct spi_controller *ctlr = data;

	if (ctlr->dma_tx)
		dma_release_channel(ctlr->dma_tx);
	if (ctlr->dma_rx)
		dma_release_channel(ctlr->dma_rx);
}

static const struct regmap_config spi_ingenic_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = REG_SSIGR,
};

static int spi_ingenic_probe(struct platform_device *pdev)
{
	const struct jz_soc_info *pdata;
	struct device *dev = &pdev->dev;
	struct spi_controller *ctlr;
	struct ingenic_spi *priv;
	void __iomem *base;
	int num_cs, ret;

	pdata = of_device_get_match_data(dev);
	if (!pdata) {
		dev_err(dev, "Missing platform data.\n");
		return -EINVAL;
	}

	ctlr = devm_spi_alloc_host(dev, sizeof(*priv));
	if (!ctlr) {
		dev_err(dev, "Unable to allocate SPI controller.\n");
		return -ENOMEM;
	}

	priv = spi_controller_get_devdata(ctlr);
	priv->soc_info = pdata;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		return dev_err_probe(dev, PTR_ERR(priv->clk),
				     "Unable to get clock.\n");
	}

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->mem_res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	priv->map = devm_regmap_init_mmio(dev, base, &spi_ingenic_regmap_config);
	if (IS_ERR(priv->map))
		return PTR_ERR(priv->map);

	priv->flen_field = devm_regmap_field_alloc(dev, priv->map,
						   pdata->flen_field);
	if (IS_ERR(priv->flen_field))
		return PTR_ERR(priv->flen_field);

	if (device_property_read_u32(dev, "num-cs", &num_cs))
		num_cs = pdata->max_native_cs;

	platform_set_drvdata(pdev, ctlr);

	ctlr->prepare_transfer_hardware = spi_ingenic_prepare_hardware;
	ctlr->unprepare_transfer_hardware = spi_ingenic_unprepare_hardware;
	ctlr->prepare_message = spi_ingenic_prepare_message;
	ctlr->set_cs = spi_ingenic_set_cs;
	ctlr->transfer_one = spi_ingenic_transfer_one;
	ctlr->mode_bits = SPI_MODE_3 | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH;
	ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
	ctlr->max_dma_len = SPI_INGENIC_FIFO_SIZE;
	ctlr->bits_per_word_mask = pdata->bits_per_word_mask;
	ctlr->min_speed_hz = 7200;
	ctlr->max_speed_hz = pdata->max_speed_hz;
	ctlr->use_gpio_descriptors = true;
	ctlr->max_native_cs = pdata->max_native_cs;
	ctlr->num_chipselect = num_cs;
	ctlr->dev.of_node = pdev->dev.of_node;

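	/* DMA is optional: fall back to PIO transfers if no channels are available. */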
	if (spi_ingenic_request_dma(ctlr, dev))
		dev_warn(dev, "DMA not available.\n");

	ret = devm_add_action_or_reset(dev, spi_ingenic_release_dma, ctlr);
	if (ret) {
		dev_err(dev, "Unable to add action.\n");
		return ret;
	}

	ret = devm_spi_register_controller(dev, ctlr);
	if (ret)
		dev_err(dev, "Unable to register SPI controller.\n");

	return ret;
}

static const struct jz_soc_info jz4750_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 17),
	.flen_field = REG_FIELD(REG_SSICR1, 4, 7),
	.has_trendian = false,

	.max_speed_hz = 54000000,
	.max_native_cs = 2,
};

static const struct jz_soc_info jz4780_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
	.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
	.has_trendian = true,

	.max_speed_hz = 54000000,
	.max_native_cs = 2,
};

static const struct jz_soc_info x1000_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
	.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
	.has_trendian = true,

	.max_speed_hz = 50000000,
	.max_native_cs = 2,
};

static const struct jz_soc_info x2000_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
	.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
	.has_trendian = true,

	.max_speed_hz = 50000000,
	.max_native_cs = 1,
};

static const struct of_device_id spi_ingenic_of_match[] = {
	{ .compatible = "ingenic,jz4750-spi", .data = &jz4750_soc_info },
	{ .compatible = "ingenic,jz4775-spi", .data = &jz4780_soc_info },
	{ .compatible = "ingenic,jz4780-spi", .data = &jz4780_soc_info },
	{ .compatible = "ingenic,x1000-spi", .data = &x1000_soc_info },
	{ .compatible = "ingenic,x2000-spi", .data = &x2000_soc_info },
	{}
};
MODULE_DEVICE_TABLE(of, spi_ingenic_of_match);

static struct platform_driver spi_ingenic_driver = {
	.driver = {
		.name = "spi-ingenic",
		.of_match_table = spi_ingenic_of_match,
	},
	.probe = spi_ingenic_probe,
};

module_platform_driver(spi_ingenic_driver);
MODULE_DESCRIPTION("SPI bus driver for the Ingenic SoCs");
MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
MODULE_LICENSE("GPL");