xref: /linux/drivers/spi/spi-hisi-kunpeng.c (revision 55ec81f7517fad09135f65552cea0a3ee84fff30)
// SPDX-License-Identifier: GPL-2.0-only
//
// HiSilicon SPI Controller Driver for Kunpeng SoCs
//
// Copyright (c) 2021 HiSilicon Technologies Co., Ltd.
// Author: Jay Fang <f.fangjian@huawei.com>
//
// This code is based on spi-dw-core.c.

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

/* Register offsets */
#define HISI_SPI_CSCR		0x00	/* cs control register */
#define HISI_SPI_CR		0x04	/* spi common control register */
#define HISI_SPI_ENR		0x08	/* spi enable register */
#define HISI_SPI_FIFOC		0x0c	/* fifo level control register */
#define HISI_SPI_IMR		0x10	/* interrupt mask register */
#define HISI_SPI_DIN		0x14	/* data in register */
#define HISI_SPI_DOUT		0x18	/* data out register */
#define HISI_SPI_SR		0x1c	/* status register */
#define HISI_SPI_RISR		0x20	/* raw interrupt status register */
#define HISI_SPI_ISR		0x24	/* interrupt status register */
#define HISI_SPI_ICR		0x28	/* interrupt clear register */
#define HISI_SPI_VERSION	0xe0	/* version register */

/* Bit fields in HISI_SPI_CR */
#define CR_LOOP_MASK		GENMASK(1, 1)
#define CR_CPOL_MASK		GENMASK(2, 2)
#define CR_CPHA_MASK		GENMASK(3, 3)
#define CR_DIV_PRE_MASK		GENMASK(11, 4)
#define CR_DIV_POST_MASK	GENMASK(19, 12)
#define CR_BPW_MASK		GENMASK(24, 20)
#define CR_SPD_MODE_MASK	GENMASK(25, 25)

/* Bit fields in HISI_SPI_FIFOC */
#define FIFOC_TX_MASK		GENMASK(5, 3)
#define FIFOC_RX_MASK		GENMASK(11, 9)

/* Bit fields in HISI_SPI_IMR, 4 bits */
#define IMR_RXOF		BIT(0)		/* Receive Overflow */
#define IMR_RXTO		BIT(1)		/* Receive Timeout */
#define IMR_RX			BIT(2)		/* Receive */
#define IMR_TX			BIT(3)		/* Transmit */
#define IMR_MASK		(IMR_RXOF | IMR_RXTO | IMR_RX | IMR_TX)

/* Bit fields in HISI_SPI_SR, 5 bits */
#define SR_TXE			BIT(0)		/* Transmit FIFO empty */
#define SR_TXNF			BIT(1)		/* Transmit FIFO not full */
#define SR_RXNE			BIT(2)		/* Receive FIFO not empty */
#define SR_RXF			BIT(3)		/* Receive FIFO full */
#define SR_BUSY			BIT(4)		/* Busy Flag */

/* Bit fields in HISI_SPI_ISR, 4 bits */
#define ISR_RXOF		BIT(0)		/* Receive Overflow */
#define ISR_RXTO		BIT(1)		/* Receive Timeout */
#define ISR_RX			BIT(2)		/* Receive */
#define ISR_TX			BIT(3)		/* Transmit */
#define ISR_MASK		(ISR_RXOF | ISR_RXTO | ISR_RX | ISR_TX)

/* Bit fields in HISI_SPI_ICR, 2 bits */
#define ICR_RXOF		BIT(0)		/* Receive Overflow */
#define ICR_RXTO		BIT(1)		/* Receive Timeout */
#define ICR_MASK		(ICR_RXOF | ICR_RXTO)

#define DIV_POST_MAX		0xFF
#define DIV_POST_MIN		0x00
#define DIV_PRE_MAX		0xFE
#define DIV_PRE_MIN		0x02
#define CLK_DIV_MAX		((1 + DIV_POST_MAX) * DIV_PRE_MAX)
#define CLK_DIV_MIN		((1 + DIV_POST_MIN) * DIV_PRE_MIN)

#define DEFAULT_NUM_CS		1

#define HISI_SPI_WAIT_TIMEOUT_MS	10UL

enum hisi_spi_rx_level_trig {
	HISI_SPI_RX_1,
	HISI_SPI_RX_4,
	HISI_SPI_RX_8,
	HISI_SPI_RX_16,
	HISI_SPI_RX_32,
	HISI_SPI_RX_64,
	HISI_SPI_RX_128
};

enum hisi_spi_tx_level_trig {
	HISI_SPI_TX_1_OR_LESS,
	HISI_SPI_TX_4_OR_LESS,
	HISI_SPI_TX_8_OR_LESS,
	HISI_SPI_TX_16_OR_LESS,
	HISI_SPI_TX_32_OR_LESS,
	HISI_SPI_TX_64_OR_LESS,
	HISI_SPI_TX_128_OR_LESS
};

enum hisi_spi_frame_n_bytes {
	HISI_SPI_N_BYTES_NULL,
	HISI_SPI_N_BYTES_U8,
	HISI_SPI_N_BYTES_U16,
	HISI_SPI_N_BYTES_U32 = 4
};

/* Slave spi_dev related */
struct hisi_chip_data {
	u32 cr;
	u32 speed_hz;	/* baud rate */
	u16 clk_div;	/* baud rate divider */

	/* clk_div = (1 + div_post) * div_pre */
	u8 div_post;	/* value from 0 to 255 */
	u8 div_pre;	/* value from 2 to 254 (even only!) */
};

struct hisi_spi {
	struct device		*dev;

	void __iomem		*regs;
	int			irq;
	u32			fifo_len; /* depth of the FIFO buffer */

	/* Current message transfer state info */
	const void		*tx;
	unsigned int		tx_len;
	void			*rx;
	unsigned int		rx_len;
	u8			n_bytes; /* current word size: 1, 2 or 4 bytes */

	struct dentry *debugfs;
	struct debugfs_regset32 regset;
};

#define HISI_SPI_DBGFS_REG(_name, _off)	\
{					\
	.name = _name,			\
	.offset = _off,			\
}

static const struct debugfs_reg32 hisi_spi_regs[] = {
	HISI_SPI_DBGFS_REG("CSCR", HISI_SPI_CSCR),
	HISI_SPI_DBGFS_REG("CR", HISI_SPI_CR),
	HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
	HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
	HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
	HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
	HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
	HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
	HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
	HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
	HISI_SPI_DBGFS_REG("ICR", HISI_SPI_ICR),
	HISI_SPI_DBGFS_REG("VERSION", HISI_SPI_VERSION),
};

static int hisi_spi_debugfs_init(struct hisi_spi *hs)
{
	char name[32];
	struct spi_controller *host;

	/* hs->dev is the platform device; its drvdata is the controller */
	host = dev_get_drvdata(hs->dev);
	snprintf(name, 32, "hisi_spi%d", host->bus_num);
	hs->debugfs = debugfs_create_dir(name, NULL);
	if (IS_ERR(hs->debugfs))
		return -ENOMEM;

	hs->regset.regs = hisi_spi_regs;
	hs->regset.nregs = ARRAY_SIZE(hisi_spi_regs);
	hs->regset.base = hs->regs;
	debugfs_create_regset32("registers", 0400, hs->debugfs, &hs->regset);

	return 0;
}

static u32 hisi_spi_busy(struct hisi_spi *hs)
{
	return readl(hs->regs + HISI_SPI_SR) & SR_BUSY;
}

static u32 hisi_spi_rx_not_empty(struct hisi_spi *hs)
{
	return readl(hs->regs + HISI_SPI_SR) & SR_RXNE;
}

static u32 hisi_spi_tx_not_full(struct hisi_spi *hs)
{
	return readl(hs->regs + HISI_SPI_SR) & SR_TXNF;
}

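/*
 * Drain any stale words left in the RX FIFO and spin until the
 * controller reports idle. The loops_per_jiffy-derived limit only
 * bounds the wait so this cannot spin forever.
 */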
static void hisi_spi_flush_fifo(struct hisi_spi *hs)
{
	unsigned long limit = loops_per_jiffy << 1;

	do {
		while (hisi_spi_rx_not_empty(hs))
			readl(hs->regs + HISI_SPI_DOUT);
	} while (hisi_spi_busy(hs) && limit--);
}

/* Disable the controller and all interrupts */
static void hisi_spi_disable(struct hisi_spi *hs)
{
	writel(0, hs->regs + HISI_SPI_ENR);
	writel(IMR_MASK, hs->regs + HISI_SPI_IMR);
	writel(ICR_MASK, hs->regs + HISI_SPI_ICR);
}

static u8 hisi_spi_n_bytes(struct spi_transfer *transfer)
{
	if (transfer->bits_per_word <= 8)
		return HISI_SPI_N_BYTES_U8;
	else if (transfer->bits_per_word <= 16)
		return HISI_SPI_N_BYTES_U16;
	else
		return HISI_SPI_N_BYTES_U32;
}

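/*
 * Pop up to one FIFO's worth of received words and store them in the
 * current RX buffer, narrowed to the transfer's word size. rx_len is
 * decremented even for RX-less transfers so completion can be detected.
 */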
static void hisi_spi_reader(struct hisi_spi *hs)
{
	u32 max = min_t(u32, hs->rx_len, hs->fifo_len);
	u32 rxw;

	while (hisi_spi_rx_not_empty(hs) && max--) {
		rxw = readl(hs->regs + HISI_SPI_DOUT);
		/* Check the transfer's original "rx" is not null */
		if (hs->rx) {
			switch (hs->n_bytes) {
			case HISI_SPI_N_BYTES_U8:
				*(u8 *)(hs->rx) = rxw;
				break;
			case HISI_SPI_N_BYTES_U16:
				*(u16 *)(hs->rx) = rxw;
				break;
			case HISI_SPI_N_BYTES_U32:
				*(u32 *)(hs->rx) = rxw;
				break;
			}
			hs->rx += hs->n_bytes;
		}
		--hs->rx_len;
	}
}

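/*
 * Push up to one FIFO's worth of words into the TX FIFO. For transfers
 * without a TX buffer, zeroes are written so the clock keeps running
 * for the RX side.
 */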
static void hisi_spi_writer(struct hisi_spi *hs)
{
	u32 max = min_t(u32, hs->tx_len, hs->fifo_len);
	u32 txw = 0;

	while (hisi_spi_tx_not_full(hs) && max--) {
		/* Check the transfer's original "tx" is not null */
		if (hs->tx) {
			switch (hs->n_bytes) {
			case HISI_SPI_N_BYTES_U8:
				txw = *(u8 *)(hs->tx);
				break;
			case HISI_SPI_N_BYTES_U16:
				txw = *(u16 *)(hs->tx);
				break;
			case HISI_SPI_N_BYTES_U32:
				txw = *(u32 *)(hs->tx);
				break;
			}
			hs->tx += hs->n_bytes;
		}
		writel(txw, hs->regs + HISI_SPI_DIN);
		--hs->tx_len;
	}
}

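/*
 * Split clk_div into the CR divider fields so that
 * clk_div = (1 + div_post) * div_pre, using the largest even
 * pre-divider that divides clk_div exactly.
 */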
static void __hisi_calc_div_reg(struct hisi_chip_data *chip)
{
	chip->div_pre = DIV_PRE_MAX;
	while (chip->div_pre >= DIV_PRE_MIN) {
		if (chip->clk_div % chip->div_pre == 0)
			break;

		chip->div_pre -= 2;
	}

	if (chip->div_pre > chip->clk_div)
		chip->div_pre = chip->clk_div;

	chip->div_post = (chip->clk_div / chip->div_pre) - 1;
}

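/*
 * Derive an even, clamped divider of the firmware-provided maximum bus
 * speed from the requested speed and return the resulting effective
 * speed. The divider register fields are recomputed only when the
 * effective speed actually changes.
 */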
static u32 hisi_calc_effective_speed(struct spi_controller *host,
			struct hisi_chip_data *chip, u32 speed_hz)
{
	u32 effective_speed;

	/* Note: the clock divider does not support odd values */
	chip->clk_div = DIV_ROUND_UP(host->max_speed_hz, speed_hz) + 1;
	chip->clk_div &= 0xfffe;
	if (chip->clk_div > CLK_DIV_MAX)
		chip->clk_div = CLK_DIV_MAX;

	effective_speed = host->max_speed_hz / chip->clk_div;
	if (chip->speed_hz != effective_speed) {
		__hisi_calc_div_reg(chip);
		chip->speed_hz = effective_speed;
	}

	return effective_speed;
}

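/*
 * Pre-compute the static part of the control register from the SPI mode
 * flags (CPOL/CPHA/loopback). Per-transfer fields such as the divider
 * and bits-per-word are filled in by hisi_spi_transfer_one().
 */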
static u32 hisi_spi_prepare_cr(struct spi_device *spi)
{
	u32 cr = FIELD_PREP(CR_SPD_MODE_MASK, 1);

	cr |= FIELD_PREP(CR_CPHA_MASK, (spi->mode & SPI_CPHA) ? 1 : 0);
	cr |= FIELD_PREP(CR_CPOL_MASK, (spi->mode & SPI_CPOL) ? 1 : 0);
	cr |= FIELD_PREP(CR_LOOP_MASK, (spi->mode & SPI_LOOP) ? 1 : 0);

	return cr;
}

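/*
 * Bring the controller into a known state: disabled with all interrupts
 * masked, default TX/RX FIFO thresholds programmed, and the fixed FIFO
 * depth recorded for the transfer bookkeeping.
 */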
static void hisi_spi_hw_init(struct hisi_spi *hs)
{
	hisi_spi_disable(hs);

	/* FIFO default config */
	writel(FIELD_PREP(FIFOC_TX_MASK, HISI_SPI_TX_64_OR_LESS) |
		FIELD_PREP(FIFOC_RX_MASK, HISI_SPI_RX_16),
		hs->regs + HISI_SPI_FIFOC);

	hs->fifo_len = 256;
}

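/*
 * Interrupt handler: drain the RX FIFO, refill the TX FIFO on a TX
 * threshold interrupt, and finalize the transfer once everything has
 * been received or an RX overflow was signalled.
 */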
static irqreturn_t hisi_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *host = dev_id;
	struct hisi_spi *hs = spi_controller_get_devdata(host);
	u32 irq_status = readl(hs->regs + HISI_SPI_ISR) & ISR_MASK;

	if (!irq_status)
		return IRQ_NONE;

	if (!host->cur_msg)
		return IRQ_HANDLED;

	/* Error handling */
	if (irq_status & ISR_RXOF) {
		dev_err(hs->dev, "interrupt_transfer: fifo overflow\n");
		host->cur_msg->status = -EIO;
		goto finalize_transfer;
	}

	/*
	 * Read data from the Rx FIFO every time. If there is
	 * nothing left to receive, finalize the transfer.
	 */
	hisi_spi_reader(hs);
	if (!hs->rx_len)
		goto finalize_transfer;

	/* Send data out when Tx FIFO IRQ triggered */
	if (irq_status & ISR_TX)
		hisi_spi_writer(hs);

	return IRQ_HANDLED;

finalize_transfer:
	hisi_spi_disable(hs);
	spi_finalize_current_transfer(host);
	return IRQ_HANDLED;
}

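/*
 * Start one interrupt-driven transfer: program CR with the per-transfer
 * divider and word size, set up the FIFO bookkeeping, then unmask the
 * interrupts and enable the controller. Returning 1 tells the SPI core
 * that the transfer completes asynchronously via
 * spi_finalize_current_transfer().
 */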
static int hisi_spi_transfer_one(struct spi_controller *host,
		struct spi_device *spi, struct spi_transfer *transfer)
{
	struct hisi_spi *hs = spi_controller_get_devdata(host);
	struct hisi_chip_data *chip = spi_get_ctldata(spi);
	u32 cr = chip->cr;

	/* Update per transfer options for speed and bpw */
	transfer->effective_speed_hz =
		hisi_calc_effective_speed(host, chip, transfer->speed_hz);
	cr |= FIELD_PREP(CR_DIV_PRE_MASK, chip->div_pre);
	cr |= FIELD_PREP(CR_DIV_POST_MASK, chip->div_post);
	cr |= FIELD_PREP(CR_BPW_MASK, transfer->bits_per_word - 1);
	writel(cr, hs->regs + HISI_SPI_CR);

	hisi_spi_flush_fifo(hs);

	hs->n_bytes = hisi_spi_n_bytes(transfer);
	hs->tx = transfer->tx_buf;
	hs->tx_len = transfer->len / hs->n_bytes;
	hs->rx = transfer->rx_buf;
	hs->rx_len = hs->tx_len;

	/*
	 * Ensure the transfer state above is visible to the interrupt
	 * handler before the interrupts are unmasked and the controller
	 * is enabled.
	 */
	smp_mb();

	/* Enable all interrupts and the controller */
	writel(~(u32)IMR_MASK, hs->regs + HISI_SPI_IMR);
	writel(1, hs->regs + HISI_SPI_ENR);

	return 1;
}

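/*
 * Error path used by the SPI core (e.g. on transfer timeout): quiesce
 * the hardware before the core finalizes the failed message.
 */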
static void hisi_spi_handle_err(struct spi_controller *host,
		struct spi_message *msg)
{
	struct hisi_spi *hs = spi_controller_get_devdata(host);

	hisi_spi_disable(hs);

	/*
	 * Wait for an interrupt handler that may still be running
	 * after the timeout to complete.
	 */
	msleep(HISI_SPI_WAIT_TIMEOUT_MS);
}

static int hisi_spi_setup(struct spi_device *spi)
{
	struct hisi_chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}

	chip->cr = hisi_spi_prepare_cr(spi);

	return 0;
}

static void hisi_spi_cleanup(struct spi_device *spi)
{
	struct hisi_chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

static int hisi_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct hisi_spi *hs;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = devm_spi_alloc_host(dev, sizeof(*hs));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);

	hs = spi_controller_get_devdata(host);
	hs->dev = dev;
	hs->irq = irq;

	hs->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hs->regs))
		return PTR_ERR(hs->regs);

	/* The maximum SPI clock speed (host mode only) must come from firmware */
	ret = device_property_read_u32(dev, "spi-max-frequency",
					&host->max_speed_hz);
	if (ret) {
		dev_err(dev, "failed to get max SPI clocking speed, ret=%d\n",
			ret);
		return -EINVAL;
	}

	ret = device_property_read_u16(dev, "num-cs",
					&host->num_chipselect);
	if (ret)
		host->num_chipselect = DEFAULT_NUM_CS;

	host->use_gpio_descriptors = true;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	host->bus_num = pdev->id;
	host->setup = hisi_spi_setup;
	host->cleanup = hisi_spi_cleanup;
	host->transfer_one = hisi_spi_transfer_one;
	host->handle_err = hisi_spi_handle_err;
	host->dev.fwnode = dev->fwnode;

	hisi_spi_hw_init(hs);

	ret = devm_request_irq(dev, hs->irq, hisi_spi_irq, 0, dev_name(dev),
			       host);
	if (ret < 0) {
		dev_err(dev, "failed to get IRQ=%d, ret=%d\n", hs->irq, ret);
		return ret;
	}

	ret = spi_register_controller(host);
	if (ret) {
		dev_err(dev, "failed to register spi host, ret=%d\n", ret);
		return ret;
	}

	if (hisi_spi_debugfs_init(hs))
		dev_info(dev, "failed to create debugfs dir\n");

	dev_info(dev, "hw version:0x%x max-freq:%u kHz\n",
		readl(hs->regs + HISI_SPI_VERSION),
		host->max_speed_hz / 1000);

	return 0;
}

static void hisi_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct hisi_spi *hs = spi_controller_get_devdata(host);

	debugfs_remove_recursive(hs->debugfs);
	spi_unregister_controller(host);
}

static const struct acpi_device_id hisi_spi_acpi_match[] = {
	{"HISI03E1", 0},
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_spi_acpi_match);

static struct platform_driver hisi_spi_driver = {
	.probe		= hisi_spi_probe,
	.remove_new	= hisi_spi_remove,
	.driver		= {
		.name	= "hisi-kunpeng-spi",
		.acpi_match_table = hisi_spi_acpi_match,
	},
};
module_platform_driver(hisi_spi_driver);

MODULE_AUTHOR("Jay Fang <f.fangjian@huawei.com>");
MODULE_DESCRIPTION("HiSilicon SPI Controller Driver for Kunpeng SoCs");
MODULE_LICENSE("GPL v2");