xref: /linux/drivers/spi/spi-amlogic-spisg.c (revision 0262163136de813894cb172aa8ccf762b92e5fd7)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Driver for Amlogic SPI communication Scatter-Gather Controller
4  *
5  * Copyright (C) 2025 Amlogic, Inc. All rights reserved
6  *
7  * Author: Sunny Luo <sunny.luo@amlogic.com>
8  * Author: Xianwei Zhao <xianwei.zhao@amlogic.com>
9  */
10 
11 #include <linux/bitfield.h>
12 #include <linux/device.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/clk.h>
16 #include <linux/clk-provider.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/platform_device.h>
19 #include <linux/pinctrl/consumer.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/spi/spi.h>
22 #include <linux/types.h>
23 #include <linux/interrupt.h>
24 #include <linux/reset.h>
25 #include <linux/regmap.h>
26 
27 /* Register Map */
28 #define SPISG_REG_CFG_READY		0x00
29 
30 #define SPISG_REG_CFG_SPI		0x04
31 #define CFG_BUS64_EN			BIT(0)
32 #define CFG_SLAVE_EN			BIT(1)
33 #define CFG_SLAVE_SELECT		GENMASK(3, 2)
34 #define CFG_SFLASH_WP			BIT(4)
35 #define CFG_SFLASH_HD			BIT(5)
36 /* start on vsync rising */
37 #define CFG_HW_POS			BIT(6)
38 /* start on vsync falling */
39 #define CFG_HW_NEG			BIT(7)
40 
41 #define SPISG_REG_CFG_START		0x08
42 #define CFG_BLOCK_NUM			GENMASK(19, 0)
43 #define CFG_BLOCK_SIZE			GENMASK(22, 20)
44 #define CFG_DATA_COMMAND		BIT(23)
45 #define CFG_OP_MODE			GENMASK(25, 24)
46 #define CFG_RXD_MODE			GENMASK(27, 26)
47 #define CFG_TXD_MODE			GENMASK(29, 28)
48 #define CFG_EOC				BIT(30)
49 #define CFG_PEND			BIT(31)
50 
51 #define SPISG_REG_CFG_BUS		0x0C
52 #define CFG_CLK_DIV			GENMASK(7, 0)
53 #define CLK_DIV_WIDTH			8
54 #define CFG_RX_TUNING			GENMASK(11, 8)
55 #define CFG_TX_TUNING			GENMASK(15, 12)
56 #define CFG_CS_SETUP			GENMASK(19, 16)
57 #define CFG_LANE			GENMASK(21, 20)
58 #define CFG_HALF_DUPLEX			BIT(22)
59 #define CFG_B_L_ENDIAN			BIT(23)
60 #define CFG_DC_MODE			BIT(24)
61 #define CFG_NULL_CTL			BIT(25)
62 #define CFG_DUMMY_CTL			BIT(26)
63 #define CFG_READ_TURN			GENMASK(28, 27)
64 #define CFG_KEEP_SS			BIT(29)
65 #define CFG_CPHA			BIT(30)
66 #define CFG_CPOL			BIT(31)
67 
68 #define SPISG_REG_PIO_TX_DATA_L		0x10
69 #define SPISG_REG_PIO_TX_DATA_H		0x14
70 #define SPISG_REG_PIO_RX_DATA_L		0x18
71 #define SPISG_REG_PIO_RX_DATA_H		0x1C
72 #define SPISG_REG_MEM_TX_ADDR_L		0x10
73 #define SPISG_REG_MEM_TX_ADDR_H		0x14
74 #define SPISG_REG_MEM_RX_ADDR_L		0x18
75 #define SPISG_REG_MEM_RX_ADDR_H		0x1C
76 #define SPISG_REG_DESC_LIST_L		0x20
77 #define SPISG_REG_DESC_LIST_H		0x24
78 #define LIST_DESC_PENDING		BIT(31)
79 #define SPISG_REG_DESC_CURRENT_L	0x28
80 #define SPISG_REG_DESC_CURRENT_H	0x2c
81 #define SPISG_REG_IRQ_STS		0x30
82 #define SPISG_REG_IRQ_ENABLE		0x34
83 #define IRQ_RCH_DESC_EOC		BIT(0)
84 #define IRQ_RCH_DESC_INVALID		BIT(1)
85 #define IRQ_RCH_DESC_RESP		BIT(2)
86 #define IRQ_RCH_DATA_RESP		BIT(3)
87 #define IRQ_WCH_DESC_EOC		BIT(4)
88 #define IRQ_WCH_DESC_INVALID		BIT(5)
89 #define IRQ_WCH_DESC_RESP		BIT(6)
90 #define IRQ_WCH_DATA_RESP		BIT(7)
91 #define IRQ_DESC_ERR			BIT(8)
92 #define IRQ_SPI_READY			BIT(9)
93 #define IRQ_DESC_DONE			BIT(10)
94 #define IRQ_DESC_CHAIN_DONE		BIT(11)
95 
96 #define SPISG_MAX_REG			0x40
97 
98 #define SPISG_BLOCK_MAX			0x100000
99 
100 #define SPISG_OP_MODE_WRITE_CMD		0
101 #define SPISG_OP_MODE_READ_STS		1
102 #define SPISG_OP_MODE_WRITE		2
103 #define SPISG_OP_MODE_READ		3
104 
105 #define SPISG_DATA_MODE_NONE		0
106 #define SPISG_DATA_MODE_PIO		1
107 #define SPISG_DATA_MODE_MEM		2
108 #define SPISG_DATA_MODE_SG		3
109 
110 #define SPISG_CLK_DIV_MAX		256
111 /* recommended by specification */
112 #define SPISG_CLK_DIV_MIN		4
113 #define DIV_NUM (SPISG_CLK_DIV_MAX - SPISG_CLK_DIV_MIN + 1)
114 
115 #define SPISG_PCLK_RATE_MIN		24000000
116 
117 #define SPISG_SINGLE_SPI		0
118 #define SPISG_DUAL_SPI			1
119 #define SPISG_QUAD_SPI			2
120 
/*
 * Hardware scatter-gather link entry: one contiguous DMA segment in the
 * chained list consumed by the controller.  Layout is fixed by hardware.
 */
struct spisg_sg_link {
#define LINK_ADDR_VALID		BIT(0)
#define LINK_ADDR_EOC		BIT(1)
#define LINK_ADDR_IRQ		BIT(2)
#define LINK_ADDR_ACT		GENMASK(5, 3)
#define LINK_ADDR_RING		BIT(6)
#define LINK_ADDR_LEN		GENMASK(31, 8)
	u32			addr;	/* control word built from the LINK_ADDR_* fields */
	u32			addr1;	/* 32-bit DMA address of the data segment */
};

/* One hardware transfer descriptor, laid out as the controller expects. */
struct spisg_descriptor {
	u32				cfg_start;	/* image of SPISG_REG_CFG_START */
	u32				cfg_bus;	/* image of SPISG_REG_CFG_BUS */
	u64				tx_paddr;	/* TX data/link-table DMA address, 0 if unused */
	u64				rx_paddr;	/* RX data/link-table DMA address, 0 if unused */
};

/* Driver-side bookkeeping per descriptor; never seen by the hardware. */
struct spisg_descriptor_extra {
	struct spisg_sg_link		*tx_ccsg;	/* kzalloc'ed TX link table */
	struct spisg_sg_link		*rx_ccsg;	/* kzalloc'ed RX link table */
	int				tx_ccsg_len;	/* byte length of tx_ccsg */
	int				rx_ccsg_len;	/* byte length of rx_ccsg */
};

/* Per-controller driver state. */
struct spisg_device {
	struct spi_controller		*controller;
	struct platform_device		*pdev;
	struct regmap			*map;		/* MMIO register access */
	struct clk			*core;
	struct clk			*pclk;
	struct clk			*sclk;		/* internal divider clock output */
	struct clk_div_table		*tbl;		/* divider lookup table for sclk */
	struct completion		completion;	/* signalled by the IRQ handler */
	u32				status;		/* last IRQ error bits, 0 on clean completion */
	u32				speed_hz;	/* last requested sclk rate */
	u32				effective_speed_hz; /* actual rate after clk rounding */
	u32				bytes_per_word;
	u32				cfg_spi;	/* cached CFG_SPI template */
	u32				cfg_start;	/* cached CFG_START template */
	u32				cfg_bus;	/* cached CFG_BUS template */
};
163 
/*
 * Convert an spi_delay into a number of SCLK cycles at the given rate.
 *
 * Returns the cycle count, or 0 for a NULL/unconvertible delay.
 */
static int spi_delay_to_sclk(u32 sclk_speed_hz, struct spi_delay *delay)
{
	s32 ns;

	if (!delay)
		return 0;

	/* Already expressed in clock cycles - nothing to convert */
	if (delay->unit == SPI_DELAY_UNIT_SCK)
		return delay->value;

	ns = spi_delay_to_ns(delay, NULL);
	if (ns < 0)
		return 0;

	/*
	 * Widen before multiplying: rate * ns easily exceeds 32 bits
	 * (e.g. 100MHz * 100ns), which would silently wrap in u32 math.
	 */
	return DIV_ROUND_UP_ULL((u64)sclk_speed_hz * ns, NSEC_PER_SEC);
}
180 
aml_spisg_sem_down_read(struct spisg_device * spisg)181 static inline u32 aml_spisg_sem_down_read(struct spisg_device *spisg)
182 {
183 	u32 ret;
184 
185 	regmap_read(spisg->map, SPISG_REG_CFG_READY, &ret);
186 	if (ret)
187 		regmap_write(spisg->map, SPISG_REG_CFG_READY, 0);
188 
189 	return ret;
190 }
191 
/* Release the hardware configuration semaphore taken by
 * aml_spisg_sem_down_read(), marking the controller free again.
 */
static inline void aml_spisg_sem_up_write(struct spisg_device *spisg)
{
	regmap_write(spisg->map, SPISG_REG_CFG_READY, 1);
}
196 
/*
 * Program the SCLK divider for @speed_hz and refresh the cached CFG_BUS
 * divider field used when building descriptors.
 *
 * Return: 0 on success (including the no-change fast path) or the
 * clk_set_rate() error code.
 */
static int aml_spisg_set_speed(struct spisg_device *spisg, uint speed_hz)
{
	u32 cfg_bus;
	int ret;

	/* Nothing to do if no rate was given or it is already programmed */
	if (!speed_hz || speed_hz == spisg->speed_hz)
		return 0;

	ret = clk_set_rate(spisg->sclk, speed_hz);
	if (ret) {
		dev_err(&spisg->pdev->dev, "setting sclk to %uHz failed: %d\n",
			speed_hz, ret);
		return ret;
	}

	/* Cache the request only once the clk framework accepted it */
	spisg->speed_hz = speed_hz;
	/* Store the divider the clk framework chose for descriptor mode */
	regmap_read(spisg->map, SPISG_REG_CFG_BUS, &cfg_bus);
	spisg->cfg_bus &= ~CFG_CLK_DIV;
	spisg->cfg_bus |= cfg_bus & CFG_CLK_DIV;
	spisg->effective_speed_hz = clk_get_rate(spisg->sclk);
	dev_dbg(&spisg->pdev->dev,
		"desired speed %dHz, effective speed %dHz\n",
		speed_hz, spisg->effective_speed_hz);

	return 0;
}
217 
/* Every transfer on this controller goes through DMA, so always opt in
 * and let the SPI core map the buffers into tx_sg/rx_sg for us.
 */
static bool aml_spisg_can_dma(struct spi_controller *ctlr,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	return true;
}
224 
aml_spisg_sg_xlate(struct sg_table * sgt,struct spisg_sg_link * ccsg)225 static void aml_spisg_sg_xlate(struct sg_table *sgt, struct spisg_sg_link *ccsg)
226 {
227 	struct scatterlist *sg;
228 	int i;
229 
230 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
231 		ccsg->addr = FIELD_PREP(LINK_ADDR_VALID, 1) |
232 			     FIELD_PREP(LINK_ADDR_RING, 0) |
233 			     FIELD_PREP(LINK_ADDR_EOC, sg_is_last(sg)) |
234 			     FIELD_PREP(LINK_ADDR_LEN, sg_dma_len(sg));
235 		ccsg->addr1 = (u32)sg_dma_address(sg);
236 		ccsg++;
237 	}
238 }
239 
240 static int nbits_to_lane[] = {
241 	SPISG_SINGLE_SPI,
242 	SPISG_SINGLE_SPI,
243 	SPISG_DUAL_SPI,
244 	-EINVAL,
245 	SPISG_QUAD_SPI
246 };
247 
/*
 * Build one hardware descriptor (plus driver-side extras) for a single
 * spi_transfer.  Data is attached either as a chained scatter-gather
 * link table (SG mode), a linear DMA buffer mapped here or premapped by
 * the caller (MEM mode), or not at all.
 *
 * Returns 0 on success or a negative errno on allocation/mapping
 * failure.  Mappings made here are released by
 * aml_spisg_cleanup_transfer(); a zeroed descriptor is safe to clean.
 *
 * NOTE(review): exdesc is dereferenced unconditionally in the SG
 * branches, so it is presumed non-NULL whenever sg tables are present -
 * confirm against callers.
 */
static int aml_spisg_setup_transfer(struct spisg_device *spisg,
				    struct spi_transfer *xfer,
				    struct spisg_descriptor *desc,
				    struct spisg_descriptor_extra *exdesc)
{
	int block_size, blocks;
	struct device *dev = &spisg->pdev->dev;
	struct spisg_sg_link *ccsg;
	int ccsg_len;
	dma_addr_t paddr;
	int ret;

	memset(desc, 0, sizeof(*desc));
	if (exdesc)
		memset(exdesc, 0, sizeof(*exdesc));
	aml_spisg_set_speed(spisg, xfer->speed_hz);
	xfer->effective_speed_hz = spisg->effective_speed_hz;

	/* Start from the per-message templates cached in prepare_message */
	desc->cfg_start = spisg->cfg_start;
	desc->cfg_bus = spisg->cfg_bus;

	/* One block per word; bits_per_word is byte-aligned (validated for
	 * the device default in prepare_message - per-transfer overrides
	 * are presumably byte-aligned too, TODO confirm).
	 */
	block_size = xfer->bits_per_word >> 3;
	blocks = xfer->len / block_size;

	desc->cfg_start |= FIELD_PREP(CFG_EOC, 0);
	/* Keep CS asserted into the next transfer unless cs_change is set */
	desc->cfg_bus |= FIELD_PREP(CFG_KEEP_SS, !xfer->cs_change);
	desc->cfg_bus |= FIELD_PREP(CFG_NULL_CTL, 0);

	if (xfer->tx_buf || xfer->tx_dma) {
		desc->cfg_bus |= FIELD_PREP(CFG_LANE, nbits_to_lane[xfer->tx_nbits]);
		desc->cfg_start |= FIELD_PREP(CFG_OP_MODE, SPISG_OP_MODE_WRITE);
	}
	if (xfer->rx_buf || xfer->rx_dma) {
		desc->cfg_bus |= FIELD_PREP(CFG_LANE, nbits_to_lane[xfer->rx_nbits]);
		desc->cfg_start |= FIELD_PREP(CFG_OP_MODE, SPISG_OP_MODE_READ);
	}

	if (FIELD_GET(CFG_OP_MODE, desc->cfg_start) == SPISG_OP_MODE_READ_STS) {
		desc->cfg_start |= FIELD_PREP(CFG_BLOCK_SIZE, blocks) |
				   FIELD_PREP(CFG_BLOCK_NUM, 1);
	} else {
		/* Clamp to the 20-bit BLOCK_NUM hardware maximum */
		blocks = min_t(int, blocks, SPISG_BLOCK_MAX);
		desc->cfg_start |= FIELD_PREP(CFG_BLOCK_SIZE, block_size & 0x7) |
				   FIELD_PREP(CFG_BLOCK_NUM, blocks);
	}

	/* TX data: prefer the SPI core's scatter-gather mapping */
	if (xfer->tx_sg.nents && xfer->tx_sg.sgl) {
		ccsg_len = xfer->tx_sg.nents * sizeof(struct spisg_sg_link);
		ccsg = kzalloc(ccsg_len, GFP_KERNEL | GFP_DMA);
		if (!ccsg) {
			dev_err(dev, "alloc tx_ccsg failed\n");
			return -ENOMEM;
		}

		/* Translate the sg table to the chained format, then map the
		 * link table itself so the controller can fetch it.
		 */
		aml_spisg_sg_xlate(&xfer->tx_sg, ccsg);
		paddr = dma_map_single(dev, (void *)ccsg,
				       ccsg_len, DMA_TO_DEVICE);
		ret = dma_mapping_error(dev, paddr);
		if (ret) {
			kfree(ccsg);
			dev_err(dev, "tx ccsg map failed\n");
			return ret;
		}

		desc->tx_paddr = paddr;
		desc->cfg_start |= FIELD_PREP(CFG_TXD_MODE, SPISG_DATA_MODE_SG);
		exdesc->tx_ccsg = ccsg;
		exdesc->tx_ccsg_len = ccsg_len;
		dma_sync_sgtable_for_device(spisg->controller->cur_tx_dma_dev,
					    &xfer->tx_sg, DMA_TO_DEVICE);
	} else if (xfer->tx_buf || xfer->tx_dma) {
		/* Linear buffer: map it here unless the caller premapped it */
		paddr = xfer->tx_dma;
		if (!paddr) {
			paddr = dma_map_single(dev, (void *)xfer->tx_buf,
					       xfer->len, DMA_TO_DEVICE);
			ret = dma_mapping_error(dev, paddr);
			if (ret) {
				dev_err(dev, "tx buf map failed\n");
				return ret;
			}
		}
		desc->tx_paddr = paddr;
		desc->cfg_start |= FIELD_PREP(CFG_TXD_MODE, SPISG_DATA_MODE_MEM);
	}

	/* RX data: same scheme as TX */
	if (xfer->rx_sg.nents && xfer->rx_sg.sgl) {
		ccsg_len = xfer->rx_sg.nents * sizeof(struct spisg_sg_link);
		ccsg = kzalloc(ccsg_len, GFP_KERNEL | GFP_DMA);
		if (!ccsg) {
			dev_err(dev, "alloc rx_ccsg failed\n");
			return -ENOMEM;
		}

		/* The link table itself is read by the device (TO_DEVICE),
		 * even though the data it points at is written by it.
		 */
		aml_spisg_sg_xlate(&xfer->rx_sg, ccsg);
		paddr = dma_map_single(dev, (void *)ccsg,
				       ccsg_len, DMA_TO_DEVICE);
		ret = dma_mapping_error(dev, paddr);
		if (ret) {
			kfree(ccsg);
			dev_err(dev, "rx ccsg map failed\n");
			return ret;
		}

		desc->rx_paddr = paddr;
		desc->cfg_start |= FIELD_PREP(CFG_RXD_MODE, SPISG_DATA_MODE_SG);
		exdesc->rx_ccsg = ccsg;
		exdesc->rx_ccsg_len = ccsg_len;
		dma_sync_sgtable_for_device(spisg->controller->cur_rx_dma_dev,
					    &xfer->rx_sg, DMA_FROM_DEVICE);
	} else if (xfer->rx_buf || xfer->rx_dma) {
		paddr = xfer->rx_dma;
		if (!paddr) {
			paddr = dma_map_single(dev, xfer->rx_buf,
					       xfer->len, DMA_FROM_DEVICE);
			ret = dma_mapping_error(dev, paddr);
			if (ret) {
				dev_err(dev, "rx buf map failed\n");
				return ret;
			}
		}

		desc->rx_paddr = paddr;
		desc->cfg_start |= FIELD_PREP(CFG_RXD_MODE, SPISG_DATA_MODE_MEM);
	}

	return 0;
}
375 
/*
 * Undo the DMA mappings and allocations made by
 * aml_spisg_setup_transfer() for one descriptor.  Safe on partially
 * initialized (zeroed) descriptors: a zero paddr means nothing to undo.
 */
static void aml_spisg_cleanup_transfer(struct spisg_device *spisg,
				       struct spi_transfer *xfer,
				       struct spisg_descriptor *desc,
				       struct spisg_descriptor_extra *exdesc)
{
	struct device *dev = &spisg->pdev->dev;

	if (desc->tx_paddr) {
		if (FIELD_GET(CFG_TXD_MODE, desc->cfg_start) == SPISG_DATA_MODE_SG) {
			/* Unmap and free the link table, then hand the sg
			 * data buffers back to the CPU.
			 */
			dma_unmap_single(dev, (dma_addr_t)desc->tx_paddr,
					 exdesc->tx_ccsg_len, DMA_TO_DEVICE);
			kfree(exdesc->tx_ccsg);
			dma_sync_sgtable_for_cpu(spisg->controller->cur_tx_dma_dev,
						 &xfer->tx_sg, DMA_TO_DEVICE);
		} else if (!xfer->tx_dma) {
			/* Only unmap buffers this driver mapped itself */
			dma_unmap_single(dev, (dma_addr_t)desc->tx_paddr,
					 xfer->len, DMA_TO_DEVICE);
		}
	}

	if (desc->rx_paddr) {
		if (FIELD_GET(CFG_RXD_MODE, desc->cfg_start) == SPISG_DATA_MODE_SG) {
			/* The rx link table itself was mapped TO_DEVICE */
			dma_unmap_single(dev, (dma_addr_t)desc->rx_paddr,
					 exdesc->rx_ccsg_len, DMA_TO_DEVICE);
			kfree(exdesc->rx_ccsg);
			dma_sync_sgtable_for_cpu(spisg->controller->cur_rx_dma_dev,
						 &xfer->rx_sg, DMA_FROM_DEVICE);
		} else if (!xfer->rx_dma) {
			dma_unmap_single(dev, (dma_addr_t)desc->rx_paddr,
					 xfer->len, DMA_FROM_DEVICE);
		}
	}
}
409 
/*
 * Build a dummy write descriptor that keeps the bus clocking for
 * @n_sclk cycles (expressed at the rate of the last real transfer);
 * used to realize the cs-hold delay at the end of a message.
 */
static void aml_spisg_setup_null_desc(struct spisg_device *spisg,
				      struct spisg_descriptor *desc,
				      u32 n_sclk)
{
	u32 start = spisg->cfg_start;
	u32 bus = spisg->cfg_bus;

	/* One-byte blocks, enough of them to span n_sclk bit-times */
	start |= FIELD_PREP(CFG_OP_MODE, SPISG_OP_MODE_WRITE) |
		 FIELD_PREP(CFG_BLOCK_SIZE, 1) |
		 FIELD_PREP(CFG_BLOCK_NUM, DIV_ROUND_UP(n_sclk, 8));
	bus |= FIELD_PREP(CFG_NULL_CTL, 1);

	desc->cfg_start = start;
	desc->cfg_bus = bus;
}
424 
/*
 * Hand a descriptor chain to the controller and optionally unmask the
 * completion/error interrupts.
 *
 * @trig:   when true, arm a hardware start on vsync rising edge instead
 *          of kicking the chain immediately via the pending bit.
 * @irq_en: when true, unmask the error and chain-done interrupt set.
 */
static void aml_spisg_pending(struct spisg_device *spisg,
			      dma_addr_t desc_paddr,
			      bool trig,
			      bool irq_en)
{
	u32 desc_l, desc_h, cfg_spi, irq_enable;

	/*
	 * lower/upper_32_bits() are well-defined for both 32- and 64-bit
	 * dma_addr_t, so no CONFIG_ARCH_DMA_ADDR_T_64BIT ifdef is needed.
	 */
	desc_l = lower_32_bits(desc_paddr);
	desc_h = upper_32_bits(desc_paddr);

	cfg_spi = spisg->cfg_spi;
	if (trig)
		cfg_spi |= CFG_HW_POS;
	else
		desc_h |= LIST_DESC_PENDING;

	irq_enable = IRQ_RCH_DESC_INVALID | IRQ_RCH_DESC_RESP |
		     IRQ_RCH_DATA_RESP | IRQ_WCH_DESC_INVALID |
		     IRQ_WCH_DESC_RESP | IRQ_WCH_DATA_RESP |
		     IRQ_DESC_ERR | IRQ_DESC_CHAIN_DONE;
	regmap_write(spisg->map, SPISG_REG_IRQ_ENABLE, irq_en ? irq_enable : 0);
	regmap_write(spisg->map, SPISG_REG_CFG_SPI, cfg_spi);
	regmap_write(spisg->map, SPISG_REG_DESC_LIST_L, desc_l);
	/* Writing the high word (with the pending bit) kicks the chain */
	regmap_write(spisg->map, SPISG_REG_DESC_LIST_H, desc_h);
}
455 
aml_spisg_irq(int irq,void * data)456 static irqreturn_t aml_spisg_irq(int irq, void *data)
457 {
458 	struct spisg_device *spisg = (void *)data;
459 	u32 sts;
460 
461 	spisg->status = 0;
462 	regmap_read(spisg->map, SPISG_REG_IRQ_STS, &sts);
463 	regmap_write(spisg->map, SPISG_REG_IRQ_STS, sts);
464 	if (sts & (IRQ_RCH_DESC_INVALID |
465 		   IRQ_RCH_DESC_RESP |
466 		   IRQ_RCH_DATA_RESP |
467 		   IRQ_WCH_DESC_INVALID |
468 		   IRQ_WCH_DESC_RESP |
469 		   IRQ_WCH_DATA_RESP |
470 		   IRQ_DESC_ERR))
471 		spisg->status = sts;
472 	else if (sts & IRQ_DESC_CHAIN_DONE)
473 		spisg->status = 0;
474 	else
475 		return IRQ_NONE;
476 
477 	complete(&spisg->completion);
478 
479 	return IRQ_HANDLED;
480 }
481 
/*
 * Execute a complete spi_message: build one descriptor per transfer
 * (plus an optional trailing null descriptor realizing the cs-hold
 * delay), map the descriptor table, kick the hardware and wait for the
 * interrupt.
 *
 * Return: 0 on success, -EBUSY if the controller semaphore is taken,
 * or -ENOMEM/-EIO/-ETIMEDOUT on failure.  The message is always
 * finalized and the semaphore always released before returning.
 */
static int aml_spisg_transfer_one_message(struct spi_controller *ctlr,
					  struct spi_message *msg)
{
	struct spisg_device *spisg = spi_controller_get_devdata(ctlr);
	struct device *dev = &spisg->pdev->dev;
	unsigned long long ms = 0;
	struct spi_transfer *xfer;
	struct spisg_descriptor *descs, *desc;
	struct spisg_descriptor_extra *exdescs, *exdesc;
	dma_addr_t descs_paddr;
	/* starts at 1 to reserve a slot for the cs-hold null descriptor */
	int desc_num = 1, descs_len;
	u32 cs_hold_in_sclk = 0;
	int ret = -EIO;

	if (!aml_spisg_sem_down_read(spisg)) {
		spi_finalize_current_message(ctlr);
		dev_err(dev, "controller busy\n");
		return -EBUSY;
	}

	/* calculate the desc num for all xfer */
	list_for_each_entry(xfer, &msg->transfers, transfer_list)
		desc_num++;

	/* alloc descriptor/extra-descriptor table; zeroed, so entries that
	 * never get configured are harmless to clean up below
	 */
	descs = kcalloc(desc_num, sizeof(*desc) + sizeof(*exdesc),
			GFP_KERNEL | GFP_DMA);
	if (!descs) {
		spi_finalize_current_message(ctlr);
		aml_spisg_sem_up_write(spisg);
		return -ENOMEM;
	}
	descs_len = sizeof(*desc) * desc_num;
	/* extras live immediately after the hardware descriptors */
	exdescs = (struct spisg_descriptor_extra *)(descs + desc_num);

	/* config descriptor for each xfer */
	desc = descs;
	exdesc = exdescs;
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = aml_spisg_setup_transfer(spisg, xfer, desc, exdesc);
		if (ret) {
			dev_err(dev, "config descriptor failed\n");
			goto end;
		}

		/* calculate cs-setup delay with the first xfer speed */
		if (list_is_first(&xfer->transfer_list, &msg->transfers))
			desc->cfg_bus |= FIELD_PREP(CFG_CS_SETUP,
				spi_delay_to_sclk(xfer->effective_speed_hz, &msg->spi->cs_setup));

		/* calculate cs-hold delay with the last xfer speed */
		if (list_is_last(&xfer->transfer_list, &msg->transfers))
			cs_hold_in_sclk =
				spi_delay_to_sclk(xfer->effective_speed_hz, &msg->spi->cs_hold);

		desc++;
		exdesc++;
		/* rough on-wire time of this transfer, in milliseconds */
		ms += DIV_ROUND_UP_ULL(8LL * MSEC_PER_SEC * xfer->len,
				       xfer->effective_speed_hz);
	}

	if (cs_hold_in_sclk)
		/* additional null-descriptor to achieve the cs-hold delay */
		aml_spisg_setup_null_desc(spisg, desc, cs_hold_in_sclk);
	else
		desc--;

	/* last descriptor in the chain: drop CS and mark end-of-chain */
	desc->cfg_bus |= FIELD_PREP(CFG_KEEP_SS, 0);
	desc->cfg_start |= FIELD_PREP(CFG_EOC, 1);

	/* some tolerances: double the estimate and add 20ms */
	ms += ms + 20;
	if (ms > UINT_MAX)
		ms = UINT_MAX;

	descs_paddr = dma_map_single(dev, (void *)descs,
				     descs_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, descs_paddr);
	if (ret) {
		dev_err(dev, "desc table map failed\n");
		goto end;
	}

	reinit_completion(&spisg->completion);
	aml_spisg_pending(spisg, descs_paddr, false, true);
	/* a target has no say in when the remote host clocks the bus,
	 * so it waits without a timeout
	 */
	if (wait_for_completion_timeout(&spisg->completion,
					spi_controller_is_target(spisg->controller) ?
					MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(ms)))
		ret = spisg->status ? -EIO : 0;
	else
		ret = -ETIMEDOUT;

	dma_unmap_single(dev, descs_paddr, descs_len, DMA_TO_DEVICE);
end:
	desc = descs;
	exdesc = exdescs;
	list_for_each_entry(xfer, &msg->transfers, transfer_list)
		aml_spisg_cleanup_transfer(spisg, xfer, desc++, exdesc++);
	kfree(descs);

	if (!ret)
		msg->actual_length = msg->frame_length;
	msg->status = ret;
	spi_finalize_current_message(ctlr);
	aml_spisg_sem_up_write(spisg);

	return ret;
}
590 
aml_spisg_prepare_message(struct spi_controller * ctlr,struct spi_message * message)591 static int aml_spisg_prepare_message(struct spi_controller *ctlr,
592 				     struct spi_message *message)
593 {
594 	struct spisg_device *spisg = spi_controller_get_devdata(ctlr);
595 	struct spi_device *spi = message->spi;
596 
597 	if (!spi->bits_per_word || spi->bits_per_word % 8) {
598 		dev_err(&spisg->pdev->dev, "invalid wordlen %d\n", spi->bits_per_word);
599 		return -EINVAL;
600 	}
601 
602 	spisg->bytes_per_word = spi->bits_per_word >> 3;
603 
604 	spisg->cfg_spi &= ~CFG_SLAVE_SELECT;
605 	spisg->cfg_spi |= FIELD_PREP(CFG_SLAVE_SELECT, spi_get_chipselect(spi, 0));
606 
607 	spisg->cfg_bus &= ~(CFG_CPOL | CFG_CPHA | CFG_B_L_ENDIAN | CFG_HALF_DUPLEX);
608 	spisg->cfg_bus |= FIELD_PREP(CFG_CPOL, !!(spi->mode & SPI_CPOL)) |
609 			  FIELD_PREP(CFG_CPHA, !!(spi->mode & SPI_CPHA)) |
610 			  FIELD_PREP(CFG_B_L_ENDIAN, !!(spi->mode & SPI_LSB_FIRST)) |
611 			  FIELD_PREP(CFG_HALF_DUPLEX, !!(spi->mode & SPI_3WIRE));
612 
613 	return 0;
614 }
615 
/* Per-device setup: point controller_state at the shared driver data
 * (only once; subsequent calls keep the existing pointer).
 */
static int aml_spisg_setup(struct spi_device *spi)
{
	if (!spi->controller_state)
		spi->controller_state = spi_controller_get_devdata(spi->controller);

	return 0;
}
623 
/* Per-device teardown: drop the controller_state reference set in
 * aml_spisg_setup().  Nothing was allocated, so nothing to free.
 */
static void aml_spisg_cleanup(struct spi_device *spi)
{
	spi->controller_state = NULL;
}
628 
/* Abort a pending target-mode transfer: clear the descriptor chain
 * high word (which carries the pending bit) and wake the thread
 * blocked in transfer_one_message with a clean (0) status.
 */
static int aml_spisg_target_abort(struct spi_controller *ctlr)
{
	struct spisg_device *spisg = spi_controller_get_devdata(ctlr);

	spisg->status = 0;
	regmap_write(spisg->map, SPISG_REG_DESC_LIST_H, 0);
	complete(&spisg->completion);

	return 0;
}
639 
/*
 * Register the internal SCLK divider (fed by "pclk") with the common
 * clock framework and enable the controller clocks.  The divider field
 * lives in SPISG_REG_CFG_BUS and is exposed through a table-based
 * clk_divider so only divisors 4..256 are selectable.
 *
 * Return: 0 on success or a negative errno.
 */
static int aml_spisg_clk_init(struct spisg_device *spisg, void __iomem *base)
{
	struct device *dev = &spisg->pdev->dev;
	/* zero-initialize: clk core inspects several of these fields */
	struct clk_init_data init = {};
	struct clk_divider *div;
	struct clk_div_table *tbl;
	char name[32];
	int ret, i;

	/* devm_clk_get_enabled() never returns NULL, so IS_ERR() suffices */
	spisg->core = devm_clk_get_enabled(dev, "core");
	if (IS_ERR(spisg->core)) {
		dev_err(dev, "core clock request failed\n");
		return PTR_ERR(spisg->core);
	}

	spisg->pclk = devm_clk_get_enabled(dev, "pclk");
	if (IS_ERR(spisg->pclk)) {
		dev_err(dev, "pclk clock request failed\n");
		return PTR_ERR(spisg->pclk);
	}

	clk_set_min_rate(spisg->pclk, SPISG_PCLK_RATE_MIN);

	/* pclk is re-enabled through its child sclk below */
	clk_disable_unprepare(spisg->pclk);

	/* one entry per supported divisor plus a zeroed sentinel */
	tbl = devm_kcalloc(dev, DIV_NUM + 1, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	for (i = 0; i < DIV_NUM; i++) {
		tbl[i].val = i + SPISG_CLK_DIV_MIN - 1;
		tbl[i].div = i + SPISG_CLK_DIV_MIN;
	}
	spisg->tbl = tbl;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return -ENOMEM;

	div->flags = CLK_DIVIDER_ROUND_CLOSEST;
	div->reg = base + SPISG_REG_CFG_BUS;
	div->shift = __bf_shf(CFG_CLK_DIV);
	div->width = CLK_DIV_WIDTH;
	div->table = tbl;

	/* Register value should not be outside of the table */
	regmap_update_bits(spisg->map, SPISG_REG_CFG_BUS, CFG_CLK_DIV,
			   FIELD_PREP(CFG_CLK_DIV, SPISG_CLK_DIV_MIN - 1));

	/* Register clk-divider */
	snprintf(name, sizeof(name), "%s_div", dev_name(dev));
	init.name = name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_data = &(const struct clk_parent_data) {
				.fw_name = "pclk",
			   };
	init.num_parents = 1;
	div->hw.init = &init;
	ret = devm_clk_hw_register(dev, &div->hw);
	if (ret) {
		dev_err(dev, "clock registration failed\n");
		return ret;
	}

	spisg->sclk = devm_clk_hw_get_clk(dev, &div->hw, NULL);
	if (IS_ERR(spisg->sclk)) {
		dev_err(dev, "get clock failed\n");
		return PTR_ERR(spisg->sclk);
	}

	/* enabling sclk propagates up and re-enables its parent pclk */
	ret = clk_prepare_enable(spisg->sclk);
	if (ret) {
		dev_err(dev, "sclk enable failed\n");
		return ret;
	}

	return 0;
}
715 
aml_spisg_probe(struct platform_device * pdev)716 static int aml_spisg_probe(struct platform_device *pdev)
717 {
718 	struct spi_controller *ctlr;
719 	struct spisg_device *spisg;
720 	struct device *dev = &pdev->dev;
721 	void __iomem *base;
722 	int ret, irq;
723 
724 	const struct regmap_config aml_regmap_config = {
725 		.reg_bits = 32,
726 		.val_bits = 32,
727 		.reg_stride = 4,
728 		.max_register = SPISG_MAX_REG,
729 	};
730 
731 	if (of_property_read_bool(dev->of_node, "spi-slave"))
732 		ctlr = spi_alloc_target(dev, sizeof(*spisg));
733 	else
734 		ctlr = spi_alloc_host(dev, sizeof(*spisg));
735 	if (!ctlr)
736 		return dev_err_probe(dev, -ENOMEM, "controller allocation failed\n");
737 
738 	spisg = spi_controller_get_devdata(ctlr);
739 	spisg->controller = ctlr;
740 
741 	spisg->pdev = pdev;
742 	platform_set_drvdata(pdev, spisg);
743 
744 	base = devm_platform_ioremap_resource(pdev, 0);
745 	if (IS_ERR(base))
746 		return dev_err_probe(dev, PTR_ERR(base), "resource ioremap failed\n");
747 
748 	spisg->map = devm_regmap_init_mmio(dev, base, &aml_regmap_config);
749 	if (IS_ERR(spisg->map))
750 		return dev_err_probe(dev, PTR_ERR(spisg->map), "regmap init failed\n");
751 
752 	irq = platform_get_irq(pdev, 0);
753 	if (irq < 0) {
754 		ret = irq;
755 		goto out_controller;
756 	}
757 
758 	ret = device_reset_optional(dev);
759 	if (ret)
760 		return dev_err_probe(dev, ret, "reset dev failed\n");
761 
762 	ret = aml_spisg_clk_init(spisg, base);
763 	if (ret)
764 		return dev_err_probe(dev, ret, "clock init failed\n");
765 
766 	spisg->cfg_spi = 0;
767 	spisg->cfg_start = 0;
768 	spisg->cfg_bus = 0;
769 
770 	spisg->cfg_spi = FIELD_PREP(CFG_SFLASH_WP, 1) |
771 			 FIELD_PREP(CFG_SFLASH_HD, 1);
772 	if (spi_controller_is_target(ctlr)) {
773 		spisg->cfg_spi |= FIELD_PREP(CFG_SLAVE_EN, 1);
774 		spisg->cfg_bus = FIELD_PREP(CFG_TX_TUNING, 0xf);
775 	}
776 	/* default pending */
777 	spisg->cfg_start = FIELD_PREP(CFG_PEND, 1);
778 
779 	pm_runtime_set_active(&spisg->pdev->dev);
780 	pm_runtime_enable(&spisg->pdev->dev);
781 	pm_runtime_resume_and_get(&spisg->pdev->dev);
782 
783 	ctlr->num_chipselect = 4;
784 	ctlr->dev.of_node = pdev->dev.of_node;
785 	ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST |
786 			  SPI_3WIRE | SPI_TX_QUAD | SPI_RX_QUAD;
787 	ctlr->max_speed_hz = 1000 * 1000 * 100;
788 	ctlr->min_speed_hz = 1000 * 10;
789 	ctlr->setup = aml_spisg_setup;
790 	ctlr->cleanup = aml_spisg_cleanup;
791 	ctlr->prepare_message = aml_spisg_prepare_message;
792 	ctlr->transfer_one_message = aml_spisg_transfer_one_message;
793 	ctlr->target_abort = aml_spisg_target_abort;
794 	ctlr->can_dma = aml_spisg_can_dma;
795 	ctlr->max_dma_len = SPISG_BLOCK_MAX;
796 	ctlr->auto_runtime_pm = true;
797 
798 	dma_set_max_seg_size(&pdev->dev, SPISG_BLOCK_MAX);
799 
800 	ret = devm_request_irq(&pdev->dev, irq, aml_spisg_irq, 0, NULL, spisg);
801 	if (ret) {
802 		dev_err(&pdev->dev, "irq request failed\n");
803 		goto out_clk;
804 	}
805 
806 	ret = devm_spi_register_controller(dev, ctlr);
807 	if (ret) {
808 		dev_err(&pdev->dev, "spi controller registration failed\n");
809 		goto out_clk;
810 	}
811 
812 	init_completion(&spisg->completion);
813 
814 	pm_runtime_put(&spisg->pdev->dev);
815 
816 	return 0;
817 out_clk:
818 	if (spisg->core)
819 		clk_disable_unprepare(spisg->core);
820 	clk_disable_unprepare(spisg->pclk);
821 out_controller:
822 	spi_controller_put(ctlr);
823 
824 	return ret;
825 }
826 
/* Remove: if the device is still runtime-active, park the pins and
 * gate the clocks; when runtime-suspended the suspend callback already
 * did both, so doing it again would unbalance the enable counts.
 */
static void aml_spisg_remove(struct platform_device *pdev)
{
	struct spisg_device *spisg = platform_get_drvdata(pdev);

	if (!pm_runtime_suspended(&pdev->dev)) {
		pinctrl_pm_select_sleep_state(&spisg->pdev->dev);
		clk_disable_unprepare(spisg->core);
		clk_disable_unprepare(spisg->pclk);
	}
}
837 
spisg_suspend_runtime(struct device * dev)838 static int spisg_suspend_runtime(struct device *dev)
839 {
840 	struct spisg_device *spisg = dev_get_drvdata(dev);
841 
842 	pinctrl_pm_select_sleep_state(&spisg->pdev->dev);
843 	clk_disable_unprepare(spisg->sclk);
844 	clk_disable_unprepare(spisg->core);
845 
846 	return 0;
847 }
848 
spisg_resume_runtime(struct device * dev)849 static int spisg_resume_runtime(struct device *dev)
850 {
851 	struct spisg_device *spisg = dev_get_drvdata(dev);
852 
853 	clk_prepare_enable(spisg->core);
854 	clk_prepare_enable(spisg->sclk);
855 	pinctrl_pm_select_default_state(&spisg->pdev->dev);
856 
857 	return 0;
858 }
859 
/* Runtime-PM hooks only; no system sleep callbacks are provided. */
static const struct dev_pm_ops amlogic_spisg_pm_ops = {
	.runtime_suspend	= spisg_suspend_runtime,
	.runtime_resume		= spisg_resume_runtime,
};

/* DT match table; currently only the Amlogic A4 integration. */
static const struct of_device_id amlogic_spisg_of_match[] = {
	{
		.compatible = "amlogic,a4-spisg",
	},

	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, amlogic_spisg_of_match);

static struct platform_driver amlogic_spisg_driver = {
	.probe = aml_spisg_probe,
	.remove = aml_spisg_remove,
	.driver  = {
		.name = "amlogic-spisg",
		.of_match_table = amlogic_spisg_of_match,
		.pm = &amlogic_spisg_pm_ops,
	},
};

module_platform_driver(amlogic_spisg_driver);

MODULE_DESCRIPTION("Amlogic SPI Scatter-Gather Controller driver");
MODULE_AUTHOR("Sunny Luo <sunny.luo@amlogic.com>");
MODULE_LICENSE("GPL");
889