// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2018 Macronix International Co., Ltd.
//
// Authors:
//	Mason Yang <masonccyang@mxic.com.tw>
//	zhengxunli <zhengxunli@mxic.com.tw>
//	Boris Brezillon <boris.brezillon@bootlin.com>
//

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#define HC_CFG			0x0
#define HC_CFG_IF_CFG(x)	((x) << 27)
#define HC_CFG_DUAL_SLAVE	BIT(31)
#define HC_CFG_INDIVIDUAL	BIT(30)
#define HC_CFG_NIO(x)		(((x) / 4) << 27)
#define HC_CFG_TYPE(s, t)	((t) << (23 + ((s) * 2)))
#define HC_CFG_TYPE_SPI_NOR	0
#define HC_CFG_TYPE_SPI_NAND	1
#define HC_CFG_TYPE_SPI_RAM	2
#define HC_CFG_TYPE_RAW_NAND	3
#define HC_CFG_SLV_ACT(x)	((x) << 21)
#define HC_CFG_CLK_PH_EN	BIT(20)
#define HC_CFG_CLK_POL_INV	BIT(19)
#define HC_CFG_BIG_ENDIAN	BIT(18)
#define HC_CFG_DATA_PASS	BIT(17)
#define HC_CFG_IDLE_SIO_LVL(x)	((x) << 16)
#define HC_CFG_MAN_START_EN	BIT(3)
#define HC_CFG_MAN_START	BIT(2)
#define HC_CFG_MAN_CS_EN	BIT(1)
#define HC_CFG_MAN_CS_ASSERT	BIT(0)

#define INT_STS			0x4
#define INT_STS_EN		0x8
#define INT_SIG_EN		0xc
#define INT_STS_ALL		GENMASK(31, 0)
#define INT_RDY_PIN		BIT(26)
#define INT_RDY_SR		BIT(25)
#define INT_LNR_SUSP		BIT(24)
#define INT_ECC_ERR		BIT(17)
#define INT_CRC_ERR		BIT(16)
#define INT_LWR_DIS		BIT(12)
#define INT_LRD_DIS		BIT(11)
#define INT_SDMA_INT		BIT(10)
#define INT_DMA_FINISH		BIT(9)
#define INT_RX_NOT_FULL		BIT(3)
#define INT_RX_NOT_EMPTY	BIT(2)
#define INT_TX_NOT_FULL		BIT(1)
#define INT_TX_EMPTY		BIT(0)

#define HC_EN			0x10
#define HC_EN_BIT		BIT(0)

#define TXD(x)			(0x14 + ((x) * 4))
#define RXD			0x24

#define SS_CTRL(s)		(0x30 + ((s) * 4))
#define LRD_CFG			0x44
#define LWR_CFG			0x80
#define RWW_CFG			0x70
#define OP_READ			BIT(23)
#define OP_DUMMY_CYC(x)		((x) << 17)
#define OP_ADDR_BYTES(x)	((x) << 14)
#define OP_CMD_BYTES(x)		(((x) - 1) << 13)
#define OP_OCTA_CRC_EN		BIT(12)
#define OP_DQS_EN		BIT(11)
#define OP_ENHC_EN		BIT(10)
#define OP_PREAMBLE_EN		BIT(9)
#define OP_DATA_DDR		BIT(8)
#define OP_DATA_BUSW(x)		((x) << 6)
#define OP_ADDR_DDR		BIT(5)
#define OP_ADDR_BUSW(x)		((x) << 3)
#define OP_CMD_DDR		BIT(2)
#define OP_CMD_BUSW(x)		(x)
#define OP_BUSW_1		0
#define OP_BUSW_2		1
#define OP_BUSW_4		2
#define OP_BUSW_8		3

#define OCTA_CRC		0x38
#define OCTA_CRC_IN_EN(s)	BIT(3 + ((s) * 16))
#define OCTA_CRC_CHUNK(s, x)	((fls((x) / 32)) << (1 + ((s) * 16)))
#define OCTA_CRC_OUT_EN(s)	BIT(0 + ((s) * 16))

#define ONFI_DIN_CNT(s)		(0x3c + (s))

#define LRD_CTRL		0x48
#define RWW_CTRL		0x74
#define LWR_CTRL		0x84
#define LMODE_EN		BIT(31)
#define LMODE_SLV_ACT(x)	((x) << 21)
#define LMODE_CMD1(x)		((x) << 8)
#define LMODE_CMD0(x)		(x)

#define LRD_ADDR		0x4c
#define LWR_ADDR		0x88
#define LRD_RANGE		0x50
#define LWR_RANGE		0x8c

#define AXI_SLV_ADDR		0x54

#define DMAC_RD_CFG		0x58
#define DMAC_WR_CFG		0x94
#define DMAC_CFG_PERIPH_EN	BIT(31)
#define DMAC_CFG_ALLFLUSH_EN	BIT(30)
#define DMAC_CFG_LASTFLUSH_EN	BIT(29)
#define DMAC_CFG_QE(x)		(((x) + 1) << 16)
#define DMAC_CFG_BURST_LEN(x)	(((x) + 1) << 12)
#define DMAC_CFG_BURST_SZ(x)	((x) << 8)
#define DMAC_CFG_DIR_READ	BIT(1)
#define DMAC_CFG_START		BIT(0)

#define DMAC_RD_CNT		0x5c
#define DMAC_WR_CNT		0x98

#define SDMA_ADDR		0x60

#define DMAM_CFG		0x64
#define DMAM_CFG_START		BIT(31)
#define DMAM_CFG_CONT		BIT(30)
#define DMAM_CFG_SDMA_GAP(x)	(fls((x) / 8192) << 2)
#define DMAM_CFG_DIR_READ	BIT(1)
#define DMAM_CFG_EN		BIT(0)

#define DMAM_CNT		0x68

#define LNR_TIMER_TH		0x6c

#define RDM_CFG0		0x78
#define RDM_CFG0_POLY(x)	(x)

#define RDM_CFG1		0x7c
#define RDM_CFG1_RDM_EN		BIT(31)
#define RDM_CFG1_SEED(x)	(x)

#define LWR_SUSP_CTRL		0x90
#define LWR_SUSP_CTRL_EN	BIT(31)

#define DMAS_CTRL		0x9c
#define DMAS_CTRL_EN		BIT(31)
#define DMAS_CTRL_DIR_READ	BIT(30)

#define DATA_STROB		0xa0
#define DATA_STROB_EDO_EN	BIT(2)
#define DATA_STROB_INV_POL	BIT(1)
#define DATA_STROB_DELAY_2CYC	BIT(0)

#define IDLY_CODE(x)		(0xa4 + ((x) * 4))
#define IDLY_CODE_VAL(x, v)	((v) << (((x) % 4) * 8))

#define GPIO			0xc4
#define GPIO_PT(x)		BIT(3 + ((x) * 16))
#define GPIO_RESET(x)		BIT(2 + ((x) * 16))
#define GPIO_HOLDB(x)		BIT(1 + ((x) * 16))
#define GPIO_WPB(x)		BIT((x) * 16)

#define HC_VER			0xd0

#define HW_TEST(x)		(0xe0 + ((x) * 4))

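/*
 * Driver state. The "linear" block describes the optional "dirmap" memory
 * window used for direct-mapping reads and writes, and "ecc" tracks the
 * external pipelined ECC engine when one is attached.
 */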
struct mxic_spi {
	struct device *dev;
	struct clk *ps_clk;
	struct clk *send_clk;
	struct clk *send_dly_clk;
	void __iomem *regs;
	u32 cur_speed_hz;
	struct {
		void __iomem *map;
		dma_addr_t dma;
		size_t size;
	} linear;

	struct {
		bool use_pipelined_conf;
		struct nand_ecc_engine *pipelined_engine;
		void *ctx;
	} ecc;
};

static int mxic_spi_clk_enable(struct mxic_spi *mxic)
{
	int ret;

	ret = clk_prepare_enable(mxic->send_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(mxic->send_dly_clk);
	if (ret)
		goto err_send_dly_clk;

	return ret;

err_send_dly_clk:
	clk_disable_unprepare(mxic->send_clk);

	return ret;
}

static void mxic_spi_clk_disable(struct mxic_spi *mxic)
{
	clk_disable_unprepare(mxic->send_clk);
	clk_disable_unprepare(mxic->send_dly_clk);
}

static void mxic_spi_set_input_delay_dqs(struct mxic_spi *mxic, u8 idly_code)
{
	writel(IDLY_CODE_VAL(0, idly_code) |
	       IDLY_CODE_VAL(1, idly_code) |
	       IDLY_CODE_VAL(2, idly_code) |
	       IDLY_CODE_VAL(3, idly_code),
	       mxic->regs + IDLY_CODE(0));
	writel(IDLY_CODE_VAL(4, idly_code) |
	       IDLY_CODE_VAL(5, idly_code) |
	       IDLY_CODE_VAL(6, idly_code) |
	       IDLY_CODE_VAL(7, idly_code),
	       mxic->regs + IDLY_CODE(1));
}

static int mxic_spi_clk_setup(struct mxic_spi *mxic, unsigned long freq)
{
	int ret;

	ret = clk_set_rate(mxic->send_clk, freq);
	if (ret)
		return ret;

	ret = clk_set_rate(mxic->send_dly_clk, freq);
	if (ret)
		return ret;

	/*
	 * The input delay code ranges from 0x0 to 0x1F in steps of 78 ps,
	 * so the maximum input delay is 2.418 ns.
	 */
	mxic_spi_set_input_delay_dqs(mxic, 0xf);

	/*
	 * Phase degree = 360 * freq * output-delay,
	 * where output-delay is a constant 1 ns on the FPGA.
	 *
	 * Phase degree = 360 * freq * 1 ns
	 *              = 360 * freq * 1 sec / 1000000000
	 *              = 9 * freq / 25000000
	 */
	ret = clk_set_phase(mxic->send_dly_clk, 9 * freq / 25000000);
	if (ret)
		return ret;

	return 0;
}

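/*
 * The send clocks are only reconfigured while disabled: stop them, update
 * rate and phase, then re-enable. The current rate is cached so repeated
 * transfers at the same frequency skip the whole sequence.
 */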
static int mxic_spi_set_freq(struct mxic_spi *mxic, unsigned long freq)
{
	int ret;

	if (mxic->cur_speed_hz == freq)
		return 0;

	mxic_spi_clk_disable(mxic);
	ret = mxic_spi_clk_setup(mxic, freq);
	if (ret)
		return ret;

	ret = mxic_spi_clk_enable(mxic);
	if (ret)
		return ret;

	mxic->cur_speed_hz = freq;

	return 0;
}

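/*
 * Bring the controller to a known state: data strobe off, all interrupt
 * status sources enabled, host and linear read modes disabled, and a
 * single-I/O SPI-NOR configuration with manual chip-select control.
 */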
static void mxic_spi_hw_init(struct mxic_spi *mxic)
{
	writel(0, mxic->regs + DATA_STROB);
	writel(INT_STS_ALL, mxic->regs + INT_STS_EN);
	writel(0, mxic->regs + HC_EN);
	writel(0, mxic->regs + LRD_CFG);
	writel(0, mxic->regs + LRD_CTRL);
	writel(HC_CFG_NIO(1) | HC_CFG_TYPE(0, HC_CFG_TYPE_SPI_NOR) |
	       HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN | HC_CFG_IDLE_SIO_LVL(1),
	       mxic->regs + HC_CFG);
}

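/*
 * Build an HC_CFG value for the given device: the number of I/O lines is
 * taken from the widest TX/RX mode bits the device advertises, and the
 * slave type and active-slave fields are derived from its chip select.
 */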
static u32 mxic_spi_prep_hc_cfg(struct spi_device *spi, u32 flags)
{
	int nio = 1;

	if (spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL))
		nio = 8;
	else if (spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
		nio = 4;
	else if (spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL))
		nio = 2;

	return flags | HC_CFG_NIO(nio) |
	       HC_CFG_TYPE(spi_get_chipselect(spi, 0), HC_CFG_TYPE_SPI_NOR) |
	       HC_CFG_SLV_ACT(spi_get_chipselect(spi, 0)) | HC_CFG_IDLE_SIO_LVL(1);
}

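/*
 * Translate a spi_mem_op into the op encoding shared by SS_CTRL and the
 * LRD_CFG/LWR_CFG linear-mode registers: command/address byte counts,
 * per-phase bus widths and DTR flags, dummy cycles (taken from the dummy
 * byte count) and the transfer direction. DQS sampling is only enabled
 * for DTR reads.
 */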
static u32 mxic_spi_mem_prep_op_cfg(const struct spi_mem_op *op,
				    unsigned int data_len)
{
	u32 cfg = OP_CMD_BYTES(op->cmd.nbytes) |
		  OP_CMD_BUSW(fls(op->cmd.buswidth) - 1) |
		  (op->cmd.dtr ? OP_CMD_DDR : 0);

	if (op->addr.nbytes)
		cfg |= OP_ADDR_BYTES(op->addr.nbytes) |
		       OP_ADDR_BUSW(fls(op->addr.buswidth) - 1) |
		       (op->addr.dtr ? OP_ADDR_DDR : 0);

	if (op->dummy.nbytes)
		cfg |= OP_DUMMY_CYC(op->dummy.nbytes);

	/* In direct mapping mode the data.nbytes field is not populated */
	if (data_len) {
		cfg |= OP_DATA_BUSW(fls(op->data.buswidth) - 1) |
		       (op->data.dtr ? OP_DATA_DDR : 0);
		if (op->data.dir == SPI_MEM_DATA_IN) {
			cfg |= OP_READ;
			if (op->data.dtr)
				cfg |= OP_DQS_EN;
		}
	}

	return cfg;
}

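/*
 * PIO transfer loop: move up to 4 bytes per iteration through the TX/RX
 * data registers. Writing TXD(nbytes % 4) selects the TXD register used
 * for nbytes-byte accesses; on reads, partial words come back in the upper
 * bytes of RXD, hence the shift before copying them out. Each step polls
 * the TX-empty/RX-not-empty status bits with a one second timeout.
 */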
static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
			      void *rxbuf, unsigned int len)
{
	unsigned int pos = 0;

	while (pos < len) {
		unsigned int nbytes = len - pos;
		u32 data = 0xffffffff;
		u32 sts;
		int ret;

		if (nbytes > 4)
			nbytes = 4;

		if (txbuf)
			memcpy(&data, txbuf + pos, nbytes);

		ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
					 sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
		if (ret)
			return ret;

		writel(data, mxic->regs + TXD(nbytes % 4));

		ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
					 sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
		if (ret)
			return ret;

		ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
					 sts & INT_RX_NOT_EMPTY, 0,
					 USEC_PER_SEC);
		if (ret)
			return ret;

		data = readl(mxic->regs + RXD);
		if (rxbuf) {
			data >>= (8 * (4 - nbytes));
			memcpy(rxbuf + pos, &data, nbytes);
		}
		WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY);

		pos += nbytes;
	}

	return 0;
}

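/*
 * Direct-mapping read through the linear read (LRD) engine: program the op
 * template, start address and range, then either let the pipelined ECC
 * engine DMA the data or copy it out of the memory-mapped window, and
 * finally tear linear mode down and wait for the LRD-disable status bit.
 */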
static ssize_t mxic_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
					u64 offs, size_t len, void *buf)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(desc->mem->spi->controller);
	int ret;
	u32 sts;

	if (WARN_ON(offs + desc->info.offset + len > U32_MAX))
		return -EINVAL;

	writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0), mxic->regs + HC_CFG);

	writel(mxic_spi_mem_prep_op_cfg(&desc->info.op_tmpl, len),
	       mxic->regs + LRD_CFG);
	writel(desc->info.offset + offs, mxic->regs + LRD_ADDR);
	len = min_t(size_t, len, mxic->linear.size);
	writel(len, mxic->regs + LRD_RANGE);
	writel(LMODE_CMD0(desc->info.op_tmpl.cmd.opcode) |
	       LMODE_SLV_ACT(spi_get_chipselect(desc->mem->spi, 0)) |
	       LMODE_EN,
	       mxic->regs + LRD_CTRL);

	if (mxic->ecc.use_pipelined_conf && desc->info.op_tmpl.data.ecc) {
		ret = mxic_ecc_process_data_pipelined(mxic->ecc.pipelined_engine,
						      NAND_PAGE_READ,
						      mxic->linear.dma + offs);
		if (ret)
			return ret;
	} else {
		memcpy_fromio(buf, mxic->linear.map, len);
	}

	writel(INT_LRD_DIS, mxic->regs + INT_STS);
	writel(0, mxic->regs + LRD_CTRL);

	ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
				 sts & INT_LRD_DIS, 0, USEC_PER_SEC);
	if (ret)
		return ret;

	return len;
}

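/*
 * Direct-mapping write: mirror of the read path above, using the linear
 * write (LWR) registers and waiting for the LWR-disable status bit.
 */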
static ssize_t mxic_spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
					 u64 offs, size_t len,
					 const void *buf)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(desc->mem->spi->controller);
	u32 sts;
	int ret;

	if (WARN_ON(offs + desc->info.offset + len > U32_MAX))
		return -EINVAL;

	writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0), mxic->regs + HC_CFG);

	writel(mxic_spi_mem_prep_op_cfg(&desc->info.op_tmpl, len),
	       mxic->regs + LWR_CFG);
	writel(desc->info.offset + offs, mxic->regs + LWR_ADDR);
	len = min_t(size_t, len, mxic->linear.size);
	writel(len, mxic->regs + LWR_RANGE);
	writel(LMODE_CMD0(desc->info.op_tmpl.cmd.opcode) |
	       LMODE_SLV_ACT(spi_get_chipselect(desc->mem->spi, 0)) |
	       LMODE_EN,
	       mxic->regs + LWR_CTRL);

	if (mxic->ecc.use_pipelined_conf && desc->info.op_tmpl.data.ecc) {
		ret = mxic_ecc_process_data_pipelined(mxic->ecc.pipelined_engine,
						      NAND_PAGE_WRITE,
						      mxic->linear.dma + offs);
		if (ret)
			return ret;
	} else {
		memcpy_toio(mxic->linear.map, buf, len);
	}

	writel(INT_LWR_DIS, mxic->regs + INT_STS);
	writel(0, mxic->regs + LWR_CTRL);

	ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
				 sts & INT_LWR_DIS, 0, USEC_PER_SEC);
	if (ret)
		return ret;

	return len;
}

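/*
 * Reject operations the controller cannot encode: bus widths above 8, data
 * and dummy phases on different widths, or more than 7 address bytes.
 */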
static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
				     const struct spi_mem_op *op)
{
	if (op->data.buswidth > 8 || op->addr.buswidth > 8 ||
	    op->dummy.buswidth > 8 || op->cmd.buswidth > 8)
		return false;

	if (op->data.nbytes && op->dummy.nbytes &&
	    op->data.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes > 7)
		return false;

	return spi_mem_default_supports_op(mem, op);
}

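/*
 * Direct mapping is only offered when the optional "dirmap" window was
 * mapped at probe time and the template op fits within the controller and
 * 32-bit address limits.
 */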
static int mxic_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(desc->mem->spi->controller);

	if (!mxic->linear.map)
		return -EOPNOTSUPP;

	if (desc->info.offset + desc->info.length > U32_MAX)
		return -EINVAL;

	if (!mxic_spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
		return -EOPNOTSUPP;

	return 0;
}

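/*
 * Execute a spi_mem operation in PIO mode: with the chip select manually
 * asserted, the command, address, dummy and data phases are pushed through
 * mxic_spi_data_xfer() using the per-slave SS_CTRL encoding.
 */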
static int mxic_spi_mem_exec_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(mem->spi->controller);
	int i, ret;
	u8 addr[8], cmd[2];

	ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
	if (ret)
		return ret;

	writel(mxic_spi_prep_hc_cfg(mem->spi, HC_CFG_MAN_CS_EN),
	       mxic->regs + HC_CFG);

	writel(HC_EN_BIT, mxic->regs + HC_EN);

	writel(mxic_spi_mem_prep_op_cfg(op, op->data.nbytes),
	       mxic->regs + SS_CTRL(spi_get_chipselect(mem->spi, 0)));

	writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
	       mxic->regs + HC_CFG);

	for (i = 0; i < op->cmd.nbytes; i++)
		cmd[i] = op->cmd.opcode >> (8 * (op->cmd.nbytes - i - 1));

	ret = mxic_spi_data_xfer(mxic, cmd, NULL, op->cmd.nbytes);
	if (ret)
		goto out;

	for (i = 0; i < op->addr.nbytes; i++)
		addr[i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));

	ret = mxic_spi_data_xfer(mxic, addr, NULL, op->addr.nbytes);
	if (ret)
		goto out;

	ret = mxic_spi_data_xfer(mxic, NULL, NULL, op->dummy.nbytes);
	if (ret)
		goto out;

	ret = mxic_spi_data_xfer(mxic,
				 op->data.dir == SPI_MEM_DATA_OUT ?
				 op->data.buf.out : NULL,
				 op->data.dir == SPI_MEM_DATA_IN ?
				 op->data.buf.in : NULL,
				 op->data.nbytes);

out:
	writel(readl(mxic->regs + HC_CFG) & ~HC_CFG_MAN_CS_ASSERT,
	       mxic->regs + HC_CFG);
	writel(0, mxic->regs + HC_EN);

	return ret;
}

static const struct spi_controller_mem_ops mxic_spi_mem_ops = {
	.supports_op = mxic_spi_mem_supports_op,
	.exec_op = mxic_spi_mem_exec_op,
	.dirmap_create = mxic_spi_mem_dirmap_create,
	.dirmap_read = mxic_spi_mem_dirmap_read,
	.dirmap_write = mxic_spi_mem_dirmap_write,
};

static const struct spi_controller_mem_caps mxic_spi_mem_caps = {
	.dtr = true,
	.ecc = true,
};

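/*
 * Manual chip-select control for plain SPI transfers: a low level enables
 * the controller and asserts the chip select, a high level releases the
 * chip select and disables the controller again.
 */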
static void mxic_spi_set_cs(struct spi_device *spi, bool lvl)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(spi->controller);

	if (!lvl) {
		writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_EN,
		       mxic->regs + HC_CFG);
		writel(HC_EN_BIT, mxic->regs + HC_EN);
		writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
		       mxic->regs + HC_CFG);
	} else {
		writel(readl(mxic->regs + HC_CFG) & ~HC_CFG_MAN_CS_ASSERT,
		       mxic->regs + HC_CFG);
		writel(0, mxic->regs + HC_EN);
	}
}

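/*
 * Plain SPI transfer path. Full-duplex transfers cannot mix bus widths, so
 * dual/quad TX without the matching RX mode is rejected; otherwise the bus
 * width is picked from the relevant TX or RX mode bits and the data is
 * moved by the PIO helper.
 */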
static int mxic_spi_transfer_one(struct spi_controller *host,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(host);
	unsigned int busw = OP_BUSW_1;
	int ret;

	if (t->rx_buf && t->tx_buf) {
		if (((spi->mode & SPI_TX_QUAD) &&
		     !(spi->mode & SPI_RX_QUAD)) ||
		    ((spi->mode & SPI_TX_DUAL) &&
		     !(spi->mode & SPI_RX_DUAL)))
			return -ENOTSUPP;
	}

	ret = mxic_spi_set_freq(mxic, t->speed_hz);
	if (ret)
		return ret;

	if (t->tx_buf) {
		if (spi->mode & SPI_TX_QUAD)
			busw = OP_BUSW_4;
		else if (spi->mode & SPI_TX_DUAL)
			busw = OP_BUSW_2;
	} else if (t->rx_buf) {
		if (spi->mode & SPI_RX_QUAD)
			busw = OP_BUSW_4;
		else if (spi->mode & SPI_RX_DUAL)
			busw = OP_BUSW_2;
	}

	writel(OP_CMD_BYTES(1) | OP_CMD_BUSW(busw) |
	       OP_DATA_BUSW(busw) | (t->rx_buf ? OP_READ : 0),
	       mxic->regs + SS_CTRL(0));

	ret = mxic_spi_data_xfer(mxic, t->tx_buf, t->rx_buf, t->len);
	if (ret)
		return ret;

	spi_finalize_current_transfer(host);

	return 0;
}

/* ECC wrapper */
static int mxic_spi_mem_ecc_init_ctx(struct nand_device *nand)
{
	struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
	struct mxic_spi *mxic = nand->ecc.engine->priv;

	mxic->ecc.use_pipelined_conf = true;

	return ops->init_ctx(nand);
}

static void mxic_spi_mem_ecc_cleanup_ctx(struct nand_device *nand)
{
	struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
	struct mxic_spi *mxic = nand->ecc.engine->priv;

	mxic->ecc.use_pipelined_conf = false;

	ops->cleanup_ctx(nand);
}

static int mxic_spi_mem_ecc_prepare_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();

	return ops->prepare_io_req(nand, req);
}

static int mxic_spi_mem_ecc_finish_io_req(struct nand_device *nand,
					  struct nand_page_io_req *req)
{
	struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();

	return ops->finish_io_req(nand, req);
}

static struct nand_ecc_engine_ops mxic_spi_mem_ecc_engine_pipelined_ops = {
	.init_ctx = mxic_spi_mem_ecc_init_ctx,
	.cleanup_ctx = mxic_spi_mem_ecc_cleanup_ctx,
	.prepare_io_req = mxic_spi_mem_ecc_prepare_io_req,
	.finish_io_req = mxic_spi_mem_ecc_finish_io_req,
};

static void mxic_spi_mem_ecc_remove(struct mxic_spi *mxic)
{
	if (mxic->ecc.pipelined_engine) {
		mxic_ecc_put_pipelined_engine(mxic->ecc.pipelined_engine);
		nand_ecc_unregister_on_host_hw_engine(mxic->ecc.pipelined_engine);
	}
}

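/*
 * Attach the external Macronix pipelined ECC engine, when the ECC driver
 * provides one, and register it as an on-host hardware engine so that
 * SPI-NAND can use it.
 */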
static int mxic_spi_mem_ecc_probe(struct platform_device *pdev,
				  struct mxic_spi *mxic)
{
	struct nand_ecc_engine *eng;

	if (!mxic_ecc_get_pipelined_ops())
		return -EOPNOTSUPP;

	eng = mxic_ecc_get_pipelined_engine(pdev);
	if (IS_ERR(eng))
		return PTR_ERR(eng);

	eng->dev = &pdev->dev;
	eng->integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
	eng->ops = &mxic_spi_mem_ecc_engine_pipelined_ops;
	eng->priv = mxic;
	mxic->ecc.pipelined_engine = eng;
	nand_ecc_register_on_host_hw_engine(eng);

	return 0;
}

static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mxic_spi *mxic = spi_controller_get_devdata(host);

	mxic_spi_clk_disable(mxic);
	clk_disable_unprepare(mxic->ps_clk);

	return 0;
}

static int __maybe_unused mxic_spi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mxic_spi *mxic = spi_controller_get_devdata(host);
	int ret;

	ret = clk_prepare_enable(mxic->ps_clk);
	if (ret) {
		dev_err(dev, "Cannot enable ps_clock.\n");
		return ret;
	}

	return mxic_spi_clk_enable(mxic);
}

static const struct dev_pm_ops mxic_spi_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(mxic_spi_runtime_suspend,
			   mxic_spi_runtime_resume, NULL)
};

static int mxic_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct resource *res;
	struct mxic_spi *mxic;
	int ret;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(struct mxic_spi));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);

	mxic = spi_controller_get_devdata(host);
	mxic->dev = &pdev->dev;

	host->dev.of_node = pdev->dev.of_node;

	mxic->ps_clk = devm_clk_get(&pdev->dev, "ps_clk");
	if (IS_ERR(mxic->ps_clk))
		return PTR_ERR(mxic->ps_clk);

	mxic->send_clk = devm_clk_get(&pdev->dev, "send_clk");
	if (IS_ERR(mxic->send_clk))
		return PTR_ERR(mxic->send_clk);

	mxic->send_dly_clk = devm_clk_get(&pdev->dev, "send_dly_clk");
	if (IS_ERR(mxic->send_dly_clk))
		return PTR_ERR(mxic->send_dly_clk);

	mxic->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
	if (IS_ERR(mxic->regs))
		return PTR_ERR(mxic->regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
	mxic->linear.map = devm_ioremap_resource(&pdev->dev, res);
	if (!IS_ERR(mxic->linear.map)) {
		mxic->linear.dma = res->start;
		mxic->linear.size = resource_size(res);
	} else {
		mxic->linear.map = NULL;
	}

	pm_runtime_enable(&pdev->dev);
	host->auto_runtime_pm = true;

	host->num_chipselect = 1;
	host->mem_ops = &mxic_spi_mem_ops;
	host->mem_caps = &mxic_spi_mem_caps;

	host->set_cs = mxic_spi_set_cs;
	host->transfer_one = mxic_spi_transfer_one;
	host->bits_per_word_mask = SPI_BPW_MASK(8);
	host->mode_bits = SPI_CPOL | SPI_CPHA |
			  SPI_RX_DUAL | SPI_TX_DUAL |
			  SPI_RX_QUAD | SPI_TX_QUAD |
			  SPI_RX_OCTAL | SPI_TX_OCTAL;

	mxic_spi_hw_init(mxic);

	ret = mxic_spi_mem_ecc_probe(pdev, mxic);
	if (ret == -EPROBE_DEFER) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	ret = spi_register_controller(host);
	if (ret) {
		dev_err(&pdev->dev, "spi_register_controller failed\n");
		pm_runtime_disable(&pdev->dev);
		mxic_spi_mem_ecc_remove(mxic);
	}

	return ret;
}

static void mxic_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct mxic_spi *mxic = spi_controller_get_devdata(host);

	pm_runtime_disable(&pdev->dev);
	mxic_spi_mem_ecc_remove(mxic);
	spi_unregister_controller(host);
}

static const struct of_device_id mxic_spi_of_ids[] = {
	{ .compatible = "mxicy,mx25f0a-spi", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxic_spi_of_ids);

static struct platform_driver mxic_spi_driver = {
	.probe = mxic_spi_probe,
	.remove_new = mxic_spi_remove,
	.driver = {
		.name = "mxic-spi",
		.of_match_table = mxic_spi_of_ids,
		.pm = &mxic_spi_dev_pm_ops,
	},
};
module_platform_driver(mxic_spi_driver);

MODULE_AUTHOR("Mason Yang <masonccyang@mxic.com.tw>");
MODULE_DESCRIPTION("MX25F0A SPI controller driver");
MODULE_LICENSE("GPL v2");