xref: /linux/drivers/spi/spi-mtk-snfi.c (revision 976a689122df6c7d8d826ad36578c0291a1dc214)
1764f1b74SChuanhong Guo // SPDX-License-Identifier: GPL-2.0
2764f1b74SChuanhong Guo //
3764f1b74SChuanhong Guo // Driver for the SPI-NAND mode of Mediatek NAND Flash Interface
4764f1b74SChuanhong Guo //
5764f1b74SChuanhong Guo // Copyright (c) 2022 Chuanhong Guo <gch981213@gmail.com>
6764f1b74SChuanhong Guo //
7764f1b74SChuanhong Guo // This driver is based on the SPI-NAND mtd driver from Mediatek SDK:
8764f1b74SChuanhong Guo //
9764f1b74SChuanhong Guo // Copyright (C) 2020 MediaTek Inc.
10764f1b74SChuanhong Guo // Author: Weijie Gao <weijie.gao@mediatek.com>
11764f1b74SChuanhong Guo //
12764f1b74SChuanhong Guo // This controller organizes the page data as several interleaved sectors
13764f1b74SChuanhong Guo // like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
14764f1b74SChuanhong Guo // +---------+------+------+---------+------+------+-----+
15764f1b74SChuanhong Guo // | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
16764f1b74SChuanhong Guo // +---------+------+------+---------+------+------+-----+
17764f1b74SChuanhong Guo // With auto-format turned on, DMA only returns this part:
18764f1b74SChuanhong Guo // +---------+---------+-----+
19764f1b74SChuanhong Guo // | Sector1 | Sector2 | ... |
20764f1b74SChuanhong Guo // +---------+---------+-----+
21764f1b74SChuanhong Guo // The FDM data will be filled into the FDM registers, and ECC parity data isn't
22764f1b74SChuanhong Guo // accessible.
23764f1b74SChuanhong Guo // With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA
24764f1b74SChuanhong Guo // in its original order as shown in the first table. ECC can't be turned on when
25764f1b74SChuanhong Guo // auto-format is off.
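// A concrete example (hypothetical geometry: 2048-byte page, 512-byte sectors,
// snf->nfi_cfg.spare_size = 16): in the raw layout each sector occupies
// 512 + 16 = 528 bytes, so sector k starts at byte k * 528 and its FDM + ECC
// bytes follow at k * 528 + 512. With auto-format on, the DMA buffer instead
// holds sector k's data at byte k * 512 and no spare bytes at all.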
26764f1b74SChuanhong Guo //
27764f1b74SChuanhong Guo // However, the Linux SPI-NAND driver expects the data to be returned as:
28764f1b74SChuanhong Guo // +------+-----+
29764f1b74SChuanhong Guo // | Page | OOB |
30764f1b74SChuanhong Guo // +------+-----+
31764f1b74SChuanhong Guo // where the page data is stored contiguously instead of interleaved.
32764f1b74SChuanhong Guo // So we assume all instructions matching the page_op template between ECC
33764f1b74SChuanhong Guo // prepare_io_req and finish_io_req are for page cache r/w.
34764f1b74SChuanhong Guo // Here's how this spi-mem driver operates when reading:
35764f1b74SChuanhong Guo //  1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
36764f1b74SChuanhong Guo //  2. Perform page ops and let the controller fill the DMA bounce buffer with
37764f1b74SChuanhong Guo //     de-interleaved sector data and set FDM registers.
38764f1b74SChuanhong Guo //  3. Return the data as:
39764f1b74SChuanhong Guo //     +---------+---------+-----+------+------+-----+
40764f1b74SChuanhong Guo //     | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
41764f1b74SChuanhong Guo //     +---------+---------+-----+------+------+-----+
42764f1b74SChuanhong Guo //  4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
43764f1b74SChuanhong Guo //     read the data with auto-format off into the bounce buffer and copy
44764f1b74SChuanhong Guo //     the needed data to the buffer specified in the request.
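// With the same hypothetical geometry (2048-byte page, 4 sectors, fdm_size
// = 8), step 3 returns bytes [0, 2048) as page data and bytes [2048, 2080)
// as OOB, i.e. the 4 * 8 FDM bytes read back from the FDM registers.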
45764f1b74SChuanhong Guo //
46764f1b74SChuanhong Guo // Write requests operate in a similar manner.
47764f1b74SChuanhong Guo // As a limitation of this strategy, we won't be able to access any ECC parity
48764f1b74SChuanhong Guo // data at all in Linux.
49764f1b74SChuanhong Guo //
50764f1b74SChuanhong Guo // Here's the bad block mark situation on MTK chips:
51764f1b74SChuanhong Guo // In older chips like mt7622, MTK uses the first FDM byte in the first sector
52764f1b74SChuanhong Guo // as the bad block mark. After de-interleaving, this byte appears at [pagesize]
53764f1b74SChuanhong Guo // in the returned data, which is the BBM position expected by the kernel. However,
54764f1b74SChuanhong Guo // the conventional bad block mark is the first byte of the OOB, which is part
55764f1b74SChuanhong Guo // of the last sector data in the interleaved layout. Instead of fixing their
56764f1b74SChuanhong Guo // hardware, MTK decided to address this inconsistency in software. On
57764f1b74SChuanhong Guo // later chips, the BootROM expects the following:
58764f1b74SChuanhong Guo // 1. The [pagesize] byte of a NAND page is used as the BBM, which will appear at
59764f1b74SChuanhong Guo //    (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
60764f1b74SChuanhong Guo // 2. The original byte stored at that position in the DMA buffer will be stored
61764f1b74SChuanhong Guo //    as the first byte of the FDM section in the last sector.
62764f1b74SChuanhong Guo // We can't disagree with the BootROM, so after de-interleaving, we need to
63764f1b74SChuanhong Guo // perform the following swaps in read:
64764f1b74SChuanhong Guo // 1. Move the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size],
65764f1b74SChuanhong Guo //    which is the BBM position expected by the kernel.
66764f1b74SChuanhong Guo // 2. Move the page data byte at [page_size + (nsectors - 1) * fdm_size] back
67764f1b74SChuanhong Guo //    to [page_size - (nsectors - 1) * spare_size].
68764f1b74SChuanhong Guo // Similarly, when writing, we need to perform swaps in the other direction.
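// Worked example (same hypothetical geometry: page_size = 2048, nsectors = 4,
// spare_size = 16, fdm_size = 8): the BootROM's BBM is read back at offset
// 2048 - 3 * 16 = 2000 of the DMA buffer and the displaced page-data byte
// shows up at offset 2048 + 3 * 8 = 2072 (first FDM byte of the last sector).
// The two read swaps above leave the BBM at offset 2048 (the start of the
// OOB, where the kernel expects it) and the original data byte back at 2000.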
69764f1b74SChuanhong Guo 
70764f1b74SChuanhong Guo #include <linux/kernel.h>
71764f1b74SChuanhong Guo #include <linux/module.h>
72764f1b74SChuanhong Guo #include <linux/init.h>
73764f1b74SChuanhong Guo #include <linux/device.h>
74764f1b74SChuanhong Guo #include <linux/mutex.h>
75764f1b74SChuanhong Guo #include <linux/clk.h>
76764f1b74SChuanhong Guo #include <linux/interrupt.h>
77764f1b74SChuanhong Guo #include <linux/dma-mapping.h>
78764f1b74SChuanhong Guo #include <linux/iopoll.h>
79764f1b74SChuanhong Guo #include <linux/of_platform.h>
80764f1b74SChuanhong Guo #include <linux/mtd/nand-ecc-mtk.h>
81764f1b74SChuanhong Guo #include <linux/spi/spi.h>
82764f1b74SChuanhong Guo #include <linux/spi/spi-mem.h>
83764f1b74SChuanhong Guo #include <linux/mtd/nand.h>
84764f1b74SChuanhong Guo 
85764f1b74SChuanhong Guo // NFI registers
86764f1b74SChuanhong Guo #define NFI_CNFG 0x000
87764f1b74SChuanhong Guo #define CNFG_OP_MODE_S 12
88764f1b74SChuanhong Guo #define CNFG_OP_MODE_CUST 6
89764f1b74SChuanhong Guo #define CNFG_OP_MODE_PROGRAM 3
90764f1b74SChuanhong Guo #define CNFG_AUTO_FMT_EN BIT(9)
91764f1b74SChuanhong Guo #define CNFG_HW_ECC_EN BIT(8)
92764f1b74SChuanhong Guo #define CNFG_DMA_BURST_EN BIT(2)
93764f1b74SChuanhong Guo #define CNFG_READ_MODE BIT(1)
94764f1b74SChuanhong Guo #define CNFG_DMA_MODE BIT(0)
95764f1b74SChuanhong Guo 
96764f1b74SChuanhong Guo #define NFI_PAGEFMT 0x0004
97764f1b74SChuanhong Guo #define NFI_SPARE_SIZE_LS_S 16
98764f1b74SChuanhong Guo #define NFI_FDM_ECC_NUM_S 12
99764f1b74SChuanhong Guo #define NFI_FDM_NUM_S 8
100764f1b74SChuanhong Guo #define NFI_SPARE_SIZE_S 4
101764f1b74SChuanhong Guo #define NFI_SEC_SEL_512 BIT(2)
102764f1b74SChuanhong Guo #define NFI_PAGE_SIZE_S 0
103764f1b74SChuanhong Guo #define NFI_PAGE_SIZE_512_2K 0
104764f1b74SChuanhong Guo #define NFI_PAGE_SIZE_2K_4K 1
105764f1b74SChuanhong Guo #define NFI_PAGE_SIZE_4K_8K 2
106764f1b74SChuanhong Guo #define NFI_PAGE_SIZE_8K_16K 3
107764f1b74SChuanhong Guo 
108764f1b74SChuanhong Guo #define NFI_CON 0x008
109764f1b74SChuanhong Guo #define CON_SEC_NUM_S 12
110764f1b74SChuanhong Guo #define CON_BWR BIT(9)
111764f1b74SChuanhong Guo #define CON_BRD BIT(8)
112764f1b74SChuanhong Guo #define CON_NFI_RST BIT(1)
113764f1b74SChuanhong Guo #define CON_FIFO_FLUSH BIT(0)
114764f1b74SChuanhong Guo 
115764f1b74SChuanhong Guo #define NFI_INTR_EN 0x010
116764f1b74SChuanhong Guo #define NFI_INTR_STA 0x014
117764f1b74SChuanhong Guo #define NFI_IRQ_INTR_EN BIT(31)
118764f1b74SChuanhong Guo #define NFI_IRQ_CUS_READ BIT(8)
119764f1b74SChuanhong Guo #define NFI_IRQ_CUS_PG BIT(7)
120764f1b74SChuanhong Guo 
121764f1b74SChuanhong Guo #define NFI_CMD 0x020
122764f1b74SChuanhong Guo #define NFI_CMD_DUMMY_READ 0x00
123764f1b74SChuanhong Guo #define NFI_CMD_DUMMY_WRITE 0x80
124764f1b74SChuanhong Guo 
125764f1b74SChuanhong Guo #define NFI_STRDATA 0x040
126764f1b74SChuanhong Guo #define STR_DATA BIT(0)
127764f1b74SChuanhong Guo 
128764f1b74SChuanhong Guo #define NFI_STA 0x060
1297073888cSXiangsheng Hou #define NFI_NAND_FSM_7622 GENMASK(28, 24)
1307073888cSXiangsheng Hou #define NFI_NAND_FSM_7986 GENMASK(29, 23)
131764f1b74SChuanhong Guo #define NFI_FSM GENMASK(19, 16)
132764f1b74SChuanhong Guo #define READ_EMPTY BIT(12)
133764f1b74SChuanhong Guo 
134764f1b74SChuanhong Guo #define NFI_FIFOSTA 0x064
135764f1b74SChuanhong Guo #define FIFO_WR_REMAIN_S 8
136764f1b74SChuanhong Guo #define FIFO_RD_REMAIN_S 0
137764f1b74SChuanhong Guo 
138764f1b74SChuanhong Guo #define NFI_ADDRCNTR 0x070
139764f1b74SChuanhong Guo #define SEC_CNTR GENMASK(16, 12)
140764f1b74SChuanhong Guo #define SEC_CNTR_S 12
141764f1b74SChuanhong Guo #define NFI_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)
142764f1b74SChuanhong Guo 
143764f1b74SChuanhong Guo #define NFI_STRADDR 0x080
144764f1b74SChuanhong Guo 
145764f1b74SChuanhong Guo #define NFI_BYTELEN 0x084
146764f1b74SChuanhong Guo #define BUS_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)
147764f1b74SChuanhong Guo 
148764f1b74SChuanhong Guo #define NFI_FDM0L 0x0a0
149764f1b74SChuanhong Guo #define NFI_FDM0M 0x0a4
150764f1b74SChuanhong Guo #define NFI_FDML(n) (NFI_FDM0L + (n)*8)
151764f1b74SChuanhong Guo #define NFI_FDMM(n) (NFI_FDM0M + (n)*8)
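// Each sector's FDM bytes are exposed through one such register pair: bytes
// 0-3 of sector n live in NFI_FDML(n) and bytes 4-7 in NFI_FDMM(n), LSB
// first (see mtk_snand_read_fdm()/mtk_snand_write_fdm() below).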
152764f1b74SChuanhong Guo 
153764f1b74SChuanhong Guo #define NFI_DEBUG_CON1 0x220
154764f1b74SChuanhong Guo #define WBUF_EN BIT(2)
155764f1b74SChuanhong Guo 
156764f1b74SChuanhong Guo #define NFI_MASTERSTA 0x224
157764f1b74SChuanhong Guo #define MAS_ADDR GENMASK(11, 9)
158764f1b74SChuanhong Guo #define MAS_RD GENMASK(8, 6)
159764f1b74SChuanhong Guo #define MAS_WR GENMASK(5, 3)
160764f1b74SChuanhong Guo #define MAS_RDDLY GENMASK(2, 0)
161764f1b74SChuanhong Guo #define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
1627073888cSXiangsheng Hou #define NFI_MASTERSTA_MASK_7986 3
163764f1b74SChuanhong Guo 
164764f1b74SChuanhong Guo // SNFI registers
165764f1b74SChuanhong Guo #define SNF_MAC_CTL 0x500
166764f1b74SChuanhong Guo #define MAC_XIO_SEL BIT(4)
167764f1b74SChuanhong Guo #define SF_MAC_EN BIT(3)
168764f1b74SChuanhong Guo #define SF_TRIG BIT(2)
169764f1b74SChuanhong Guo #define WIP_READY BIT(1)
170764f1b74SChuanhong Guo #define WIP BIT(0)
171764f1b74SChuanhong Guo 
172764f1b74SChuanhong Guo #define SNF_MAC_OUTL 0x504
173764f1b74SChuanhong Guo #define SNF_MAC_INL 0x508
174764f1b74SChuanhong Guo 
175764f1b74SChuanhong Guo #define SNF_RD_CTL2 0x510
176764f1b74SChuanhong Guo #define DATA_READ_DUMMY_S 8
177764f1b74SChuanhong Guo #define DATA_READ_MAX_DUMMY 0xf
178764f1b74SChuanhong Guo #define DATA_READ_CMD_S 0
179764f1b74SChuanhong Guo 
180764f1b74SChuanhong Guo #define SNF_RD_CTL3 0x514
181764f1b74SChuanhong Guo 
182764f1b74SChuanhong Guo #define SNF_PG_CTL1 0x524
183764f1b74SChuanhong Guo #define PG_LOAD_CMD_S 8
184764f1b74SChuanhong Guo 
185764f1b74SChuanhong Guo #define SNF_PG_CTL2 0x528
186764f1b74SChuanhong Guo 
187764f1b74SChuanhong Guo #define SNF_MISC_CTL 0x538
188764f1b74SChuanhong Guo #define SW_RST BIT(28)
189764f1b74SChuanhong Guo #define FIFO_RD_LTC_S 25
190764f1b74SChuanhong Guo #define PG_LOAD_X4_EN BIT(20)
191764f1b74SChuanhong Guo #define DATA_READ_MODE_S 16
192764f1b74SChuanhong Guo #define DATA_READ_MODE GENMASK(18, 16)
193764f1b74SChuanhong Guo #define DATA_READ_MODE_X1 0
194764f1b74SChuanhong Guo #define DATA_READ_MODE_X2 1
195764f1b74SChuanhong Guo #define DATA_READ_MODE_X4 2
196764f1b74SChuanhong Guo #define DATA_READ_MODE_DUAL 5
197764f1b74SChuanhong Guo #define DATA_READ_MODE_QUAD 6
1981d36c990SXiangsheng Hou #define DATA_READ_LATCH_LAT GENMASK(9, 8)
1991d36c990SXiangsheng Hou #define DATA_READ_LATCH_LAT_S 8
200764f1b74SChuanhong Guo #define PG_LOAD_CUSTOM_EN BIT(7)
201764f1b74SChuanhong Guo #define DATARD_CUSTOM_EN BIT(6)
202764f1b74SChuanhong Guo #define CS_DESELECT_CYC_S 0
203764f1b74SChuanhong Guo 
204764f1b74SChuanhong Guo #define SNF_MISC_CTL2 0x53c
205764f1b74SChuanhong Guo #define PROGRAM_LOAD_BYTE_NUM_S 16
206764f1b74SChuanhong Guo #define READ_DATA_BYTE_NUM_S 11
207764f1b74SChuanhong Guo 
208764f1b74SChuanhong Guo #define SNF_DLY_CTL3 0x548
209764f1b74SChuanhong Guo #define SFCK_SAM_DLY_S 0
2101d36c990SXiangsheng Hou #define SFCK_SAM_DLY GENMASK(5, 0)
2111d36c990SXiangsheng Hou #define SFCK_SAM_DLY_TOTAL 9
2121d36c990SXiangsheng Hou #define SFCK_SAM_DLY_RANGE 47
213764f1b74SChuanhong Guo 
214764f1b74SChuanhong Guo #define SNF_STA_CTL1 0x550
215764f1b74SChuanhong Guo #define CUS_PG_DONE BIT(28)
216764f1b74SChuanhong Guo #define CUS_READ_DONE BIT(27)
217764f1b74SChuanhong Guo #define SPI_STATE_S 0
218764f1b74SChuanhong Guo #define SPI_STATE GENMASK(3, 0)
219764f1b74SChuanhong Guo 
220764f1b74SChuanhong Guo #define SNF_CFG 0x55c
221764f1b74SChuanhong Guo #define SPI_MODE BIT(0)
222764f1b74SChuanhong Guo 
223764f1b74SChuanhong Guo #define SNF_GPRAM 0x800
224764f1b74SChuanhong Guo #define SNF_GPRAM_SIZE 0xa0
225764f1b74SChuanhong Guo 
226764f1b74SChuanhong Guo #define SNFI_POLL_INTERVAL 1000000
227764f1b74SChuanhong Guo 
228764f1b74SChuanhong Guo static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 };
229764f1b74SChuanhong Guo 
2307073888cSXiangsheng Hou static const u8 mt7986_spare_sizes[] = {
2317073888cSXiangsheng Hou 	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
2327073888cSXiangsheng Hou 	74
2337073888cSXiangsheng Hou };
2347073888cSXiangsheng Hou 
235764f1b74SChuanhong Guo struct mtk_snand_caps {
236764f1b74SChuanhong Guo 	u16 sector_size;
237764f1b74SChuanhong Guo 	u16 max_sectors;
238764f1b74SChuanhong Guo 	u16 fdm_size;
239764f1b74SChuanhong Guo 	u16 fdm_ecc_size;
240764f1b74SChuanhong Guo 	u16 fifo_size;
241764f1b74SChuanhong Guo 
242764f1b74SChuanhong Guo 	bool bbm_swap;
243764f1b74SChuanhong Guo 	bool empty_page_check;
244764f1b74SChuanhong Guo 	u32 mastersta_mask;
2457073888cSXiangsheng Hou 	u32 nandfsm_mask;
246764f1b74SChuanhong Guo 
247764f1b74SChuanhong Guo 	const u8 *spare_sizes;
248764f1b74SChuanhong Guo 	u32 num_spare_size;
249764f1b74SChuanhong Guo };
250764f1b74SChuanhong Guo 
251764f1b74SChuanhong Guo static const struct mtk_snand_caps mt7622_snand_caps = {
252764f1b74SChuanhong Guo 	.sector_size = 512,
253764f1b74SChuanhong Guo 	.max_sectors = 8,
254764f1b74SChuanhong Guo 	.fdm_size = 8,
255764f1b74SChuanhong Guo 	.fdm_ecc_size = 1,
256764f1b74SChuanhong Guo 	.fifo_size = 32,
257764f1b74SChuanhong Guo 	.bbm_swap = false,
258764f1b74SChuanhong Guo 	.empty_page_check = false,
259764f1b74SChuanhong Guo 	.mastersta_mask = NFI_MASTERSTA_MASK_7622,
2607073888cSXiangsheng Hou 	.nandfsm_mask = NFI_NAND_FSM_7622,
261764f1b74SChuanhong Guo 	.spare_sizes = mt7622_spare_sizes,
262764f1b74SChuanhong Guo 	.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
263764f1b74SChuanhong Guo };
264764f1b74SChuanhong Guo 
265764f1b74SChuanhong Guo static const struct mtk_snand_caps mt7629_snand_caps = {
266764f1b74SChuanhong Guo 	.sector_size = 512,
267764f1b74SChuanhong Guo 	.max_sectors = 8,
268764f1b74SChuanhong Guo 	.fdm_size = 8,
269764f1b74SChuanhong Guo 	.fdm_ecc_size = 1,
270764f1b74SChuanhong Guo 	.fifo_size = 32,
271764f1b74SChuanhong Guo 	.bbm_swap = true,
272764f1b74SChuanhong Guo 	.empty_page_check = false,
273764f1b74SChuanhong Guo 	.mastersta_mask = NFI_MASTERSTA_MASK_7622,
2747073888cSXiangsheng Hou 	.nandfsm_mask = NFI_NAND_FSM_7622,
275764f1b74SChuanhong Guo 	.spare_sizes = mt7622_spare_sizes,
276764f1b74SChuanhong Guo 	.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
277764f1b74SChuanhong Guo };
278764f1b74SChuanhong Guo 
2797073888cSXiangsheng Hou static const struct mtk_snand_caps mt7986_snand_caps = {
2807073888cSXiangsheng Hou 	.sector_size = 1024,
2817073888cSXiangsheng Hou 	.max_sectors = 8,
2827073888cSXiangsheng Hou 	.fdm_size = 8,
2837073888cSXiangsheng Hou 	.fdm_ecc_size = 1,
2847073888cSXiangsheng Hou 	.fifo_size = 64,
2857073888cSXiangsheng Hou 	.bbm_swap = true,
2867073888cSXiangsheng Hou 	.empty_page_check = true,
2877073888cSXiangsheng Hou 	.mastersta_mask = NFI_MASTERSTA_MASK_7986,
2887073888cSXiangsheng Hou 	.nandfsm_mask = NFI_NAND_FSM_7986,
2897073888cSXiangsheng Hou 	.spare_sizes = mt7986_spare_sizes,
2907073888cSXiangsheng Hou 	.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
2917073888cSXiangsheng Hou };
2927073888cSXiangsheng Hou 
293764f1b74SChuanhong Guo struct mtk_snand_conf {
294764f1b74SChuanhong Guo 	size_t page_size;
295764f1b74SChuanhong Guo 	size_t oob_size;
296764f1b74SChuanhong Guo 	u8 nsectors;
297764f1b74SChuanhong Guo 	u8 spare_size;
298764f1b74SChuanhong Guo };
299764f1b74SChuanhong Guo 
300764f1b74SChuanhong Guo struct mtk_snand {
301764f1b74SChuanhong Guo 	struct spi_controller *ctlr;
302764f1b74SChuanhong Guo 	struct device *dev;
303764f1b74SChuanhong Guo 	struct clk *nfi_clk;
304764f1b74SChuanhong Guo 	struct clk *pad_clk;
305e40fa328SXiangsheng Hou 	struct clk *nfi_hclk;
306764f1b74SChuanhong Guo 	void __iomem *nfi_base;
307764f1b74SChuanhong Guo 	int irq;
308764f1b74SChuanhong Guo 	struct completion op_done;
309764f1b74SChuanhong Guo 	const struct mtk_snand_caps *caps;
310764f1b74SChuanhong Guo 	struct mtk_ecc_config *ecc_cfg;
311764f1b74SChuanhong Guo 	struct mtk_ecc *ecc;
312764f1b74SChuanhong Guo 	struct mtk_snand_conf nfi_cfg;
313764f1b74SChuanhong Guo 	struct mtk_ecc_stats ecc_stats;
314764f1b74SChuanhong Guo 	struct nand_ecc_engine ecc_eng;
315764f1b74SChuanhong Guo 	bool autofmt;
316764f1b74SChuanhong Guo 	u8 *buf;
317764f1b74SChuanhong Guo 	size_t buf_len;
318764f1b74SChuanhong Guo };
319764f1b74SChuanhong Guo 
320764f1b74SChuanhong Guo static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand)
321764f1b74SChuanhong Guo {
322764f1b74SChuanhong Guo 	struct nand_ecc_engine *eng = nand->ecc.engine;
323764f1b74SChuanhong Guo 
324764f1b74SChuanhong Guo 	return container_of(eng, struct mtk_snand, ecc_eng);
325764f1b74SChuanhong Guo }
326764f1b74SChuanhong Guo 
327764f1b74SChuanhong Guo static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
328764f1b74SChuanhong Guo {
329764f1b74SChuanhong Guo 	if (snf->buf_len >= size)
330764f1b74SChuanhong Guo 		return 0;
331764f1b74SChuanhong Guo 	kfree(snf->buf);
332764f1b74SChuanhong Guo 	snf->buf = kmalloc(size, GFP_KERNEL);
333764f1b74SChuanhong Guo 	if (!snf->buf)
334764f1b74SChuanhong Guo 		return -ENOMEM;
335764f1b74SChuanhong Guo 	snf->buf_len = size;
336764f1b74SChuanhong Guo 	memset(snf->buf, 0xff, snf->buf_len);
337764f1b74SChuanhong Guo 	return 0;
338764f1b74SChuanhong Guo }
339764f1b74SChuanhong Guo 
340764f1b74SChuanhong Guo static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
341764f1b74SChuanhong Guo {
342764f1b74SChuanhong Guo 	return readl(snf->nfi_base + reg);
343764f1b74SChuanhong Guo }
344764f1b74SChuanhong Guo 
345764f1b74SChuanhong Guo static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
346764f1b74SChuanhong Guo {
347764f1b74SChuanhong Guo 	writel(val, snf->nfi_base + reg);
348764f1b74SChuanhong Guo }
349764f1b74SChuanhong Guo 
350764f1b74SChuanhong Guo static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
351764f1b74SChuanhong Guo {
352764f1b74SChuanhong Guo 	writew(val, snf->nfi_base + reg);
353764f1b74SChuanhong Guo }
354764f1b74SChuanhong Guo 
355764f1b74SChuanhong Guo static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
356764f1b74SChuanhong Guo {
357764f1b74SChuanhong Guo 	u32 val;
358764f1b74SChuanhong Guo 
359764f1b74SChuanhong Guo 	val = readl(snf->nfi_base + reg);
360764f1b74SChuanhong Guo 	val &= ~clr;
361764f1b74SChuanhong Guo 	val |= set;
362764f1b74SChuanhong Guo 	writel(val, snf->nfi_base + reg);
363764f1b74SChuanhong Guo }
364764f1b74SChuanhong Guo 
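// Copy @len bytes of MAC RX data out of the register file, starting at the
// (possibly unaligned) register offset @reg: each covered 32-bit word is read
// once and the requested bytes are extracted LSB first.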
365764f1b74SChuanhong Guo static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
366764f1b74SChuanhong Guo {
367764f1b74SChuanhong Guo 	u32 i, val = 0, es = sizeof(u32);
368764f1b74SChuanhong Guo 
369764f1b74SChuanhong Guo 	for (i = reg; i < reg + len; i++) {
370764f1b74SChuanhong Guo 		if (i == reg || i % es == 0)
371764f1b74SChuanhong Guo 			val = nfi_read32(snf, i & ~(es - 1));
372764f1b74SChuanhong Guo 
373764f1b74SChuanhong Guo 		*data++ = (u8)(val >> (8 * (i % es)));
374764f1b74SChuanhong Guo 	}
375764f1b74SChuanhong Guo }
376764f1b74SChuanhong Guo 
377764f1b74SChuanhong Guo static int mtk_nfi_reset(struct mtk_snand *snf)
378764f1b74SChuanhong Guo {
379764f1b74SChuanhong Guo 	u32 val, fifo_mask;
380764f1b74SChuanhong Guo 	int ret;
381764f1b74SChuanhong Guo 
382764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
383764f1b74SChuanhong Guo 
384764f1b74SChuanhong Guo 	ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
385764f1b74SChuanhong Guo 				 !(val & snf->caps->mastersta_mask), 0,
386764f1b74SChuanhong Guo 				 SNFI_POLL_INTERVAL);
387764f1b74SChuanhong Guo 	if (ret) {
388764f1b74SChuanhong Guo 		dev_err(snf->dev, "NFI master is still busy after reset\n");
389764f1b74SChuanhong Guo 		return ret;
390764f1b74SChuanhong Guo 	}
391764f1b74SChuanhong Guo 
392764f1b74SChuanhong Guo 	ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
3937073888cSXiangsheng Hou 				 !(val & (NFI_FSM | snf->caps->nandfsm_mask)), 0,
394764f1b74SChuanhong Guo 				 SNFI_POLL_INTERVAL);
395764f1b74SChuanhong Guo 	if (ret) {
396764f1b74SChuanhong Guo 		dev_err(snf->dev, "Failed to reset NFI\n");
397764f1b74SChuanhong Guo 		return ret;
398764f1b74SChuanhong Guo 	}
399764f1b74SChuanhong Guo 
400764f1b74SChuanhong Guo 	fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
401764f1b74SChuanhong Guo 		    ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
402764f1b74SChuanhong Guo 	ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
403764f1b74SChuanhong Guo 				 !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
404764f1b74SChuanhong Guo 	if (ret) {
405764f1b74SChuanhong Guo 		dev_err(snf->dev, "NFI FIFOs are not empty\n");
406764f1b74SChuanhong Guo 		return ret;
407764f1b74SChuanhong Guo 	}
408764f1b74SChuanhong Guo 
409764f1b74SChuanhong Guo 	return 0;
410764f1b74SChuanhong Guo }
411764f1b74SChuanhong Guo 
412764f1b74SChuanhong Guo static int mtk_snand_mac_reset(struct mtk_snand *snf)
413764f1b74SChuanhong Guo {
414764f1b74SChuanhong Guo 	int ret;
415764f1b74SChuanhong Guo 	u32 val;
416764f1b74SChuanhong Guo 
417764f1b74SChuanhong Guo 	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);
418764f1b74SChuanhong Guo 
419764f1b74SChuanhong Guo 	ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
420764f1b74SChuanhong Guo 				 !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
421764f1b74SChuanhong Guo 	if (ret)
422764f1b74SChuanhong Guo 		dev_err(snf->dev, "Failed to reset SNFI MAC\n");
423764f1b74SChuanhong Guo 
424764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_MISC_CTL,
425764f1b74SChuanhong Guo 		    (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S));
426764f1b74SChuanhong Guo 
427764f1b74SChuanhong Guo 	return ret;
428764f1b74SChuanhong Guo }
429764f1b74SChuanhong Guo 
430764f1b74SChuanhong Guo static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
431764f1b74SChuanhong Guo {
432764f1b74SChuanhong Guo 	int ret;
433764f1b74SChuanhong Guo 	u32 val;
434764f1b74SChuanhong Guo 
435764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
436764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_MAC_OUTL, outlen);
437764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_MAC_INL, inlen);
438764f1b74SChuanhong Guo 
439764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);
440764f1b74SChuanhong Guo 
441764f1b74SChuanhong Guo 	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
442764f1b74SChuanhong Guo 				 val & WIP_READY, 0, SNFI_POLL_INTERVAL);
443764f1b74SChuanhong Guo 	if (ret) {
444764f1b74SChuanhong Guo 		dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
445764f1b74SChuanhong Guo 		goto cleanup;
446764f1b74SChuanhong Guo 	}
447764f1b74SChuanhong Guo 
448764f1b74SChuanhong Guo 	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
449764f1b74SChuanhong Guo 				 0, SNFI_POLL_INTERVAL);
450764f1b74SChuanhong Guo 	if (ret)
451764f1b74SChuanhong Guo 		dev_err(snf->dev, "Timed out waiting for WIP cleared\n");
452764f1b74SChuanhong Guo 
453764f1b74SChuanhong Guo cleanup:
454764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_MAC_CTL, 0);
455764f1b74SChuanhong Guo 
456764f1b74SChuanhong Guo 	return ret;
457764f1b74SChuanhong Guo }
458764f1b74SChuanhong Guo 
459764f1b74SChuanhong Guo static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
460764f1b74SChuanhong Guo {
461764f1b74SChuanhong Guo 	u32 rx_len = 0;
462764f1b74SChuanhong Guo 	u32 reg_offs = 0;
463764f1b74SChuanhong Guo 	u32 val = 0;
464764f1b74SChuanhong Guo 	const u8 *tx_buf = NULL;
465764f1b74SChuanhong Guo 	u8 *rx_buf = NULL;
466764f1b74SChuanhong Guo 	int i, ret;
467764f1b74SChuanhong Guo 	u8 b;
468764f1b74SChuanhong Guo 
469764f1b74SChuanhong Guo 	if (op->data.dir == SPI_MEM_DATA_IN) {
470764f1b74SChuanhong Guo 		rx_len = op->data.nbytes;
471764f1b74SChuanhong Guo 		rx_buf = op->data.buf.in;
472764f1b74SChuanhong Guo 	} else {
473764f1b74SChuanhong Guo 		tx_buf = op->data.buf.out;
474764f1b74SChuanhong Guo 	}
475764f1b74SChuanhong Guo 
476764f1b74SChuanhong Guo 	mtk_snand_mac_reset(snf);
477764f1b74SChuanhong Guo 
478764f1b74SChuanhong Guo 	for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) {
479764f1b74SChuanhong Guo 		b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
480764f1b74SChuanhong Guo 		val |= b << (8 * (reg_offs % 4));
481764f1b74SChuanhong Guo 		if (reg_offs % 4 == 3) {
482764f1b74SChuanhong Guo 			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
483764f1b74SChuanhong Guo 			val = 0;
484764f1b74SChuanhong Guo 		}
485764f1b74SChuanhong Guo 	}
486764f1b74SChuanhong Guo 
487764f1b74SChuanhong Guo 	for (i = 0; i < op->addr.nbytes; i++, reg_offs++) {
488764f1b74SChuanhong Guo 		b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
489764f1b74SChuanhong Guo 		val |= b << (8 * (reg_offs % 4));
490764f1b74SChuanhong Guo 		if (reg_offs % 4 == 3) {
491764f1b74SChuanhong Guo 			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
492764f1b74SChuanhong Guo 			val = 0;
493764f1b74SChuanhong Guo 		}
494764f1b74SChuanhong Guo 	}
495764f1b74SChuanhong Guo 
496764f1b74SChuanhong Guo 	for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) {
497764f1b74SChuanhong Guo 		if (reg_offs % 4 == 3) {
498764f1b74SChuanhong Guo 			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
499764f1b74SChuanhong Guo 			val = 0;
500764f1b74SChuanhong Guo 		}
501764f1b74SChuanhong Guo 	}
502764f1b74SChuanhong Guo 
503764f1b74SChuanhong Guo 	if (op->data.dir == SPI_MEM_DATA_OUT) {
504764f1b74SChuanhong Guo 		for (i = 0; i < op->data.nbytes; i++, reg_offs++) {
505764f1b74SChuanhong Guo 			val |= tx_buf[i] << (8 * (reg_offs % 4));
506764f1b74SChuanhong Guo 			if (reg_offs % 4 == 3) {
507764f1b74SChuanhong Guo 				nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
508764f1b74SChuanhong Guo 				val = 0;
509764f1b74SChuanhong Guo 			}
510764f1b74SChuanhong Guo 		}
511764f1b74SChuanhong Guo 	}
512764f1b74SChuanhong Guo 
513764f1b74SChuanhong Guo 	if (reg_offs % 4)
514764f1b74SChuanhong Guo 		nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);
515764f1b74SChuanhong Guo 
516764f1b74SChuanhong Guo 	for (i = 0; i < reg_offs; i += 4)
517764f1b74SChuanhong Guo 		dev_dbg(snf->dev, "%d: %08X", i,
518764f1b74SChuanhong Guo 			nfi_read32(snf, SNF_GPRAM + i));
519764f1b74SChuanhong Guo 
520764f1b74SChuanhong Guo 	dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);
521764f1b74SChuanhong Guo 
522764f1b74SChuanhong Guo 	ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
523764f1b74SChuanhong Guo 	if (ret)
524764f1b74SChuanhong Guo 		return ret;
525764f1b74SChuanhong Guo 
526764f1b74SChuanhong Guo 	if (!rx_len)
527764f1b74SChuanhong Guo 		return 0;
528764f1b74SChuanhong Guo 
529764f1b74SChuanhong Guo 	nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
530764f1b74SChuanhong Guo 	return 0;
531764f1b74SChuanhong Guo }
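// A sketch of the GPRAM packing done by mtk_snand_mac_io() above, for a
// hypothetical op with a 1-byte opcode 0x0b, a 3-byte address 0xaabbcc and one
// dummy byte: the TX byte stream is { 0x0b, 0xaa, 0xbb, 0xcc, <dummy> },
// packed LSB first, so SNF_GPRAM + 0 ends up as 0xccbbaa0b and SNF_GPRAM + 4
// as 0x00000000, and mtk_snand_mac_trigger() is then called with outlen = 5
// and inlen = rx_len.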
532764f1b74SChuanhong Guo 
533764f1b74SChuanhong Guo static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
534764f1b74SChuanhong Guo 				   u32 oob_size)
535764f1b74SChuanhong Guo {
536764f1b74SChuanhong Guo 	int spare_idx = -1;
537764f1b74SChuanhong Guo 	u32 spare_size, spare_size_shift, pagesize_idx;
538764f1b74SChuanhong Guo 	u32 sector_size_512;
539764f1b74SChuanhong Guo 	u8 nsectors;
540764f1b74SChuanhong Guo 	int i;
541764f1b74SChuanhong Guo 
542764f1b74SChuanhong Guo 	// skip if it's already configured as required.
543764f1b74SChuanhong Guo 	if (snf->nfi_cfg.page_size == page_size &&
544764f1b74SChuanhong Guo 	    snf->nfi_cfg.oob_size == oob_size)
545764f1b74SChuanhong Guo 		return 0;
546764f1b74SChuanhong Guo 
547764f1b74SChuanhong Guo 	nsectors = page_size / snf->caps->sector_size;
548764f1b74SChuanhong Guo 	if (nsectors > snf->caps->max_sectors) {
549764f1b74SChuanhong Guo 		dev_err(snf->dev, "too many sectors required.\n");
550764f1b74SChuanhong Guo 		goto err;
551764f1b74SChuanhong Guo 	}
552764f1b74SChuanhong Guo 
553764f1b74SChuanhong Guo 	if (snf->caps->sector_size == 512) {
554764f1b74SChuanhong Guo 		sector_size_512 = NFI_SEC_SEL_512;
555764f1b74SChuanhong Guo 		spare_size_shift = NFI_SPARE_SIZE_S;
556764f1b74SChuanhong Guo 	} else {
557764f1b74SChuanhong Guo 		sector_size_512 = 0;
558764f1b74SChuanhong Guo 		spare_size_shift = NFI_SPARE_SIZE_LS_S;
559764f1b74SChuanhong Guo 	}
560764f1b74SChuanhong Guo 
561764f1b74SChuanhong Guo 	switch (page_size) {
562764f1b74SChuanhong Guo 	case SZ_512:
563764f1b74SChuanhong Guo 		pagesize_idx = NFI_PAGE_SIZE_512_2K;
564764f1b74SChuanhong Guo 		break;
565764f1b74SChuanhong Guo 	case SZ_2K:
566764f1b74SChuanhong Guo 		if (snf->caps->sector_size == 512)
567764f1b74SChuanhong Guo 			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
568764f1b74SChuanhong Guo 		else
569764f1b74SChuanhong Guo 			pagesize_idx = NFI_PAGE_SIZE_512_2K;
570764f1b74SChuanhong Guo 		break;
571764f1b74SChuanhong Guo 	case SZ_4K:
572764f1b74SChuanhong Guo 		if (snf->caps->sector_size == 512)
573764f1b74SChuanhong Guo 			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
574764f1b74SChuanhong Guo 		else
575764f1b74SChuanhong Guo 			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
576764f1b74SChuanhong Guo 		break;
577764f1b74SChuanhong Guo 	case SZ_8K:
578764f1b74SChuanhong Guo 		if (snf->caps->sector_size == 512)
579764f1b74SChuanhong Guo 			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
580764f1b74SChuanhong Guo 		else
581764f1b74SChuanhong Guo 			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
582764f1b74SChuanhong Guo 		break;
583764f1b74SChuanhong Guo 	case SZ_16K:
584764f1b74SChuanhong Guo 		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
585764f1b74SChuanhong Guo 		break;
586764f1b74SChuanhong Guo 	default:
587764f1b74SChuanhong Guo 		dev_err(snf->dev, "unsupported page size.\n");
588764f1b74SChuanhong Guo 		goto err;
589764f1b74SChuanhong Guo 	}
590764f1b74SChuanhong Guo 
591764f1b74SChuanhong Guo 	spare_size = oob_size / nsectors;
592764f1b74SChuanhong Guo 	// If we're using the 1KB sector size, HW will automatically double the
593764f1b74SChuanhong Guo 	// spare size. We should only use half of the value in this case.
594764f1b74SChuanhong Guo 	if (snf->caps->sector_size == 1024)
595764f1b74SChuanhong Guo 		spare_size /= 2;
596764f1b74SChuanhong Guo 
597764f1b74SChuanhong Guo 	for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
598764f1b74SChuanhong Guo 		if (snf->caps->spare_sizes[i] <= spare_size) {
599764f1b74SChuanhong Guo 			spare_size = snf->caps->spare_sizes[i];
600764f1b74SChuanhong Guo 			if (snf->caps->sector_size == 1024)
601764f1b74SChuanhong Guo 				spare_size *= 2;
602764f1b74SChuanhong Guo 			spare_idx = i;
603764f1b74SChuanhong Guo 			break;
604764f1b74SChuanhong Guo 		}
605764f1b74SChuanhong Guo 	}
606764f1b74SChuanhong Guo 
607764f1b74SChuanhong Guo 	if (spare_idx < 0) {
608764f1b74SChuanhong Guo 		dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
609764f1b74SChuanhong Guo 		goto err;
610764f1b74SChuanhong Guo 	}
611764f1b74SChuanhong Guo 
612764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_PAGEFMT,
613764f1b74SChuanhong Guo 		    (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
614764f1b74SChuanhong Guo 			    (snf->caps->fdm_size << NFI_FDM_NUM_S) |
615764f1b74SChuanhong Guo 			    (spare_idx << spare_size_shift) |
616764f1b74SChuanhong Guo 			    (pagesize_idx << NFI_PAGE_SIZE_S) |
617764f1b74SChuanhong Guo 			    sector_size_512);
618764f1b74SChuanhong Guo 
619764f1b74SChuanhong Guo 	snf->nfi_cfg.page_size = page_size;
620764f1b74SChuanhong Guo 	snf->nfi_cfg.oob_size = oob_size;
621764f1b74SChuanhong Guo 	snf->nfi_cfg.nsectors = nsectors;
622764f1b74SChuanhong Guo 	snf->nfi_cfg.spare_size = spare_size;
623764f1b74SChuanhong Guo 
624764f1b74SChuanhong Guo 	dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
625764f1b74SChuanhong Guo 		snf->caps->sector_size, spare_size, nsectors);
626764f1b74SChuanhong Guo 	return snand_prepare_bouncebuf(snf, page_size + oob_size);
627764f1b74SChuanhong Guo err:
628764f1b74SChuanhong Guo 	dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
629764f1b74SChuanhong Guo 		oob_size);
630764f1b74SChuanhong Guo 	return -EOPNOTSUPP;
631764f1b74SChuanhong Guo }
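// Worked example for mtk_snand_setup_pagefmt() above (hypothetical 2048 + 64
// layout): with 512-byte sectors this gives nsectors = 4 and spare_size =
// 64 / 4 = 16, which matches the smallest entry of the spare size table
// (spare_idx = 0) and NFI_PAGE_SIZE_2K_4K. With 1024-byte sectors (e.g. the
// mt7986 caps), nsectors = 2 and the 32-byte spare is halved to 16 for the
// table lookup, then doubled again when stored in snf->nfi_cfg.spare_size.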
632764f1b74SChuanhong Guo 
633764f1b74SChuanhong Guo static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
634764f1b74SChuanhong Guo 				   struct mtd_oob_region *oobecc)
635764f1b74SChuanhong Guo {
636764f1b74SChuanhong Guo 	// ECC area is not accessible
637764f1b74SChuanhong Guo 	return -ERANGE;
638764f1b74SChuanhong Guo }
639764f1b74SChuanhong Guo 
640764f1b74SChuanhong Guo static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
641764f1b74SChuanhong Guo 				    struct mtd_oob_region *oobfree)
642764f1b74SChuanhong Guo {
643764f1b74SChuanhong Guo 	struct nand_device *nand = mtd_to_nanddev(mtd);
644764f1b74SChuanhong Guo 	struct mtk_snand *ms = nand_to_mtk_snand(nand);
645764f1b74SChuanhong Guo 
646764f1b74SChuanhong Guo 	if (section >= ms->nfi_cfg.nsectors)
647764f1b74SChuanhong Guo 		return -ERANGE;
648764f1b74SChuanhong Guo 
649764f1b74SChuanhong Guo 	oobfree->length = ms->caps->fdm_size - 1;
650764f1b74SChuanhong Guo 	oobfree->offset = section * ms->caps->fdm_size + 1;
651764f1b74SChuanhong Guo 	return 0;
652764f1b74SChuanhong Guo }
653764f1b74SChuanhong Guo 
654764f1b74SChuanhong Guo static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
655764f1b74SChuanhong Guo 	.ecc = mtk_snand_ooblayout_ecc,
656764f1b74SChuanhong Guo 	.free = mtk_snand_ooblayout_free,
657764f1b74SChuanhong Guo };
658764f1b74SChuanhong Guo 
659764f1b74SChuanhong Guo static int mtk_snand_ecc_init_ctx(struct nand_device *nand)
660764f1b74SChuanhong Guo {
661764f1b74SChuanhong Guo 	struct mtk_snand *snf = nand_to_mtk_snand(nand);
662764f1b74SChuanhong Guo 	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
663764f1b74SChuanhong Guo 	struct nand_ecc_props *reqs = &nand->ecc.requirements;
664764f1b74SChuanhong Guo 	struct nand_ecc_props *user = &nand->ecc.user_conf;
665764f1b74SChuanhong Guo 	struct mtd_info *mtd = nanddev_to_mtd(nand);
666764f1b74SChuanhong Guo 	int step_size = 0, strength = 0, desired_correction = 0, steps;
667764f1b74SChuanhong Guo 	bool ecc_user = false;
668764f1b74SChuanhong Guo 	int ret;
669764f1b74SChuanhong Guo 	u32 parity_bits, max_ecc_bytes;
670764f1b74SChuanhong Guo 	struct mtk_ecc_config *ecc_cfg;
671764f1b74SChuanhong Guo 
672764f1b74SChuanhong Guo 	ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
673764f1b74SChuanhong Guo 				      nand->memorg.oobsize);
674764f1b74SChuanhong Guo 	if (ret)
675764f1b74SChuanhong Guo 		return ret;
676764f1b74SChuanhong Guo 
677764f1b74SChuanhong Guo 	ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
678764f1b74SChuanhong Guo 	if (!ecc_cfg)
679764f1b74SChuanhong Guo 		return -ENOMEM;
680764f1b74SChuanhong Guo 
681764f1b74SChuanhong Guo 	nand->ecc.ctx.priv = ecc_cfg;
682764f1b74SChuanhong Guo 
683764f1b74SChuanhong Guo 	if (user->step_size && user->strength) {
684764f1b74SChuanhong Guo 		step_size = user->step_size;
685764f1b74SChuanhong Guo 		strength = user->strength;
686764f1b74SChuanhong Guo 		ecc_user = true;
687764f1b74SChuanhong Guo 	} else if (reqs->step_size && reqs->strength) {
688764f1b74SChuanhong Guo 		step_size = reqs->step_size;
689764f1b74SChuanhong Guo 		strength = reqs->strength;
690764f1b74SChuanhong Guo 	}
691764f1b74SChuanhong Guo 
692764f1b74SChuanhong Guo 	if (step_size && strength) {
693764f1b74SChuanhong Guo 		steps = mtd->writesize / step_size;
694764f1b74SChuanhong Guo 		desired_correction = steps * strength;
695764f1b74SChuanhong Guo 		strength = desired_correction / snf->nfi_cfg.nsectors;
696764f1b74SChuanhong Guo 	}
697764f1b74SChuanhong Guo 
698764f1b74SChuanhong Guo 	ecc_cfg->mode = ECC_NFI_MODE;
699764f1b74SChuanhong Guo 	ecc_cfg->sectors = snf->nfi_cfg.nsectors;
700764f1b74SChuanhong Guo 	ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;
701764f1b74SChuanhong Guo 
702764f1b74SChuanhong Guo 	// calculate the max possible strength under current page format
703764f1b74SChuanhong Guo 	parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
704764f1b74SChuanhong Guo 	max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
705764f1b74SChuanhong Guo 	ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits;
706764f1b74SChuanhong Guo 	mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);
707764f1b74SChuanhong Guo 
708764f1b74SChuanhong Guo 	// if there's a user-requested strength, find the minimum strength that
709764f1b74SChuanhong Guo 	// meets the requirement. Otherwise use the maximum strength which is
710764f1b74SChuanhong Guo 	// expected by the BootROM.
711764f1b74SChuanhong Guo 	if (ecc_user && strength) {
712764f1b74SChuanhong Guo 		u32 s_next = ecc_cfg->strength - 1;
713764f1b74SChuanhong Guo 
714764f1b74SChuanhong Guo 		while (1) {
715764f1b74SChuanhong Guo 			mtk_ecc_adjust_strength(snf->ecc, &s_next);
716764f1b74SChuanhong Guo 			if (s_next >= ecc_cfg->strength)
717764f1b74SChuanhong Guo 				break;
718764f1b74SChuanhong Guo 			if (s_next < strength)
719764f1b74SChuanhong Guo 				break;
			ecc_cfg->strength = s_next;
720764f1b74SChuanhong Guo 			s_next = ecc_cfg->strength - 1;
721764f1b74SChuanhong Guo 		}
722764f1b74SChuanhong Guo 	}
723764f1b74SChuanhong Guo 
724764f1b74SChuanhong Guo 	mtd_set_ooblayout(mtd, &mtk_snand_ooblayout);
725764f1b74SChuanhong Guo 
726764f1b74SChuanhong Guo 	conf->step_size = snf->caps->sector_size;
727764f1b74SChuanhong Guo 	conf->strength = ecc_cfg->strength;
728764f1b74SChuanhong Guo 
729764f1b74SChuanhong Guo 	if (ecc_cfg->strength < strength)
730764f1b74SChuanhong Guo 		dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
731764f1b74SChuanhong Guo 			 strength);
732764f1b74SChuanhong Guo 	dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
733764f1b74SChuanhong Guo 		 ecc_cfg->strength, snf->caps->sector_size);
734764f1b74SChuanhong Guo 
735764f1b74SChuanhong Guo 	return 0;
736764f1b74SChuanhong Guo }
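// Worked example for the strength calculation in mtk_snand_ecc_init_ctx()
// above (hypothetical numbers): with spare_size = 28 and fdm_size = 8 there
// are 20 bytes of parity per sector; assuming 13 parity bits per correction
// step, the maximum strength is 20 * 8 / 13 = 12 bits, which
// mtk_ecc_adjust_strength() then clamps to a strength the ECC engine actually
// supports.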
737764f1b74SChuanhong Guo 
738764f1b74SChuanhong Guo static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand)
739764f1b74SChuanhong Guo {
740764f1b74SChuanhong Guo 	struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
741764f1b74SChuanhong Guo 
742764f1b74SChuanhong Guo 	kfree(ecc_cfg);
743764f1b74SChuanhong Guo }
744764f1b74SChuanhong Guo 
745764f1b74SChuanhong Guo static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand,
746764f1b74SChuanhong Guo 					struct nand_page_io_req *req)
747764f1b74SChuanhong Guo {
748764f1b74SChuanhong Guo 	struct mtk_snand *snf = nand_to_mtk_snand(nand);
749764f1b74SChuanhong Guo 	struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
750764f1b74SChuanhong Guo 	int ret;
751764f1b74SChuanhong Guo 
752764f1b74SChuanhong Guo 	ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
753764f1b74SChuanhong Guo 				      nand->memorg.oobsize);
754764f1b74SChuanhong Guo 	if (ret)
755764f1b74SChuanhong Guo 		return ret;
756764f1b74SChuanhong Guo 	snf->autofmt = true;
757764f1b74SChuanhong Guo 	snf->ecc_cfg = ecc_cfg;
758764f1b74SChuanhong Guo 	return 0;
759764f1b74SChuanhong Guo }
760764f1b74SChuanhong Guo 
761764f1b74SChuanhong Guo static int mtk_snand_ecc_finish_io_req(struct nand_device *nand,
762764f1b74SChuanhong Guo 				       struct nand_page_io_req *req)
763764f1b74SChuanhong Guo {
764764f1b74SChuanhong Guo 	struct mtk_snand *snf = nand_to_mtk_snand(nand);
765764f1b74SChuanhong Guo 	struct mtd_info *mtd = nanddev_to_mtd(nand);
766764f1b74SChuanhong Guo 
767764f1b74SChuanhong Guo 	snf->ecc_cfg = NULL;
768764f1b74SChuanhong Guo 	snf->autofmt = false;
769764f1b74SChuanhong Guo 	if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ))
770764f1b74SChuanhong Guo 		return 0;
771764f1b74SChuanhong Guo 
772764f1b74SChuanhong Guo 	if (snf->ecc_stats.failed)
773764f1b74SChuanhong Guo 		mtd->ecc_stats.failed += snf->ecc_stats.failed;
774764f1b74SChuanhong Guo 	mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
775764f1b74SChuanhong Guo 	return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
776764f1b74SChuanhong Guo }
777764f1b74SChuanhong Guo 
778764f1b74SChuanhong Guo static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = {
779764f1b74SChuanhong Guo 	.init_ctx = mtk_snand_ecc_init_ctx,
780764f1b74SChuanhong Guo 	.cleanup_ctx = mtk_snand_ecc_cleanup_ctx,
781764f1b74SChuanhong Guo 	.prepare_io_req = mtk_snand_ecc_prepare_io_req,
782764f1b74SChuanhong Guo 	.finish_io_req = mtk_snand_ecc_finish_io_req,
783764f1b74SChuanhong Guo };
784764f1b74SChuanhong Guo 
785764f1b74SChuanhong Guo static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
786764f1b74SChuanhong Guo {
787764f1b74SChuanhong Guo 	u32 vall, valm;
788764f1b74SChuanhong Guo 	u8 *oobptr = buf;
789764f1b74SChuanhong Guo 	int i, j;
790764f1b74SChuanhong Guo 
791764f1b74SChuanhong Guo 	for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
792764f1b74SChuanhong Guo 		vall = nfi_read32(snf, NFI_FDML(i));
793764f1b74SChuanhong Guo 		valm = nfi_read32(snf, NFI_FDMM(i));
794764f1b74SChuanhong Guo 
795764f1b74SChuanhong Guo 		for (j = 0; j < snf->caps->fdm_size; j++)
796764f1b74SChuanhong Guo 			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
797764f1b74SChuanhong Guo 
798764f1b74SChuanhong Guo 		oobptr += snf->caps->fdm_size;
799764f1b74SChuanhong Guo 	}
800764f1b74SChuanhong Guo }
801764f1b74SChuanhong Guo 
802764f1b74SChuanhong Guo static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
803764f1b74SChuanhong Guo {
804764f1b74SChuanhong Guo 	u32 fdm_size = snf->caps->fdm_size;
805764f1b74SChuanhong Guo 	const u8 *oobptr = buf;
806764f1b74SChuanhong Guo 	u32 vall, valm;
807764f1b74SChuanhong Guo 	int i, j;
808764f1b74SChuanhong Guo 
809764f1b74SChuanhong Guo 	for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
810764f1b74SChuanhong Guo 		vall = 0;
811764f1b74SChuanhong Guo 		valm = 0;
812764f1b74SChuanhong Guo 
813764f1b74SChuanhong Guo 		for (j = 0; j < 8; j++) {
814764f1b74SChuanhong Guo 			if (j < 4)
815764f1b74SChuanhong Guo 				vall |= (j < fdm_size ? oobptr[j] : 0xff)
816764f1b74SChuanhong Guo 					<< (j * 8);
817764f1b74SChuanhong Guo 			else
818764f1b74SChuanhong Guo 				valm |= (j < fdm_size ? oobptr[j] : 0xff)
819764f1b74SChuanhong Guo 					<< ((j - 4) * 8);
820764f1b74SChuanhong Guo 		}
821764f1b74SChuanhong Guo 
822764f1b74SChuanhong Guo 		nfi_write32(snf, NFI_FDML(i), vall);
823764f1b74SChuanhong Guo 		nfi_write32(snf, NFI_FDMM(i), valm);
824764f1b74SChuanhong Guo 
825764f1b74SChuanhong Guo 		oobptr += fdm_size;
826764f1b74SChuanhong Guo 	}
827764f1b74SChuanhong Guo }
828764f1b74SChuanhong Guo 
829764f1b74SChuanhong Guo static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
830764f1b74SChuanhong Guo {
831764f1b74SChuanhong Guo 	u32 buf_bbm_pos, fdm_bbm_pos;
832764f1b74SChuanhong Guo 
833764f1b74SChuanhong Guo 	if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
834764f1b74SChuanhong Guo 		return;
835764f1b74SChuanhong Guo 
836764f1b74SChuanhong Guo 	// swap the byte at [pagesize] on the NAND with the first FDM byte
837764f1b74SChuanhong Guo 	// of the last sector.
838764f1b74SChuanhong Guo 	buf_bbm_pos = snf->nfi_cfg.page_size -
839764f1b74SChuanhong Guo 		      (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
840764f1b74SChuanhong Guo 	fdm_bbm_pos = snf->nfi_cfg.page_size +
841764f1b74SChuanhong Guo 		      (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
842764f1b74SChuanhong Guo 
843764f1b74SChuanhong Guo 	swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]);
844764f1b74SChuanhong Guo }
845764f1b74SChuanhong Guo 
846764f1b74SChuanhong Guo static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
847764f1b74SChuanhong Guo {
848764f1b74SChuanhong Guo 	u32 fdm_bbm_pos1, fdm_bbm_pos2;
849764f1b74SChuanhong Guo 
850764f1b74SChuanhong Guo 	if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
851764f1b74SChuanhong Guo 		return;
852764f1b74SChuanhong Guo 
853764f1b74SChuanhong Guo 	// swap the first fdm byte in the first and the last sector.
854764f1b74SChuanhong Guo 	fdm_bbm_pos1 = snf->nfi_cfg.page_size;
855764f1b74SChuanhong Guo 	fdm_bbm_pos2 = snf->nfi_cfg.page_size +
856764f1b74SChuanhong Guo 		       (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
857764f1b74SChuanhong Guo 	swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
858764f1b74SChuanhong Guo }
859764f1b74SChuanhong Guo 
860764f1b74SChuanhong Guo static int mtk_snand_read_page_cache(struct mtk_snand *snf,
861764f1b74SChuanhong Guo 				     const struct spi_mem_op *op)
862764f1b74SChuanhong Guo {
863764f1b74SChuanhong Guo 	u8 *buf = snf->buf;
864764f1b74SChuanhong Guo 	u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
865764f1b74SChuanhong Guo 	// the address part to be sent by the controller
866764f1b74SChuanhong Guo 	u32 op_addr = op->addr.val;
867764f1b74SChuanhong Guo 	// where to start copying data from bounce buffer
868764f1b74SChuanhong Guo 	u32 rd_offset = 0;
869764f1b74SChuanhong Guo 	u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth);
870764f1b74SChuanhong Guo 	u32 op_mode = 0;
871764f1b74SChuanhong Guo 	u32 dma_len = snf->buf_len;
872764f1b74SChuanhong Guo 	int ret = 0;
873764f1b74SChuanhong Guo 	u32 rd_mode, rd_bytes, val;
874764f1b74SChuanhong Guo 	dma_addr_t buf_dma;
875764f1b74SChuanhong Guo 
876764f1b74SChuanhong Guo 	if (snf->autofmt) {
877764f1b74SChuanhong Guo 		u32 last_bit;
878764f1b74SChuanhong Guo 		u32 mask;
879764f1b74SChuanhong Guo 
880764f1b74SChuanhong Guo 		dma_len = snf->nfi_cfg.page_size;
881764f1b74SChuanhong Guo 		op_mode = CNFG_AUTO_FMT_EN;
882764f1b74SChuanhong Guo 		if (op->data.ecc)
883764f1b74SChuanhong Guo 			op_mode |= CNFG_HW_ECC_EN;
884764f1b74SChuanhong Guo 		// extract the plane bit:
885764f1b74SChuanhong Guo 		// Find the highest bit set in (pagesize+oobsize).
886764f1b74SChuanhong Guo 		// Bits higher than that in op->addr are kept and sent over SPI
887764f1b74SChuanhong Guo 		// Lower bits are used as an offset for copying data from DMA
888764f1b74SChuanhong Guo 		// bounce buffer.
889764f1b74SChuanhong Guo 		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
890764f1b74SChuanhong Guo 		mask = (1 << last_bit) - 1;
891764f1b74SChuanhong Guo 		rd_offset = op_addr & mask;
892764f1b74SChuanhong Guo 		op_addr &= ~mask;
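		// e.g. for a hypothetical 2048 + 64 layout: fls(2112) = 12, so
		// mask = 0xfff; bits [11:0] of op->addr become the copy offset
		// into the bounce buffer and any higher bits (such as a plane
		// select bit) stay in op_addr and are sent on the bus.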
893764f1b74SChuanhong Guo 
894764f1b74SChuanhong Guo 		// check if we can dma to the caller memory
895764f1b74SChuanhong Guo 		if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
896764f1b74SChuanhong Guo 			buf = op->data.buf.in;
897764f1b74SChuanhong Guo 	}
898764f1b74SChuanhong Guo 	mtk_snand_mac_reset(snf);
899764f1b74SChuanhong Guo 	mtk_nfi_reset(snf);
900764f1b74SChuanhong Guo 
901764f1b74SChuanhong Guo 	// command and dummy cycles
902764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_RD_CTL2,
903764f1b74SChuanhong Guo 		    (dummy_clk << DATA_READ_DUMMY_S) |
904764f1b74SChuanhong Guo 			    (op->cmd.opcode << DATA_READ_CMD_S));
905764f1b74SChuanhong Guo 
906764f1b74SChuanhong Guo 	// read address
907764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_RD_CTL3, op_addr);
908764f1b74SChuanhong Guo 
909764f1b74SChuanhong Guo 	// Set read op_mode
910764f1b74SChuanhong Guo 	if (op->data.buswidth == 4)
911764f1b74SChuanhong Guo 		rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD :
912764f1b74SChuanhong Guo 						   DATA_READ_MODE_X4;
913764f1b74SChuanhong Guo 	else if (op->data.buswidth == 2)
914764f1b74SChuanhong Guo 		rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL :
915764f1b74SChuanhong Guo 						   DATA_READ_MODE_X2;
916764f1b74SChuanhong Guo 	else
917764f1b74SChuanhong Guo 		rd_mode = DATA_READ_MODE_X1;
918764f1b74SChuanhong Guo 	rd_mode <<= DATA_READ_MODE_S;
919764f1b74SChuanhong Guo 	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
920764f1b74SChuanhong Guo 		  rd_mode | DATARD_CUSTOM_EN);
921764f1b74SChuanhong Guo 
922764f1b74SChuanhong Guo 	// Set bytes to read
923764f1b74SChuanhong Guo 	rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
924764f1b74SChuanhong Guo 		   snf->nfi_cfg.nsectors;
925764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_MISC_CTL2,
926764f1b74SChuanhong Guo 		    (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes);
927764f1b74SChuanhong Guo 
928764f1b74SChuanhong Guo 	// NFI read prepare
929764f1b74SChuanhong Guo 	nfi_write16(snf, NFI_CNFG,
930764f1b74SChuanhong Guo 		    (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN |
931764f1b74SChuanhong Guo 			    CNFG_READ_MODE | CNFG_DMA_MODE | op_mode);
932764f1b74SChuanhong Guo 
933764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
934764f1b74SChuanhong Guo 
935764f1b74SChuanhong Guo 	buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
93673c1a515SDan Carpenter 	ret = dma_mapping_error(snf->dev, buf_dma);
93773c1a515SDan Carpenter 	if (ret) {
938764f1b74SChuanhong Guo 		dev_err(snf->dev, "DMA mapping failed.\n");
939764f1b74SChuanhong Guo 		goto cleanup;
940764f1b74SChuanhong Guo 	}
941764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_STRADDR, buf_dma);
942764f1b74SChuanhong Guo 	if (op->data.ecc) {
943764f1b74SChuanhong Guo 		snf->ecc_cfg->op = ECC_DECODE;
944764f1b74SChuanhong Guo 		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
945764f1b74SChuanhong Guo 		if (ret)
946764f1b74SChuanhong Guo 			goto cleanup_dma;
947764f1b74SChuanhong Guo 	}
948764f1b74SChuanhong Guo 	// Prepare for custom read interrupt
949764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
950764f1b74SChuanhong Guo 	reinit_completion(&snf->op_done);
951764f1b74SChuanhong Guo 
952764f1b74SChuanhong Guo 	// Trigger NFI into custom mode
953764f1b74SChuanhong Guo 	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);
954764f1b74SChuanhong Guo 
955764f1b74SChuanhong Guo 	// Start DMA read
956764f1b74SChuanhong Guo 	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
957764f1b74SChuanhong Guo 	nfi_write16(snf, NFI_STRDATA, STR_DATA);
958764f1b74SChuanhong Guo 
959764f1b74SChuanhong Guo 	if (!wait_for_completion_timeout(
960764f1b74SChuanhong Guo 		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
961764f1b74SChuanhong Guo 		dev_err(snf->dev, "DMA timed out for reading from cache.\n");
962764f1b74SChuanhong Guo 		ret = -ETIMEDOUT;
963764f1b74SChuanhong Guo 		goto cleanup;
964764f1b74SChuanhong Guo 	}
965764f1b74SChuanhong Guo 
966764f1b74SChuanhong Guo 	// Wait for BUS_SEC_CNTR returning expected value
967764f1b74SChuanhong Guo 	ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
968764f1b74SChuanhong Guo 				 BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
969764f1b74SChuanhong Guo 				 SNFI_POLL_INTERVAL);
970764f1b74SChuanhong Guo 	if (ret) {
971764f1b74SChuanhong Guo 		dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
972764f1b74SChuanhong Guo 		goto cleanup2;
973764f1b74SChuanhong Guo 	}
974764f1b74SChuanhong Guo 
975764f1b74SChuanhong Guo 	// Wait for bus becoming idle
976764f1b74SChuanhong Guo 	ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
977764f1b74SChuanhong Guo 				 !(val & snf->caps->mastersta_mask), 0,
978764f1b74SChuanhong Guo 				 SNFI_POLL_INTERVAL);
979764f1b74SChuanhong Guo 	if (ret) {
980764f1b74SChuanhong Guo 		dev_err(snf->dev, "Timed out waiting for bus becoming idle\n");
981764f1b74SChuanhong Guo 		goto cleanup2;
982764f1b74SChuanhong Guo 	}
983764f1b74SChuanhong Guo 
984764f1b74SChuanhong Guo 	if (op->data.ecc) {
985764f1b74SChuanhong Guo 		ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
986764f1b74SChuanhong Guo 		if (ret) {
987764f1b74SChuanhong Guo 			dev_err(snf->dev, "wait ecc done timeout\n");
988764f1b74SChuanhong Guo 			goto cleanup2;
989764f1b74SChuanhong Guo 		}
990764f1b74SChuanhong Guo 		// save status before disabling ecc
991764f1b74SChuanhong Guo 		mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
992764f1b74SChuanhong Guo 				  snf->nfi_cfg.nsectors);
993764f1b74SChuanhong Guo 	}
994764f1b74SChuanhong Guo 
995764f1b74SChuanhong Guo 	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
996764f1b74SChuanhong Guo 
997764f1b74SChuanhong Guo 	if (snf->autofmt) {
998764f1b74SChuanhong Guo 		mtk_snand_read_fdm(snf, buf_fdm);
999764f1b74SChuanhong Guo 		if (snf->caps->bbm_swap) {
1000764f1b74SChuanhong Guo 			mtk_snand_bm_swap(snf, buf);
1001764f1b74SChuanhong Guo 			mtk_snand_fdm_bm_swap(snf);
1002764f1b74SChuanhong Guo 		}
1003764f1b74SChuanhong Guo 	}
1004764f1b74SChuanhong Guo 
1005764f1b74SChuanhong Guo 	// copy data back
1006764f1b74SChuanhong Guo 	if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
1007764f1b74SChuanhong Guo 		memset(op->data.buf.in, 0xff, op->data.nbytes);
1008764f1b74SChuanhong Guo 		snf->ecc_stats.bitflips = 0;
1009764f1b74SChuanhong Guo 		snf->ecc_stats.failed = 0;
1010764f1b74SChuanhong Guo 		snf->ecc_stats.corrected = 0;
1011764f1b74SChuanhong Guo 	} else {
1012764f1b74SChuanhong Guo 		if (buf == op->data.buf.in) {
1013764f1b74SChuanhong Guo 			u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
1014764f1b74SChuanhong Guo 			u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;
1015764f1b74SChuanhong Guo 
1016764f1b74SChuanhong Guo 			if (req_left)
1017764f1b74SChuanhong Guo 				memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
1018764f1b74SChuanhong Guo 				       buf_fdm,
1019764f1b74SChuanhong Guo 				       cap_len < req_left ? cap_len : req_left);
1020764f1b74SChuanhong Guo 		} else if (rd_offset < snf->buf_len) {
1021764f1b74SChuanhong Guo 			u32 cap_len = snf->buf_len - rd_offset;
1022764f1b74SChuanhong Guo 
1023764f1b74SChuanhong Guo 			if (op->data.nbytes < cap_len)
1024764f1b74SChuanhong Guo 				cap_len = op->data.nbytes;
1025764f1b74SChuanhong Guo 			memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
1026764f1b74SChuanhong Guo 		}
1027764f1b74SChuanhong Guo 	}
1028764f1b74SChuanhong Guo cleanup2:
1029764f1b74SChuanhong Guo 	if (op->data.ecc)
1030764f1b74SChuanhong Guo 		mtk_ecc_disable(snf->ecc);
1031764f1b74SChuanhong Guo cleanup_dma:
1032764f1b74SChuanhong Guo 	// unmap dma only if any error happens. (otherwise it's done before
1033764f1b74SChuanhong Guo 	// data copying)
1034764f1b74SChuanhong Guo 	if (ret)
1035764f1b74SChuanhong Guo 		dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
1036764f1b74SChuanhong Guo cleanup:
1037764f1b74SChuanhong Guo 	// Stop read
1038764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_CON, 0);
1039764f1b74SChuanhong Guo 	nfi_write16(snf, NFI_CNFG, 0);
1040764f1b74SChuanhong Guo 
1041764f1b74SChuanhong Guo 	// Clear SNF done flag
1042764f1b74SChuanhong Guo 	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
1043764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_STA_CTL1, 0);
1044764f1b74SChuanhong Guo 
1045764f1b74SChuanhong Guo 	// Disable interrupt
1046764f1b74SChuanhong Guo 	nfi_read32(snf, NFI_INTR_STA);
1047764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_INTR_EN, 0);
1048764f1b74SChuanhong Guo 
1049764f1b74SChuanhong Guo 	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
1050764f1b74SChuanhong Guo 	return ret;
1051764f1b74SChuanhong Guo }
1052764f1b74SChuanhong Guo 
1053764f1b74SChuanhong Guo static int mtk_snand_write_page_cache(struct mtk_snand *snf,
1054764f1b74SChuanhong Guo 				      const struct spi_mem_op *op)
1055764f1b74SChuanhong Guo {
1056764f1b74SChuanhong Guo 	// the address part to be sent by the controller
1057764f1b74SChuanhong Guo 	u32 op_addr = op->addr.val;
1058764f1b74SChuanhong Guo 	// where to start placing the caller's data in the bounce buffer
1059764f1b74SChuanhong Guo 	u32 wr_offset = 0;
1060764f1b74SChuanhong Guo 	u32 op_mode = 0;
1061764f1b74SChuanhong Guo 	int ret = 0;
1062764f1b74SChuanhong Guo 	u32 wr_mode = 0;
1063764f1b74SChuanhong Guo 	u32 dma_len = snf->buf_len;
1064764f1b74SChuanhong Guo 	u32 wr_bytes, val;
1065764f1b74SChuanhong Guo 	size_t cap_len;
1066764f1b74SChuanhong Guo 	dma_addr_t buf_dma;
1067764f1b74SChuanhong Guo 
1068764f1b74SChuanhong Guo 	if (snf->autofmt) {
1069764f1b74SChuanhong Guo 		u32 last_bit;
1070764f1b74SChuanhong Guo 		u32 mask;
1071764f1b74SChuanhong Guo 
1072764f1b74SChuanhong Guo 		dma_len = snf->nfi_cfg.page_size;
1073764f1b74SChuanhong Guo 		op_mode = CNFG_AUTO_FMT_EN;
1074764f1b74SChuanhong Guo 		if (op->data.ecc)
1075764f1b74SChuanhong Guo 			op_mode |= CNFG_HW_ECC_EN;
1076764f1b74SChuanhong Guo 
1077764f1b74SChuanhong Guo 		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
1078764f1b74SChuanhong Guo 		mask = (1 << last_bit) - 1;
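		// e.g. for a 2KiB page with 64B of OOB, fls(2112) = 12 and
		// mask = 0xfff, so an op address of 0x0820 splits into a
		// buffer offset of 0x820 and a chip address of 0 below.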
1079764f1b74SChuanhong Guo 		wr_offset = op_addr & mask;
1080764f1b74SChuanhong Guo 		op_addr &= ~mask;
1081764f1b74SChuanhong Guo 	}
1082764f1b74SChuanhong Guo 	mtk_snand_mac_reset(snf);
1083764f1b74SChuanhong Guo 	mtk_nfi_reset(snf);
1084764f1b74SChuanhong Guo 
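	// Fill the skipped head of the bounce buffer with 0xff so those bytes
	// leave the corresponding flash cells unprogrammed.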
1085764f1b74SChuanhong Guo 	if (wr_offset)
1086764f1b74SChuanhong Guo 		memset(snf->buf, 0xff, wr_offset);
1087764f1b74SChuanhong Guo 
1088764f1b74SChuanhong Guo 	cap_len = snf->buf_len - wr_offset;
1089764f1b74SChuanhong Guo 	if (op->data.nbytes < cap_len)
1090764f1b74SChuanhong Guo 		cap_len = op->data.nbytes;
1091764f1b74SChuanhong Guo 	memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
1092764f1b74SChuanhong Guo 	if (snf->autofmt) {
1093764f1b74SChuanhong Guo 		if (snf->caps->bbm_swap) {
1094764f1b74SChuanhong Guo 			mtk_snand_fdm_bm_swap(snf);
1095764f1b74SChuanhong Guo 			mtk_snand_bm_swap(snf, snf->buf);
1096764f1b74SChuanhong Guo 		}
1097764f1b74SChuanhong Guo 		mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
1098764f1b74SChuanhong Guo 	}
1099764f1b74SChuanhong Guo 
1100764f1b74SChuanhong Guo 	// Command
1101764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));
1102764f1b74SChuanhong Guo 
1103764f1b74SChuanhong Guo 	// write address
1104764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_PG_CTL2, op_addr);
1105764f1b74SChuanhong Guo 
1106764f1b74SChuanhong Guo 	// Set write mode
1107764f1b74SChuanhong Guo 	if (op->data.buswidth == 4)
1108764f1b74SChuanhong Guo 		wr_mode = PG_LOAD_X4_EN;
1109764f1b74SChuanhong Guo 
1110764f1b74SChuanhong Guo 	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
1111764f1b74SChuanhong Guo 		  wr_mode | PG_LOAD_CUSTOM_EN);
1112764f1b74SChuanhong Guo 
1113764f1b74SChuanhong Guo 	// Set bytes to write
1114764f1b74SChuanhong Guo 	wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
1115764f1b74SChuanhong Guo 		   snf->nfi_cfg.nsectors;
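	// e.g. assuming 4 sectors of 512B data + 16B spare each (the 2KiB+64B
	// layout set up in probe), wr_bytes = (16 + 512) * 4 = 2112.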
1116764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_MISC_CTL2,
1117764f1b74SChuanhong Guo 		    (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);
1118764f1b74SChuanhong Guo 
1119764f1b74SChuanhong Guo 	// NFI write prepare
1120764f1b74SChuanhong Guo 	nfi_write16(snf, NFI_CNFG,
1121764f1b74SChuanhong Guo 		    (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
1122764f1b74SChuanhong Guo 			    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);
1123764f1b74SChuanhong Guo 
1124764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
1125764f1b74SChuanhong Guo 	buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
112673c1a515SDan Carpenter 	ret = dma_mapping_error(snf->dev, buf_dma);
112773c1a515SDan Carpenter 	if (ret) {
1128764f1b74SChuanhong Guo 		dev_err(snf->dev, "DMA mapping failed.\n");
1129764f1b74SChuanhong Guo 		goto cleanup;
1130764f1b74SChuanhong Guo 	}
1131764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_STRADDR, buf_dma);
1132764f1b74SChuanhong Guo 	if (op->data.ecc) {
1133764f1b74SChuanhong Guo 		snf->ecc_cfg->op = ECC_ENCODE;
1134764f1b74SChuanhong Guo 		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
1135764f1b74SChuanhong Guo 		if (ret)
1136764f1b74SChuanhong Guo 			goto cleanup_dma;
1137764f1b74SChuanhong Guo 	}
1138764f1b74SChuanhong Guo 	// Prepare for custom write interrupt
1139764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
1140764f1b74SChuanhong Guo 	reinit_completion(&snf->op_done);
1142764f1b74SChuanhong Guo 
1143764f1b74SChuanhong Guo 	// Trigger NFI into custom mode
1144764f1b74SChuanhong Guo 	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
1145764f1b74SChuanhong Guo 
1146764f1b74SChuanhong Guo 	// Start DMA write
1147764f1b74SChuanhong Guo 	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
1148764f1b74SChuanhong Guo 	nfi_write16(snf, NFI_STRDATA, STR_DATA);
1149764f1b74SChuanhong Guo 
1150764f1b74SChuanhong Guo 	if (!wait_for_completion_timeout(
1151764f1b74SChuanhong Guo 		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
1152764f1b74SChuanhong Guo 		dev_err(snf->dev, "DMA timed out for program load.\n");
1153764f1b74SChuanhong Guo 		ret = -ETIMEDOUT;
1154764f1b74SChuanhong Guo 		goto cleanup_ecc;
1155764f1b74SChuanhong Guo 	}
1156764f1b74SChuanhong Guo 
1157764f1b74SChuanhong Guo 	// Wait for NFI_SEC_CNTR to reach the expected sector count
1158764f1b74SChuanhong Guo 	ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
1159764f1b74SChuanhong Guo 				 NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
1160764f1b74SChuanhong Guo 				 SNFI_POLL_INTERVAL);
1161764f1b74SChuanhong Guo 	if (ret)
1162764f1b74SChuanhong Guo 		dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");
1163764f1b74SChuanhong Guo 
1164764f1b74SChuanhong Guo cleanup_ecc:
1165764f1b74SChuanhong Guo 	if (op->data.ecc)
1166764f1b74SChuanhong Guo 		mtk_ecc_disable(snf->ecc);
1167764f1b74SChuanhong Guo cleanup_dma:
1168764f1b74SChuanhong Guo 	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
1169764f1b74SChuanhong Guo cleanup:
1170764f1b74SChuanhong Guo 	// Stop write
1171764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_CON, 0);
1172764f1b74SChuanhong Guo 	nfi_write16(snf, NFI_CNFG, 0);
1173764f1b74SChuanhong Guo 
1174764f1b74SChuanhong Guo 	// Clear SNF done flag
1175764f1b74SChuanhong Guo 	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
1176764f1b74SChuanhong Guo 	nfi_write32(snf, SNF_STA_CTL1, 0);
1177764f1b74SChuanhong Guo 
1178764f1b74SChuanhong Guo 	// Disable interrupt
1179764f1b74SChuanhong Guo 	nfi_read32(snf, NFI_INTR_STA);
1180764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_INTR_EN, 0);
1181764f1b74SChuanhong Guo 
1182764f1b74SChuanhong Guo 	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
1183764f1b74SChuanhong Guo 
1184764f1b74SChuanhong Guo 	return ret;
1185764f1b74SChuanhong Guo }
1186764f1b74SChuanhong Guo 
1187764f1b74SChuanhong Guo /**
1188764f1b74SChuanhong Guo  * mtk_snand_is_page_ops() - check if the op is a controller supported page op.
1189764f1b74SChuanhong Guo  * @op: spi-mem op to check
1190764f1b74SChuanhong Guo  *
1191764f1b74SChuanhong Guo  * Check whether op can be executed with read_from_cache or program_load
1192764f1b74SChuanhong Guo  * mode in the controller.
1193764f1b74SChuanhong Guo  * This controller can execute typical Read From Cache and Program Load
1194764f1b74SChuanhong Guo  * instructions found on SPI-NAND with a 2-byte address.
1195764f1b74SChuanhong Guo  * DTR and cmd buswidth & nbytes should be checked before calling this.
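 * For example, the common Read From Cache opcodes (03h/0Bh, 3Bh/BBh dual,
 * 6Bh/EBh quad) and Program Load opcodes (02h, 32h quad) typically match
 * this template.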
1196764f1b74SChuanhong Guo  *
1197764f1b74SChuanhong Guo  * Return: true if the op matches the instruction template
1198764f1b74SChuanhong Guo  */
1199764f1b74SChuanhong Guo static bool mtk_snand_is_page_ops(const struct spi_mem_op *op)
1200764f1b74SChuanhong Guo {
1201764f1b74SChuanhong Guo 	if (op->addr.nbytes != 2)
1202764f1b74SChuanhong Guo 		return false;
1203764f1b74SChuanhong Guo 
1204764f1b74SChuanhong Guo 	if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
1205764f1b74SChuanhong Guo 	    op->addr.buswidth != 4)
1206764f1b74SChuanhong Guo 		return false;
1207764f1b74SChuanhong Guo 
1208764f1b74SChuanhong Guo 	// match Read From Cache instructions
1209764f1b74SChuanhong Guo 	if (op->data.dir == SPI_MEM_DATA_IN) {
1210764f1b74SChuanhong Guo 		// check dummy cycle first
1211764f1b74SChuanhong Guo 		if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth >
1212764f1b74SChuanhong Guo 		    DATA_READ_MAX_DUMMY)
1213764f1b74SChuanhong Guo 			return false;
1214764f1b74SChuanhong Guo 		// quad io / quad out
1215764f1b74SChuanhong Guo 		if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) &&
1216764f1b74SChuanhong Guo 		    op->data.buswidth == 4)
1217764f1b74SChuanhong Guo 			return true;
1218764f1b74SChuanhong Guo 
1219764f1b74SChuanhong Guo 		// dual io / dual out
1220764f1b74SChuanhong Guo 		if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) &&
1221764f1b74SChuanhong Guo 		    op->data.buswidth == 2)
1222764f1b74SChuanhong Guo 			return true;
1223764f1b74SChuanhong Guo 
1224764f1b74SChuanhong Guo 		// standard spi
1225764f1b74SChuanhong Guo 		if (op->addr.buswidth == 1 && op->data.buswidth == 1)
1226764f1b74SChuanhong Guo 			return true;
1227764f1b74SChuanhong Guo 	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
1228764f1b74SChuanhong Guo 		// check dummy cycle first
1229764f1b74SChuanhong Guo 		if (op->dummy.nbytes)
1230764f1b74SChuanhong Guo 			return false;
1231764f1b74SChuanhong Guo 		// program load quad out
1232764f1b74SChuanhong Guo 		if (op->addr.buswidth == 1 && op->data.buswidth == 4)
1233764f1b74SChuanhong Guo 			return true;
1234764f1b74SChuanhong Guo 		// standard spi
1235764f1b74SChuanhong Guo 		if (op->addr.buswidth == 1 && op->data.buswidth == 1)
1236764f1b74SChuanhong Guo 			return true;
1237764f1b74SChuanhong Guo 	}
1238764f1b74SChuanhong Guo 	return false;
1239764f1b74SChuanhong Guo }
1240764f1b74SChuanhong Guo 
1241764f1b74SChuanhong Guo static bool mtk_snand_supports_op(struct spi_mem *mem,
1242764f1b74SChuanhong Guo 				  const struct spi_mem_op *op)
1243764f1b74SChuanhong Guo {
1244764f1b74SChuanhong Guo 	if (!spi_mem_default_supports_op(mem, op))
1245764f1b74SChuanhong Guo 		return false;
1246764f1b74SChuanhong Guo 	if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
1247764f1b74SChuanhong Guo 		return false;
1248764f1b74SChuanhong Guo 	if (mtk_snand_is_page_ops(op))
1249764f1b74SChuanhong Guo 		return true;
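	// Everything else goes through the GPRAM-based MAC I/O path, which
	// only supports single-bit transfers; e.g. Read ID (9Fh), Get/Set
	// Feature (0Fh/1Fh), Page Read (13h), Program Execute (10h) and
	// Block Erase (D8h) ops would typically take this path.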
1250764f1b74SChuanhong Guo 	return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) &&
1251764f1b74SChuanhong Guo 		(op->dummy.nbytes == 0 || op->dummy.buswidth == 1) &&
1252764f1b74SChuanhong Guo 		(op->data.nbytes == 0 || op->data.buswidth == 1));
1253764f1b74SChuanhong Guo }
1254764f1b74SChuanhong Guo 
1255764f1b74SChuanhong Guo static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
1256764f1b74SChuanhong Guo {
1257764f1b74SChuanhong Guo 	struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
1258764f1b74SChuanhong Guo 	// page ops transfer size must be exactly ((sector_size + spare_size) *
1259764f1b74SChuanhong Guo 	// nsectors). Limit the op size if the caller requests more than that.
1260764f1b74SChuanhong Guo 	// exec_op will read more than needed and discard the leftover if the
1261764f1b74SChuanhong Guo 	// caller requests less data.
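	// For example, assuming the 2KiB + 64B layout set up in probe
	// (4 sectors of 512B data + 16B spare each), a larger request is
	// clipped to (512 + 16) * 4 = 2112 bytes here.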
1262764f1b74SChuanhong Guo 	if (mtk_snand_is_page_ops(op)) {
1263764f1b74SChuanhong Guo 		size_t l;
1264764f1b74SChuanhong Guo 		// skip adjust_op_size for page ops
1265764f1b74SChuanhong Guo 		if (ms->autofmt)
1266764f1b74SChuanhong Guo 			return 0;
1267764f1b74SChuanhong Guo 		l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
1268764f1b74SChuanhong Guo 		l *= ms->nfi_cfg.nsectors;
1269764f1b74SChuanhong Guo 		if (op->data.nbytes > l)
1270764f1b74SChuanhong Guo 			op->data.nbytes = l;
1271764f1b74SChuanhong Guo 	} else {
1272764f1b74SChuanhong Guo 		size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
1273764f1b74SChuanhong Guo 
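		// The whole op (cmd + addr + dummy + data) must fit in the
		// SNFI GPRAM, so e.g. a Get Feature op with a 1-byte cmd and
		// 1-byte address can carry at most SNF_GPRAM_SIZE - 2 data
		// bytes.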
1274764f1b74SChuanhong Guo 		if (hl >= SNF_GPRAM_SIZE)
1275764f1b74SChuanhong Guo 			return -EOPNOTSUPP;
1276764f1b74SChuanhong Guo 		if (op->data.nbytes > SNF_GPRAM_SIZE - hl)
1277764f1b74SChuanhong Guo 			op->data.nbytes = SNF_GPRAM_SIZE - hl;
1278764f1b74SChuanhong Guo 	}
1279764f1b74SChuanhong Guo 	return 0;
1280764f1b74SChuanhong Guo }
1281764f1b74SChuanhong Guo 
1282764f1b74SChuanhong Guo static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
1283764f1b74SChuanhong Guo {
1284764f1b74SChuanhong Guo 	struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
1285764f1b74SChuanhong Guo 
1286764f1b74SChuanhong Guo 	dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u\n", op->cmd.opcode,
1287764f1b74SChuanhong Guo 		op->addr.val, op->addr.buswidth, op->addr.nbytes,
1288764f1b74SChuanhong Guo 		op->data.buswidth, op->data.nbytes);
1289764f1b74SChuanhong Guo 	if (mtk_snand_is_page_ops(op)) {
1290764f1b74SChuanhong Guo 		if (op->data.dir == SPI_MEM_DATA_IN)
1291764f1b74SChuanhong Guo 			return mtk_snand_read_page_cache(ms, op);
1292764f1b74SChuanhong Guo 		else
1293764f1b74SChuanhong Guo 			return mtk_snand_write_page_cache(ms, op);
1294764f1b74SChuanhong Guo 	} else {
1295764f1b74SChuanhong Guo 		return mtk_snand_mac_io(ms, op);
1296764f1b74SChuanhong Guo 	}
1297764f1b74SChuanhong Guo }
1298764f1b74SChuanhong Guo 
1299764f1b74SChuanhong Guo static const struct spi_controller_mem_ops mtk_snand_mem_ops = {
1300764f1b74SChuanhong Guo 	.adjust_op_size = mtk_snand_adjust_op_size,
1301764f1b74SChuanhong Guo 	.supports_op = mtk_snand_supports_op,
1302764f1b74SChuanhong Guo 	.exec_op = mtk_snand_exec_op,
1303764f1b74SChuanhong Guo };
1304764f1b74SChuanhong Guo 
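// Advertising ECC support here lets the spi-mem core accept ops with
// op->data.ecc set, so page ops can be routed through the pipelined ECC
// engine registered in probe.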
1305764f1b74SChuanhong Guo static const struct spi_controller_mem_caps mtk_snand_mem_caps = {
1306764f1b74SChuanhong Guo 	.ecc = true,
1307764f1b74SChuanhong Guo };
1308764f1b74SChuanhong Guo 
1309764f1b74SChuanhong Guo static irqreturn_t mtk_snand_irq(int irq, void *id)
1310764f1b74SChuanhong Guo {
1311764f1b74SChuanhong Guo 	struct mtk_snand *snf = id;
1312764f1b74SChuanhong Guo 	u32 sta, ien;
1313764f1b74SChuanhong Guo 
1314764f1b74SChuanhong Guo 	sta = nfi_read32(snf, NFI_INTR_STA);
1315764f1b74SChuanhong Guo 	ien = nfi_read32(snf, NFI_INTR_EN);
1316764f1b74SChuanhong Guo 
1317764f1b74SChuanhong Guo 	if (!(sta & ien))
1318764f1b74SChuanhong Guo 		return IRQ_NONE;
1319764f1b74SChuanhong Guo 
1320764f1b74SChuanhong Guo 	nfi_write32(snf, NFI_INTR_EN, 0);
1321764f1b74SChuanhong Guo 	complete(&snf->op_done);
1322764f1b74SChuanhong Guo 	return IRQ_HANDLED;
1323764f1b74SChuanhong Guo }
1324764f1b74SChuanhong Guo 
1325764f1b74SChuanhong Guo static const struct of_device_id mtk_snand_ids[] = {
1326764f1b74SChuanhong Guo 	{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
1327764f1b74SChuanhong Guo 	{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
13287073888cSXiangsheng Hou 	{ .compatible = "mediatek,mt7986-snand", .data = &mt7986_snand_caps },
1329764f1b74SChuanhong Guo 	{},
1330764f1b74SChuanhong Guo };
1331764f1b74SChuanhong Guo 
1332764f1b74SChuanhong Guo MODULE_DEVICE_TABLE(of, mtk_snand_ids);
1333764f1b74SChuanhong Guo 
1334764f1b74SChuanhong Guo static int mtk_snand_enable_clk(struct mtk_snand *ms)
1335764f1b74SChuanhong Guo {
1336764f1b74SChuanhong Guo 	int ret;
1337764f1b74SChuanhong Guo 
1338764f1b74SChuanhong Guo 	ret = clk_prepare_enable(ms->nfi_clk);
1339764f1b74SChuanhong Guo 	if (ret) {
1340764f1b74SChuanhong Guo 		dev_err(ms->dev, "unable to enable nfi clk\n");
1341764f1b74SChuanhong Guo 		return ret;
1342764f1b74SChuanhong Guo 	}
1343764f1b74SChuanhong Guo 	ret = clk_prepare_enable(ms->pad_clk);
1344764f1b74SChuanhong Guo 	if (ret) {
1345764f1b74SChuanhong Guo 		dev_err(ms->dev, "unable to enable pad clk\n");
1346764f1b74SChuanhong Guo 		goto err1;
1347764f1b74SChuanhong Guo 	}
1348e40fa328SXiangsheng Hou 	ret = clk_prepare_enable(ms->nfi_hclk);
1349e40fa328SXiangsheng Hou 	if (ret) {
1350e40fa328SXiangsheng Hou 		dev_err(ms->dev, "unable to enable nfi hclk\n");
1351e40fa328SXiangsheng Hou 		goto err2;
1352e40fa328SXiangsheng Hou 	}
1353e40fa328SXiangsheng Hou 
1354764f1b74SChuanhong Guo 	return 0;
1355e40fa328SXiangsheng Hou 
1356e40fa328SXiangsheng Hou err2:
1357e40fa328SXiangsheng Hou 	clk_disable_unprepare(ms->pad_clk);
1358764f1b74SChuanhong Guo err1:
1359764f1b74SChuanhong Guo 	clk_disable_unprepare(ms->nfi_clk);
1360764f1b74SChuanhong Guo 	return ret;
1361764f1b74SChuanhong Guo }
1362764f1b74SChuanhong Guo 
1363764f1b74SChuanhong Guo static void mtk_snand_disable_clk(struct mtk_snand *ms)
1364764f1b74SChuanhong Guo {
1365e40fa328SXiangsheng Hou 	clk_disable_unprepare(ms->nfi_hclk);
1366764f1b74SChuanhong Guo 	clk_disable_unprepare(ms->pad_clk);
1367764f1b74SChuanhong Guo 	clk_disable_unprepare(ms->nfi_clk);
1368764f1b74SChuanhong Guo }
1369764f1b74SChuanhong Guo 
1370764f1b74SChuanhong Guo static int mtk_snand_probe(struct platform_device *pdev)
1371764f1b74SChuanhong Guo {
1372764f1b74SChuanhong Guo 	struct device_node *np = pdev->dev.of_node;
1373764f1b74SChuanhong Guo 	const struct of_device_id *dev_id;
1374764f1b74SChuanhong Guo 	struct spi_controller *ctlr;
1375764f1b74SChuanhong Guo 	struct mtk_snand *ms;
13761d36c990SXiangsheng Hou 	unsigned long spi_freq;
13771d36c990SXiangsheng Hou 	u32 val = 0;
1378764f1b74SChuanhong Guo 	int ret;
1379764f1b74SChuanhong Guo 
1380764f1b74SChuanhong Guo 	dev_id = of_match_node(mtk_snand_ids, np);
1381764f1b74SChuanhong Guo 	if (!dev_id)
1382764f1b74SChuanhong Guo 		return -EINVAL;
1383764f1b74SChuanhong Guo 
1384764f1b74SChuanhong Guo 	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms));
1385764f1b74SChuanhong Guo 	if (!ctlr)
1386764f1b74SChuanhong Guo 		return -ENOMEM;
1387764f1b74SChuanhong Guo 	platform_set_drvdata(pdev, ctlr);
1388764f1b74SChuanhong Guo 
1389764f1b74SChuanhong Guo 	ms = spi_controller_get_devdata(ctlr);
1390764f1b74SChuanhong Guo 
1391764f1b74SChuanhong Guo 	ms->ctlr = ctlr;
1392764f1b74SChuanhong Guo 	ms->caps = dev_id->data;
1393764f1b74SChuanhong Guo 
1394764f1b74SChuanhong Guo 	ms->ecc = of_mtk_ecc_get(np);
1395764f1b74SChuanhong Guo 	if (IS_ERR(ms->ecc))
1396764f1b74SChuanhong Guo 		return PTR_ERR(ms->ecc);
1397764f1b74SChuanhong Guo 	else if (!ms->ecc)
1398764f1b74SChuanhong Guo 		return -ENODEV;
1399764f1b74SChuanhong Guo 
1400764f1b74SChuanhong Guo 	ms->nfi_base = devm_platform_ioremap_resource(pdev, 0);
1401764f1b74SChuanhong Guo 	if (IS_ERR(ms->nfi_base)) {
1402764f1b74SChuanhong Guo 		ret = PTR_ERR(ms->nfi_base);
1403764f1b74SChuanhong Guo 		goto release_ecc;
1404764f1b74SChuanhong Guo 	}
1405764f1b74SChuanhong Guo 
1406764f1b74SChuanhong Guo 	ms->dev = &pdev->dev;
1407764f1b74SChuanhong Guo 
1408764f1b74SChuanhong Guo 	ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk");
1409764f1b74SChuanhong Guo 	if (IS_ERR(ms->nfi_clk)) {
1410764f1b74SChuanhong Guo 		ret = PTR_ERR(ms->nfi_clk);
1411764f1b74SChuanhong Guo 		dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret);
1412764f1b74SChuanhong Guo 		goto release_ecc;
1413764f1b74SChuanhong Guo 	}
1414764f1b74SChuanhong Guo 
1415764f1b74SChuanhong Guo 	ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk");
1416764f1b74SChuanhong Guo 	if (IS_ERR(ms->pad_clk)) {
1417764f1b74SChuanhong Guo 		ret = PTR_ERR(ms->pad_clk);
1418764f1b74SChuanhong Guo 		dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret);
1419764f1b74SChuanhong Guo 		goto release_ecc;
1420764f1b74SChuanhong Guo 	}
1421764f1b74SChuanhong Guo 
1422e40fa328SXiangsheng Hou 	ms->nfi_hclk = devm_clk_get_optional(&pdev->dev, "nfi_hclk");
1423e40fa328SXiangsheng Hou 	if (IS_ERR(ms->nfi_hclk)) {
1424e40fa328SXiangsheng Hou 		ret = PTR_ERR(ms->nfi_hclk);
1425e40fa328SXiangsheng Hou 		dev_err(&pdev->dev, "unable to get nfi_hclk, err = %d\n", ret);
1426e40fa328SXiangsheng Hou 		goto release_ecc;
1427e40fa328SXiangsheng Hou 	}
1428e40fa328SXiangsheng Hou 
1429764f1b74SChuanhong Guo 	ret = mtk_snand_enable_clk(ms);
1430764f1b74SChuanhong Guo 	if (ret)
1431764f1b74SChuanhong Guo 		goto release_ecc;
1432764f1b74SChuanhong Guo 
1433764f1b74SChuanhong Guo 	init_completion(&ms->op_done);
1434764f1b74SChuanhong Guo 
1435764f1b74SChuanhong Guo 	ms->irq = platform_get_irq(pdev, 0);
1436764f1b74SChuanhong Guo 	if (ms->irq < 0) {
1437764f1b74SChuanhong Guo 		ret = ms->irq;
1438764f1b74SChuanhong Guo 		goto disable_clk;
1439764f1b74SChuanhong Guo 	}
1440764f1b74SChuanhong Guo 	ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0,
1441764f1b74SChuanhong Guo 			       "mtk-snand", ms);
1442764f1b74SChuanhong Guo 	if (ret) {
1443764f1b74SChuanhong Guo 		dev_err(ms->dev, "failed to request snfi irq\n");
1444764f1b74SChuanhong Guo 		goto disable_clk;
1445764f1b74SChuanhong Guo 	}
1446764f1b74SChuanhong Guo 
1447764f1b74SChuanhong Guo 	ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32));
1448764f1b74SChuanhong Guo 	if (ret) {
1449764f1b74SChuanhong Guo 		dev_err(ms->dev, "failed to set dma mask\n");
1450764f1b74SChuanhong Guo 		goto disable_clk;
1451764f1b74SChuanhong Guo 	}
1452764f1b74SChuanhong Guo 
1453764f1b74SChuanhong Guo 	// switch to SNFI mode
1454764f1b74SChuanhong Guo 	nfi_write32(ms, SNF_CFG, SPI_MODE);
1455764f1b74SChuanhong Guo 
14561d36c990SXiangsheng Hou 	ret = of_property_read_u32(np, "rx-sample-delay-ns", &val);
14571d36c990SXiangsheng Hou 	if (!ret)
14581d36c990SXiangsheng Hou 		nfi_rmw32(ms, SNF_DLY_CTL3, SFCK_SAM_DLY,
14591d36c990SXiangsheng Hou 			  val * SFCK_SAM_DLY_RANGE / SFCK_SAM_DLY_TOTAL);
14601d36c990SXiangsheng Hou 
14611d36c990SXiangsheng Hou 	ret = of_property_read_u32(np, "mediatek,rx-latch-latency-ns", &val);
14621d36c990SXiangsheng Hou 	if (!ret) {
14631d36c990SXiangsheng Hou 		spi_freq = clk_get_rate(ms->pad_clk);
14641d36c990SXiangsheng Hou 		val = DIV_ROUND_CLOSEST(val, NSEC_PER_SEC / spi_freq);
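		// e.g. with a 26 MHz pad clock (one cycle is ~38ns), a 40ns
		// "mediatek,rx-latch-latency-ns" rounds to val = 1 latch
		// cycle.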
14651d36c990SXiangsheng Hou 		nfi_rmw32(ms, SNF_MISC_CTL, DATA_READ_LATCH_LAT,
14661d36c990SXiangsheng Hou 			  val << DATA_READ_LATCH_LAT_S);
14671d36c990SXiangsheng Hou 	}
14681d36c990SXiangsheng Hou 
1469764f1b74SChuanhong Guo 	// Set up an initial page format for ops matching the page_cache_op
1470764f1b74SChuanhong Guo 	// template before the ECC engine is configured.
14712b1e1981SXiangsheng Hou 	ret = mtk_snand_setup_pagefmt(ms, SZ_2K, SZ_64);
1472764f1b74SChuanhong Guo 	if (ret) {
1473764f1b74SChuanhong Guo 		dev_err(ms->dev, "failed to set initial page format\n");
1474764f1b74SChuanhong Guo 		goto disable_clk;
1475764f1b74SChuanhong Guo 	}
1476764f1b74SChuanhong Guo 
1477764f1b74SChuanhong Guo 	// setup ECC engine
1478764f1b74SChuanhong Guo 	ms->ecc_eng.dev = &pdev->dev;
1479764f1b74SChuanhong Guo 	ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
1480764f1b74SChuanhong Guo 	ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops;
1481764f1b74SChuanhong Guo 	ms->ecc_eng.priv = ms;
1482764f1b74SChuanhong Guo 
1483764f1b74SChuanhong Guo 	ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng);
1484764f1b74SChuanhong Guo 	if (ret) {
1485764f1b74SChuanhong Guo 		dev_err(&pdev->dev, "failed to register ecc engine.\n");
1486764f1b74SChuanhong Guo 		goto disable_clk;
1487764f1b74SChuanhong Guo 	}
1488764f1b74SChuanhong Guo 
1489764f1b74SChuanhong Guo 	ctlr->num_chipselect = 1;
1490764f1b74SChuanhong Guo 	ctlr->mem_ops = &mtk_snand_mem_ops;
1491764f1b74SChuanhong Guo 	ctlr->mem_caps = &mtk_snand_mem_caps;
1492764f1b74SChuanhong Guo 	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
1493764f1b74SChuanhong Guo 	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
1494764f1b74SChuanhong Guo 	ctlr->dev.of_node = pdev->dev.of_node;
1495764f1b74SChuanhong Guo 	ret = spi_register_controller(ctlr);
1496764f1b74SChuanhong Guo 	if (ret) {
1497764f1b74SChuanhong Guo 		dev_err(&pdev->dev, "spi_register_controller failed.\n");
1498764f1b74SChuanhong Guo 		goto disable_clk;
1499764f1b74SChuanhong Guo 	}
1500764f1b74SChuanhong Guo 
1501764f1b74SChuanhong Guo 	return 0;
1502764f1b74SChuanhong Guo disable_clk:
1503764f1b74SChuanhong Guo 	mtk_snand_disable_clk(ms);
1504764f1b74SChuanhong Guo release_ecc:
1505764f1b74SChuanhong Guo 	mtk_ecc_release(ms->ecc);
1506764f1b74SChuanhong Guo 	return ret;
1507764f1b74SChuanhong Guo }
1508764f1b74SChuanhong Guo 
1509*976a6891SUwe Kleine-König static void mtk_snand_remove(struct platform_device *pdev)
1510764f1b74SChuanhong Guo {
1511764f1b74SChuanhong Guo 	struct spi_controller *ctlr = platform_get_drvdata(pdev);
1512764f1b74SChuanhong Guo 	struct mtk_snand *ms = spi_controller_get_devdata(ctlr);
1513764f1b74SChuanhong Guo 
1514764f1b74SChuanhong Guo 	spi_unregister_controller(ctlr);
1515764f1b74SChuanhong Guo 	mtk_snand_disable_clk(ms);
1516764f1b74SChuanhong Guo 	mtk_ecc_release(ms->ecc);
1517764f1b74SChuanhong Guo 	kfree(ms->buf);
1518764f1b74SChuanhong Guo }
1519764f1b74SChuanhong Guo 
1520764f1b74SChuanhong Guo static struct platform_driver mtk_snand_driver = {
1521764f1b74SChuanhong Guo 	.probe = mtk_snand_probe,
1522*976a6891SUwe Kleine-König 	.remove_new = mtk_snand_remove,
1523764f1b74SChuanhong Guo 	.driver = {
1524764f1b74SChuanhong Guo 		.name = "mtk-snand",
1525764f1b74SChuanhong Guo 		.of_match_table = mtk_snand_ids,
1526764f1b74SChuanhong Guo 	},
1527764f1b74SChuanhong Guo };
1528764f1b74SChuanhong Guo 
1529764f1b74SChuanhong Guo module_platform_driver(mtk_snand_driver);
1530764f1b74SChuanhong Guo 
1531764f1b74SChuanhong Guo MODULE_LICENSE("GPL");
1532764f1b74SChuanhong Guo MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
1533764f1b74SChuanhong Guo MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");
1534