1764f1b74SChuanhong Guo // SPDX-License-Identifier: GPL-2.0
2764f1b74SChuanhong Guo //
3764f1b74SChuanhong Guo // Driver for the SPI-NAND mode of Mediatek NAND Flash Interface
4764f1b74SChuanhong Guo //
5764f1b74SChuanhong Guo // Copyright (c) 2022 Chuanhong Guo <gch981213@gmail.com>
6764f1b74SChuanhong Guo //
7764f1b74SChuanhong Guo // This driver is based on the SPI-NAND mtd driver from Mediatek SDK:
8764f1b74SChuanhong Guo //
9764f1b74SChuanhong Guo // Copyright (C) 2020 MediaTek Inc.
10764f1b74SChuanhong Guo // Author: Weijie Gao <weijie.gao@mediatek.com>
11764f1b74SChuanhong Guo //
// This controller organizes the page data as several interleaved sectors
13764f1b74SChuanhong Guo // like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
14764f1b74SChuanhong Guo // +---------+------+------+---------+------+------+-----+
15764f1b74SChuanhong Guo // | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
16764f1b74SChuanhong Guo // +---------+------+------+---------+------+------+-----+
17764f1b74SChuanhong Guo // With auto-format turned on, DMA only returns this part:
18764f1b74SChuanhong Guo // +---------+---------+-----+
19764f1b74SChuanhong Guo // | Sector1 | Sector2 | ... |
20764f1b74SChuanhong Guo // +---------+---------+-----+
21764f1b74SChuanhong Guo // The FDM data will be filled to the registers, and ECC parity data isn't
22764f1b74SChuanhong Guo // accessible.
23764f1b74SChuanhong Guo // With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA
// in its original order shown in the first table. ECC can't be turned on when
25764f1b74SChuanhong Guo // auto-format is off.
26764f1b74SChuanhong Guo //
27764f1b74SChuanhong Guo // However, Linux SPI-NAND driver expects the data returned as:
28764f1b74SChuanhong Guo // +------+-----+
29764f1b74SChuanhong Guo // | Page | OOB |
30764f1b74SChuanhong Guo // +------+-----+
31764f1b74SChuanhong Guo // where the page data is continuously stored instead of interleaved.
32764f1b74SChuanhong Guo // So we assume all instructions matching the page_op template between ECC
33764f1b74SChuanhong Guo // prepare_io_req and finish_io_req are for page cache r/w.
34764f1b74SChuanhong Guo // Here's how this spi-mem driver operates when reading:
35764f1b74SChuanhong Guo // 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
36764f1b74SChuanhong Guo // 2. Perform page ops and let the controller fill the DMA bounce buffer with
37764f1b74SChuanhong Guo // de-interleaved sector data and set FDM registers.
38764f1b74SChuanhong Guo // 3. Return the data as:
39764f1b74SChuanhong Guo // +---------+---------+-----+------+------+-----+
40764f1b74SChuanhong Guo // | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
41764f1b74SChuanhong Guo // +---------+---------+-----+------+------+-----+
42764f1b74SChuanhong Guo // 4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
43764f1b74SChuanhong Guo // read the data with auto-format off into the bounce buffer and copy
44764f1b74SChuanhong Guo // needed data to the buffer specified in the request.
45764f1b74SChuanhong Guo //
// Write requests operate in a similar manner.
47764f1b74SChuanhong Guo // As a limitation of this strategy, we won't be able to access any ECC parity
48764f1b74SChuanhong Guo // data at all in Linux.
49764f1b74SChuanhong Guo //
50764f1b74SChuanhong Guo // Here's the bad block mark situation on MTK chips:
51764f1b74SChuanhong Guo // In older chips like mt7622, MTK uses the first FDM byte in the first sector
52764f1b74SChuanhong Guo // as the bad block mark. After de-interleaving, this byte appears at [pagesize]
53764f1b74SChuanhong Guo // in the returned data, which is the BBM position expected by kernel. However,
54764f1b74SChuanhong Guo // the conventional bad block mark is the first byte of the OOB, which is part
// of the last sector data in the interleaved layout. Instead of fixing their
// hardware, MTK decided to address this inconsistency in software on newer
// chips, where the BootROM expects the following:
58764f1b74SChuanhong Guo // 1. The [pagesize] byte on a nand page is used as BBM, which will appear at
59764f1b74SChuanhong Guo // (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
60764f1b74SChuanhong Guo // 2. The original byte stored at that position in the DMA buffer will be stored
61764f1b74SChuanhong Guo // as the first byte of the FDM section in the last sector.
62764f1b74SChuanhong Guo // We can't disagree with the BootROM, so after de-interleaving, we need to
63764f1b74SChuanhong Guo // perform the following swaps in read:
64764f1b74SChuanhong Guo // 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size],
65764f1b74SChuanhong Guo // which is the expected BBM position by kernel.
66764f1b74SChuanhong Guo // 2. Store the page data byte at [pagesize + (nsectors-1) * fdm] back to
67764f1b74SChuanhong Guo // [page_size - (nsectors - 1) * spare_size]
68764f1b74SChuanhong Guo // Similarly, when writing, we need to perform swaps in the other direction.
69764f1b74SChuanhong Guo
70764f1b74SChuanhong Guo #include <linux/kernel.h>
71764f1b74SChuanhong Guo #include <linux/module.h>
72764f1b74SChuanhong Guo #include <linux/init.h>
73764f1b74SChuanhong Guo #include <linux/device.h>
74764f1b74SChuanhong Guo #include <linux/mutex.h>
75764f1b74SChuanhong Guo #include <linux/clk.h>
76764f1b74SChuanhong Guo #include <linux/interrupt.h>
77764f1b74SChuanhong Guo #include <linux/dma-mapping.h>
78764f1b74SChuanhong Guo #include <linux/iopoll.h>
79749396cbSRob Herring #include <linux/of.h>
80749396cbSRob Herring #include <linux/platform_device.h>
81764f1b74SChuanhong Guo #include <linux/mtd/nand-ecc-mtk.h>
82764f1b74SChuanhong Guo #include <linux/spi/spi.h>
83764f1b74SChuanhong Guo #include <linux/spi/spi-mem.h>
84764f1b74SChuanhong Guo #include <linux/mtd/nand.h>
85764f1b74SChuanhong Guo
// NFI registers. The "_S" suffix denotes a bit-field shift amount.
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020
#define NFI_CMD_DUMMY_READ 0x00
#define NFI_CMD_DUMMY_WRITE 0x80

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
// NAND FSM status bits differ between SoC generations; selected via caps
#define NFI_NAND_FSM_7622 GENMASK(28, 24)
#define NFI_NAND_FSM_7986 GENMASK(29, 23)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12
#define NFI_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR 0x080

#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)

// FDM (spare/OOB) data registers: one low/high pair of 32-bit words per sector
#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n)*8)
#define NFI_FDMM(n) (NFI_FDM0M + (n)*8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define NFI_MASTERSTA_MASK_7986 3

// SNFI registers
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_MAX_DUMMY 0xf
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define DATA_READ_LATCH_LAT GENMASK(9, 8)
#define DATA_READ_LATCH_LAT_S 8
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0
#define SFCK_SAM_DLY GENMASK(5, 0)
#define SFCK_SAM_DLY_TOTAL 9
#define SFCK_SAM_DLY_RANGE 47

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

// Command staging GPRAM: 0xa0 = 160 bytes
#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

// Timeout (in microseconds) passed to the read*_poll_timeout() waits below
#define SNFI_POLL_INTERVAL 1000000
228764f1b74SChuanhong Guo
/*
 * Per-sector spare sizes supported by each SoC. The array index is the
 * value programmed into the NFI_PAGEFMT spare-size field (see
 * mtk_snand_setup_pagefmt()), so the order is fixed by hardware encoding
 * and is not strictly ascending (62 precedes 61 in the mt7986 table).
 */
static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const u8 mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
	74
};
2357073888cSXiangsheng Hou
/* Per-SoC capabilities and fixed layout parameters of the NFI/SNFI. */
struct mtk_snand_caps {
	u16 sector_size;	/* ECC sector size in bytes (512 or 1024) */
	u16 max_sectors;	/* maximum sectors per page */
	u16 fdm_size;		/* FDM bytes per sector */
	u16 fdm_ecc_size;	/* FDM bytes covered by ECC, per sector */
	u16 fifo_size;		/* NFI FIFO depth, used for the empty check */

	bool bbm_swap;		/* BBM swap needed (see header comment) */
	bool empty_page_check;	/* HW reports all-0xff (empty) pages */
	u32 mastersta_mask;	/* busy bits in NFI_MASTERSTA for this SoC */
	u32 nandfsm_mask;	/* NAND FSM bits in NFI_STA for this SoC */

	const u8 *spare_sizes;	/* supported spare sizes, HW-encoded order */
	u32 num_spare_size;	/* number of entries in spare_sizes */
};
251764f1b74SChuanhong Guo
/* MT7622: 512-byte sectors, no BBM swap, no empty-page check. */
static const struct mtk_snand_caps mt7622_snand_caps = {
	.sector_size = 512,
	.max_sectors = 8,
	.fdm_size = 8,
	.fdm_ecc_size = 1,
	.fifo_size = 32,
	.bbm_swap = false,
	.empty_page_check = false,
	.mastersta_mask = NFI_MASTERSTA_MASK_7622,
	.nandfsm_mask = NFI_NAND_FSM_7622,
	.spare_sizes = mt7622_spare_sizes,
	.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};

/* MT7629: same NFI generation as MT7622 but requires the BBM swap. */
static const struct mtk_snand_caps mt7629_snand_caps = {
	.sector_size = 512,
	.max_sectors = 8,
	.fdm_size = 8,
	.fdm_ecc_size = 1,
	.fifo_size = 32,
	.bbm_swap = true,
	.empty_page_check = false,
	.mastersta_mask = NFI_MASTERSTA_MASK_7622,
	.nandfsm_mask = NFI_NAND_FSM_7622,
	.spare_sizes = mt7622_spare_sizes,
	.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};

/* MT7986: 1KB sectors, deeper FIFO, HW empty-page detection. */
static const struct mtk_snand_caps mt7986_snand_caps = {
	.sector_size = 1024,
	.max_sectors = 8,
	.fdm_size = 8,
	.fdm_ecc_size = 1,
	.fifo_size = 64,
	.bbm_swap = true,
	.empty_page_check = true,
	.mastersta_mask = NFI_MASTERSTA_MASK_7986,
	.nandfsm_mask = NFI_NAND_FSM_7986,
	.spare_sizes = mt7986_spare_sizes,
	.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
};
2937073888cSXiangsheng Hou
/* Currently programmed page geometry, cached by mtk_snand_setup_pagefmt(). */
struct mtk_snand_conf {
	size_t page_size;	/* main data bytes per page */
	size_t oob_size;	/* OOB bytes per page */
	u8 nsectors;		/* page_size / caps->sector_size */
	u8 spare_size;		/* spare (FDM + ECC) bytes per sector */
};
300764f1b74SChuanhong Guo
/* Driver state for one SNFI controller instance. */
struct mtk_snand {
	struct spi_controller *ctlr;	/* spi-mem controller we register */
	struct device *dev;
	struct clk *nfi_clk;		/* NFI core clock */
	struct clk *pad_clk;		/* SPI pad clock */
	struct clk *nfi_hclk;		/* NFI AHB clock */
	void __iomem *nfi_base;		/* MMIO base of the NFI/SNFI block */
	int irq;
	/* completed on op finish — presumably from the IRQ handler; the
	 * handler itself is outside this chunk */
	struct completion op_done;
	const struct mtk_snand_caps *caps;	/* per-SoC parameters */
	struct mtk_ecc_config *ecc_cfg;	/* config for the mtk-ecc engine */
	struct mtk_ecc *ecc;		/* external mtk-ecc engine handle */
	struct mtk_snand_conf nfi_cfg;	/* cached page format */
	struct mtk_ecc_stats ecc_stats;	/* stats from the last ECC decode */
	struct nand_ecc_engine ecc_eng;	/* embedded ECC engine object */
	bool autofmt;			/* auto-format active for page ops */
	u8 *buf;			/* DMA bounce buffer */
	size_t buf_len;			/* current bounce buffer size */
};
320764f1b74SChuanhong Guo
/* Map a nand_device to the mtk_snand that embeds its ECC engine. */
static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand)
{
	struct nand_ecc_engine *eng = nand->ecc.engine;

	return container_of(eng, struct mtk_snand, ecc_eng);
}
327764f1b74SChuanhong Guo
/*
 * Ensure the DMA bounce buffer can hold at least @size bytes, growing it
 * if needed and filling new storage with 0xff (NAND erased value).
 *
 * The replacement buffer is allocated before the old one is released:
 * the previous code freed first, so a failed kmalloc() left snf->buf ==
 * NULL while snf->buf_len kept its stale nonzero value, and a later call
 * with a smaller @size would return 0 and let callers dereference NULL.
 * With alloc-before-free, state stays consistent on -ENOMEM.
 *
 * Returns 0 on success or -ENOMEM.
 */
static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
{
	u8 *buf;

	if (snf->buf_len >= size)
		return 0;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Old contents are not preserved; callers refill the buffer. */
	kfree(snf->buf);
	snf->buf = buf;
	snf->buf_len = size;
	memset(buf, 0xff, size);
	return 0;
}
340764f1b74SChuanhong Guo
/* Read a 32-bit NFI register at offset @reg. */
static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
{
	return readl(snf->nfi_base + reg);
}
345764f1b74SChuanhong Guo
/* Write a 32-bit NFI register at offset @reg. */
static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
{
	writel(val, snf->nfi_base + reg);
}
350764f1b74SChuanhong Guo
/* Write a 16-bit NFI register at offset @reg. */
static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
{
	writew(val, snf->nfi_base + reg);
}
355764f1b74SChuanhong Guo
/* Read-modify-write an NFI register: clear the @clr bits, set @set. */
static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
{
	u32 cur = readl(snf->nfi_base + reg);

	writel((cur & ~clr) | set, snf->nfi_base + reg);
}
365764f1b74SChuanhong Guo
/*
 * Copy @len bytes starting at (possibly unaligned) register offset @reg
 * into @data, using word-aligned 32-bit reads and extracting bytes by
 * their position within each word.
 */
static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
{
	const u32 es = sizeof(u32);
	u32 pos, word = 0;

	for (pos = reg; pos < reg + len; pos++) {
		/* fetch a fresh word at the start and on each word boundary */
		if (pos == reg || !(pos % es))
			word = nfi_read32(snf, pos & ~(es - 1));

		*data++ = (u8)(word >> (8 * (pos % es)));
	}
}
377764f1b74SChuanhong Guo
/*
 * Reset the NFI core: flush the FIFO and reset the state machine, then
 * wait until the bus master, the FSMs and both FIFOs all report idle.
 * Returns 0 on success or an error from the polls on timeout.
 */
static int mtk_nfi_reset(struct mtk_snand *snf)
{
	u32 val, fifo_mask;
	int ret;

	/* kick off FIFO flush + NFI reset */
	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	/* wait for the NFI bus master to go idle (per-SoC bit mask) */
	ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				 !(val & snf->caps->mastersta_mask), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "NFI master is still busy after reset\n");
		return ret;
	}

	/* wait for both the NFI FSM and the per-SoC NAND FSM to go idle */
	ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
				 !(val & (NFI_FSM | snf->caps->nandfsm_mask)), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Failed to reset NFI\n");
		return ret;
	}

	/* both FIFO remain counters must read zero (FIFO empty) */
	fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				 !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}
412764f1b74SChuanhong Guo
/*
 * Software-reset the SNFI MAC: assert SW_RST, wait for the SPI state
 * machine to go idle, then program the FIFO read latency and CS deselect
 * cycle count. Returns 0 on success or the poll's timeout error.
 */
static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	u32 val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				 !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Failed to reset SNFI MAC\n");

	/* restore timing: FIFO read latency = 2, CS deselect = 10 cycles */
	nfi_write32(snf, SNF_MISC_CTL,
		    (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S));

	return ret;
}
430764f1b74SChuanhong Guo
/*
 * Execute the command currently staged in GPRAM: program the TX (@outlen)
 * and RX (@inlen) byte counts, enable and trigger the MAC, then wait for
 * WIP_READY followed by WIP clearing. The MAC is always disabled again
 * before returning. Returns 0 on success or a poll timeout error.
 */
static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
{
	int ret;
	u32 val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	/* keep MAC enabled while raising the trigger bit */
	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				 val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
				 0, SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Timed out waiting for WIP cleared\n");

cleanup:
	/* disable the MAC on both success and failure */
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}
459764f1b74SChuanhong Guo
/*
 * Execute a generic spi-mem op through the SNFI MAC.
 *
 * Command, address, dummy and (for writes) data bytes are packed in bus
 * order into little-endian 32-bit words of the GPRAM staging area, the
 * MAC is triggered, and for reads the RX bytes are copied back out of
 * GPRAM starting right after the TX portion. Returns 0 on success or an
 * error from mtk_snand_mac_trigger().
 */
static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
{
	u32 rx_len = 0;
	u32 reg_offs = 0;	/* running byte offset into GPRAM */
	u32 val = 0;		/* word currently being assembled */
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	int i, ret;
	u8 b;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		rx_len = op->data.nbytes;
		rx_buf = op->data.buf.in;
	} else {
		tx_buf = op->data.buf.out;
	}

	/*
	 * NOTE(review): return value deliberately ignored here — a failed
	 * reset is already logged and the trigger below would time out.
	 */
	mtk_snand_mac_reset(snf);

	/* opcode bytes, most significant byte first */
	for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) {
		b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
		val |= b << (8 * (reg_offs % 4));
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	/* address bytes, most significant byte first */
	for (i = 0; i < op->addr.nbytes; i++, reg_offs++) {
		b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
		val |= b << (8 * (reg_offs % 4));
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	/* dummy bytes only advance the offset; their value is don't-care */
	for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) {
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		for (i = 0; i < op->data.nbytes; i++, reg_offs++) {
			val |= tx_buf[i] << (8 * (reg_offs % 4));
			if (reg_offs % 4 == 3) {
				nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
				val = 0;
			}
		}
	}

	/* flush a final partially-filled word */
	if (reg_offs % 4)
		nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);

	for (i = 0; i < reg_offs; i += 4)
		dev_dbg(snf->dev, "%d: %08X", i,
			nfi_read32(snf, SNF_GPRAM + i));

	dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);

	ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
	if (ret)
		return ret;

	if (!rx_len)
		return 0;

	/* RX data lands in GPRAM immediately after the TX bytes */
	nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
	return 0;
}
533764f1b74SChuanhong Guo
/*
 * Program NFI_PAGEFMT for the requested page/OOB geometry, cache the
 * result in snf->nfi_cfg, and make sure the bounce buffer can hold a raw
 * (page + oob) transfer.
 *
 * Returns 0 on success (or if already configured), -ENOMEM from the
 * bounce buffer allocation, or -EOPNOTSUPP for unsupported geometry.
 */
static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
				   u32 oob_size)
{
	int spare_idx = -1;
	u32 spare_size, spare_size_shift, pagesize_idx;
	u32 sector_size_512;
	u8 nsectors;
	int i;

	// skip if it's already configured as required.
	if (snf->nfi_cfg.page_size == page_size &&
	    snf->nfi_cfg.oob_size == oob_size)
		return 0;

	nsectors = page_size / snf->caps->sector_size;
	if (nsectors > snf->caps->max_sectors) {
		dev_err(snf->dev, "too many sectors required.\n");
		goto err;
	}

	/* field positions in NFI_PAGEFMT depend on the sector size */
	if (snf->caps->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	/*
	 * The page-size field encodes a range whose meaning also depends on
	 * the sector size (e.g. value 1 is 2K with 512B sectors, 4K with
	 * 1KB sectors).
	 */
	switch (page_size) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		dev_err(snf->dev, "unsupported page size.\n");
		goto err;
	}

	spare_size = oob_size / nsectors;
	// If we're using the 1KB sector size, HW will automatically double the
	// spare size. We should only use half of the value in this case.
	if (snf->caps->sector_size == 1024)
		spare_size /= 2;

	/*
	 * Pick the largest supported spare size not exceeding the requested
	 * one; the array index is the value programmed into the register
	 * field below.
	 */
	for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
		if (snf->caps->spare_sizes[i] <= spare_size) {
			spare_size = snf->caps->spare_sizes[i];
			if (snf->caps->sector_size == 1024)
				spare_size *= 2;
			spare_idx = i;
			break;
		}
	}

	if (spare_idx < 0) {
		dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
		goto err;
	}

	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->caps->fdm_size << NFI_FDM_NUM_S) |
		    (spare_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	snf->nfi_cfg.page_size = page_size;
	snf->nfi_cfg.oob_size = oob_size;
	snf->nfi_cfg.nsectors = nsectors;
	snf->nfi_cfg.spare_size = spare_size;

	dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
		snf->caps->sector_size, spare_size, nsectors);
	return snand_prepare_bouncebuf(snf, page_size + oob_size);
err:
	dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
		oob_size);
	return -EOPNOTSUPP;
}
633764f1b74SChuanhong Guo
mtk_snand_ooblayout_ecc(struct mtd_info * mtd,int section,struct mtd_oob_region * oobecc)634764f1b74SChuanhong Guo static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
635764f1b74SChuanhong Guo struct mtd_oob_region *oobecc)
636764f1b74SChuanhong Guo {
637764f1b74SChuanhong Guo // ECC area is not accessible
638764f1b74SChuanhong Guo return -ERANGE;
639764f1b74SChuanhong Guo }
640764f1b74SChuanhong Guo
mtk_snand_ooblayout_free(struct mtd_info * mtd,int section,struct mtd_oob_region * oobfree)641764f1b74SChuanhong Guo static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
642764f1b74SChuanhong Guo struct mtd_oob_region *oobfree)
643764f1b74SChuanhong Guo {
644764f1b74SChuanhong Guo struct nand_device *nand = mtd_to_nanddev(mtd);
645764f1b74SChuanhong Guo struct mtk_snand *ms = nand_to_mtk_snand(nand);
646764f1b74SChuanhong Guo
647764f1b74SChuanhong Guo if (section >= ms->nfi_cfg.nsectors)
648764f1b74SChuanhong Guo return -ERANGE;
649764f1b74SChuanhong Guo
650764f1b74SChuanhong Guo oobfree->length = ms->caps->fdm_size - 1;
651764f1b74SChuanhong Guo oobfree->offset = section * ms->caps->fdm_size + 1;
652764f1b74SChuanhong Guo return 0;
653764f1b74SChuanhong Guo }
654764f1b74SChuanhong Guo
// OOB layout exposed to MTD: no accessible ECC area; the free bytes are
// carved out of the per-sector FDM region.
static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
	.ecc = mtk_snand_ooblayout_ecc,
	.free = mtk_snand_ooblayout_free,
};
659764f1b74SChuanhong Guo
mtk_snand_ecc_init_ctx(struct nand_device * nand)660764f1b74SChuanhong Guo static int mtk_snand_ecc_init_ctx(struct nand_device *nand)
661764f1b74SChuanhong Guo {
662764f1b74SChuanhong Guo struct mtk_snand *snf = nand_to_mtk_snand(nand);
663764f1b74SChuanhong Guo struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
664764f1b74SChuanhong Guo struct nand_ecc_props *reqs = &nand->ecc.requirements;
665764f1b74SChuanhong Guo struct nand_ecc_props *user = &nand->ecc.user_conf;
666764f1b74SChuanhong Guo struct mtd_info *mtd = nanddev_to_mtd(nand);
667764f1b74SChuanhong Guo int step_size = 0, strength = 0, desired_correction = 0, steps;
668764f1b74SChuanhong Guo bool ecc_user = false;
669764f1b74SChuanhong Guo int ret;
670764f1b74SChuanhong Guo u32 parity_bits, max_ecc_bytes;
671764f1b74SChuanhong Guo struct mtk_ecc_config *ecc_cfg;
672764f1b74SChuanhong Guo
673764f1b74SChuanhong Guo ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
674764f1b74SChuanhong Guo nand->memorg.oobsize);
675764f1b74SChuanhong Guo if (ret)
676764f1b74SChuanhong Guo return ret;
677764f1b74SChuanhong Guo
678764f1b74SChuanhong Guo ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
679764f1b74SChuanhong Guo if (!ecc_cfg)
680764f1b74SChuanhong Guo return -ENOMEM;
681764f1b74SChuanhong Guo
682764f1b74SChuanhong Guo nand->ecc.ctx.priv = ecc_cfg;
683764f1b74SChuanhong Guo
684764f1b74SChuanhong Guo if (user->step_size && user->strength) {
685764f1b74SChuanhong Guo step_size = user->step_size;
686764f1b74SChuanhong Guo strength = user->strength;
687764f1b74SChuanhong Guo ecc_user = true;
688764f1b74SChuanhong Guo } else if (reqs->step_size && reqs->strength) {
689764f1b74SChuanhong Guo step_size = reqs->step_size;
690764f1b74SChuanhong Guo strength = reqs->strength;
691764f1b74SChuanhong Guo }
692764f1b74SChuanhong Guo
693764f1b74SChuanhong Guo if (step_size && strength) {
694764f1b74SChuanhong Guo steps = mtd->writesize / step_size;
695764f1b74SChuanhong Guo desired_correction = steps * strength;
696764f1b74SChuanhong Guo strength = desired_correction / snf->nfi_cfg.nsectors;
697764f1b74SChuanhong Guo }
698764f1b74SChuanhong Guo
699764f1b74SChuanhong Guo ecc_cfg->mode = ECC_NFI_MODE;
700764f1b74SChuanhong Guo ecc_cfg->sectors = snf->nfi_cfg.nsectors;
701764f1b74SChuanhong Guo ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;
702764f1b74SChuanhong Guo
703764f1b74SChuanhong Guo // calculate the max possible strength under current page format
704764f1b74SChuanhong Guo parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
705764f1b74SChuanhong Guo max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
706764f1b74SChuanhong Guo ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits;
707764f1b74SChuanhong Guo mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);
708764f1b74SChuanhong Guo
709764f1b74SChuanhong Guo // if there's a user requested strength, find the minimum strength that
710764f1b74SChuanhong Guo // meets the requirement. Otherwise use the maximum strength which is
711764f1b74SChuanhong Guo // expected by BootROM.
712764f1b74SChuanhong Guo if (ecc_user && strength) {
713764f1b74SChuanhong Guo u32 s_next = ecc_cfg->strength - 1;
714764f1b74SChuanhong Guo
715764f1b74SChuanhong Guo while (1) {
716764f1b74SChuanhong Guo mtk_ecc_adjust_strength(snf->ecc, &s_next);
717764f1b74SChuanhong Guo if (s_next >= ecc_cfg->strength)
718764f1b74SChuanhong Guo break;
719764f1b74SChuanhong Guo if (s_next < strength)
720764f1b74SChuanhong Guo break;
721764f1b74SChuanhong Guo s_next = ecc_cfg->strength - 1;
722764f1b74SChuanhong Guo }
723764f1b74SChuanhong Guo }
724764f1b74SChuanhong Guo
725764f1b74SChuanhong Guo mtd_set_ooblayout(mtd, &mtk_snand_ooblayout);
726764f1b74SChuanhong Guo
727764f1b74SChuanhong Guo conf->step_size = snf->caps->sector_size;
728764f1b74SChuanhong Guo conf->strength = ecc_cfg->strength;
729764f1b74SChuanhong Guo
730764f1b74SChuanhong Guo if (ecc_cfg->strength < strength)
731764f1b74SChuanhong Guo dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
732764f1b74SChuanhong Guo strength);
733764f1b74SChuanhong Guo dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
734764f1b74SChuanhong Guo ecc_cfg->strength, snf->caps->sector_size);
735764f1b74SChuanhong Guo
736764f1b74SChuanhong Guo return 0;
737764f1b74SChuanhong Guo }
738764f1b74SChuanhong Guo
static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand)
{
	// Release the ECC configuration allocated by mtk_snand_ecc_init_ctx().
	kfree(nand_to_ecc_ctx(nand));
}
745764f1b74SChuanhong Guo
mtk_snand_ecc_prepare_io_req(struct nand_device * nand,struct nand_page_io_req * req)746764f1b74SChuanhong Guo static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand,
747764f1b74SChuanhong Guo struct nand_page_io_req *req)
748764f1b74SChuanhong Guo {
749764f1b74SChuanhong Guo struct mtk_snand *snf = nand_to_mtk_snand(nand);
750764f1b74SChuanhong Guo struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
751764f1b74SChuanhong Guo int ret;
752764f1b74SChuanhong Guo
753764f1b74SChuanhong Guo ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
754764f1b74SChuanhong Guo nand->memorg.oobsize);
755764f1b74SChuanhong Guo if (ret)
756764f1b74SChuanhong Guo return ret;
757764f1b74SChuanhong Guo snf->autofmt = true;
758764f1b74SChuanhong Guo snf->ecc_cfg = ecc_cfg;
759764f1b74SChuanhong Guo return 0;
760764f1b74SChuanhong Guo }
761764f1b74SChuanhong Guo
mtk_snand_ecc_finish_io_req(struct nand_device * nand,struct nand_page_io_req * req)762764f1b74SChuanhong Guo static int mtk_snand_ecc_finish_io_req(struct nand_device *nand,
763764f1b74SChuanhong Guo struct nand_page_io_req *req)
764764f1b74SChuanhong Guo {
765764f1b74SChuanhong Guo struct mtk_snand *snf = nand_to_mtk_snand(nand);
766764f1b74SChuanhong Guo struct mtd_info *mtd = nanddev_to_mtd(nand);
767764f1b74SChuanhong Guo
768764f1b74SChuanhong Guo snf->ecc_cfg = NULL;
769764f1b74SChuanhong Guo snf->autofmt = false;
770764f1b74SChuanhong Guo if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ))
771764f1b74SChuanhong Guo return 0;
772764f1b74SChuanhong Guo
773764f1b74SChuanhong Guo if (snf->ecc_stats.failed)
774764f1b74SChuanhong Guo mtd->ecc_stats.failed += snf->ecc_stats.failed;
775764f1b74SChuanhong Guo mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
776764f1b74SChuanhong Guo return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
777764f1b74SChuanhong Guo }
778764f1b74SChuanhong Guo
// ECC engine hooks registered with the generic NAND on-host ECC framework.
static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = {
	.init_ctx = mtk_snand_ecc_init_ctx,
	.cleanup_ctx = mtk_snand_ecc_cleanup_ctx,
	.prepare_io_req = mtk_snand_ecc_prepare_io_req,
	.finish_io_req = mtk_snand_ecc_finish_io_req,
};
785764f1b74SChuanhong Guo
static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
{
	u8 *oob = buf;
	int sect, byte;

	// Copy the per-sector FDM bytes out of the NFI registers into @buf.
	// Each sector exposes up to 8 FDM bytes split across two 32-bit
	// registers: FDML carries bytes 0-3, FDMM bytes 4-7, byte 0 in the
	// least significant position.
	for (sect = 0; sect < snf->nfi_cfg.nsectors; sect++) {
		u32 lo = nfi_read32(snf, NFI_FDML(sect));
		u32 hi = nfi_read32(snf, NFI_FDMM(sect));

		for (byte = 0; byte < snf->caps->fdm_size; byte++) {
			u32 word = (byte < 4) ? lo : hi;

			*oob++ = word >> ((byte % 4) * 8);
		}
	}
}
802764f1b74SChuanhong Guo
mtk_snand_write_fdm(struct mtk_snand * snf,const u8 * buf)803764f1b74SChuanhong Guo static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
804764f1b74SChuanhong Guo {
805764f1b74SChuanhong Guo u32 fdm_size = snf->caps->fdm_size;
806764f1b74SChuanhong Guo const u8 *oobptr = buf;
807764f1b74SChuanhong Guo u32 vall, valm;
808764f1b74SChuanhong Guo int i, j;
809764f1b74SChuanhong Guo
810764f1b74SChuanhong Guo for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
811764f1b74SChuanhong Guo vall = 0;
812764f1b74SChuanhong Guo valm = 0;
813764f1b74SChuanhong Guo
814764f1b74SChuanhong Guo for (j = 0; j < 8; j++) {
815764f1b74SChuanhong Guo if (j < 4)
816764f1b74SChuanhong Guo vall |= (j < fdm_size ? oobptr[j] : 0xff)
817764f1b74SChuanhong Guo << (j * 8);
818764f1b74SChuanhong Guo else
819764f1b74SChuanhong Guo valm |= (j < fdm_size ? oobptr[j] : 0xff)
820764f1b74SChuanhong Guo << ((j - 4) * 8);
821764f1b74SChuanhong Guo }
822764f1b74SChuanhong Guo
823764f1b74SChuanhong Guo nfi_write32(snf, NFI_FDML(i), vall);
824764f1b74SChuanhong Guo nfi_write32(snf, NFI_FDMM(i), valm);
825764f1b74SChuanhong Guo
826764f1b74SChuanhong Guo oobptr += fdm_size;
827764f1b74SChuanhong Guo }
828764f1b74SChuanhong Guo }
829764f1b74SChuanhong Guo
static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
{
	u32 data_pos, fdm_pos;

	if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
		return;

	// On flash, the bad block marker is byte [pagesize] of the raw page,
	// which falls inside the last sector's data once the interleaved
	// spare bytes are stripped. Swap it with the first FDM byte of the
	// last sector (kept in snf->buf after page_size).
	data_pos = snf->nfi_cfg.page_size -
		   (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
	fdm_pos = snf->nfi_cfg.page_size +
		  (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;

	swap(snf->buf[fdm_pos], buf[data_pos]);
}
846764f1b74SChuanhong Guo
mtk_snand_fdm_bm_swap(struct mtk_snand * snf)847764f1b74SChuanhong Guo static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
848764f1b74SChuanhong Guo {
849764f1b74SChuanhong Guo u32 fdm_bbm_pos1, fdm_bbm_pos2;
850764f1b74SChuanhong Guo
851764f1b74SChuanhong Guo if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
852764f1b74SChuanhong Guo return;
853764f1b74SChuanhong Guo
854764f1b74SChuanhong Guo // swap the first fdm byte in the first and the last sector.
855764f1b74SChuanhong Guo fdm_bbm_pos1 = snf->nfi_cfg.page_size;
856764f1b74SChuanhong Guo fdm_bbm_pos2 = snf->nfi_cfg.page_size +
857764f1b74SChuanhong Guo (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
858764f1b74SChuanhong Guo swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
859764f1b74SChuanhong Guo }
860764f1b74SChuanhong Guo
// Execute a SPI-NAND "read from cache" op through the NFI custom-read DMA
// path. With auto-format enabled the controller strips the spare bytes, the
// FDM data is fetched from registers, and the BBM swap is undone so the
// caller sees a linear Page+OOB buffer. Returns 0 or a negative error code.
static int mtk_snand_read_page_cache(struct mtk_snand *snf,
				     const struct spi_mem_op *op)
{
	u8 *buf = snf->buf;
	u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
	// the address part to be sent by the controller
	u32 op_addr = op->addr.val;
	// where to start copying data from bounce buffer
	u32 rd_offset = 0;
	u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth);
	u32 op_mode = 0;
	u32 dma_len = snf->buf_len;
	int ret = 0;
	u32 rd_mode, rd_bytes, val;
	dma_addr_t buf_dma;

	if (snf->autofmt) {
		u32 last_bit;
		u32 mask;

		// Auto-format DMA transfers only the page data; FDM comes
		// from registers, ECC parity is inaccessible.
		dma_len = snf->nfi_cfg.page_size;
		op_mode = CNFG_AUTO_FMT_EN;
		if (op->data.ecc)
			op_mode |= CNFG_HW_ECC_EN;
		// extract the plane bit:
		// Find the highest bit set in (pagesize+oobsize).
		// Bits higher than that in op->addr are kept and sent over SPI
		// Lower bits are used as an offset for copying data from DMA
		// bounce buffer.
		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
		mask = (1 << last_bit) - 1;
		rd_offset = op_addr & mask;
		op_addr &= ~mask;

		// check if we can dma to the caller memory
		if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
			buf = op->data.buf.in;
	}
	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	// command and dummy cycles
	nfi_write32(snf, SNF_RD_CTL2,
		    (dummy_clk << DATA_READ_DUMMY_S) |
			    (op->cmd.opcode << DATA_READ_CMD_S));

	// read address
	nfi_write32(snf, SNF_RD_CTL3, op_addr);

	// Set read op_mode
	if (op->data.buswidth == 4)
		rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD :
						   DATA_READ_MODE_X4;
	else if (op->data.buswidth == 2)
		rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL :
						   DATA_READ_MODE_X2;
	else
		rd_mode = DATA_READ_MODE_X1;
	rd_mode <<= DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
		  rd_mode | DATARD_CUSTOM_EN);

	// Set bytes to read
	rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
		   snf->nfi_cfg.nsectors;
	nfi_write32(snf, SNF_MISC_CTL2,
		    (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes);

	// NFI read prepare
	nfi_write16(snf, NFI_CNFG,
		    (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN |
			    CNFG_READ_MODE | CNFG_DMA_MODE | op_mode);

	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));

	buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(snf->dev, buf_dma);
	if (ret) {
		dev_err(snf->dev, "DMA mapping failed.\n");
		goto cleanup;
	}
	nfi_write32(snf, NFI_STRADDR, buf_dma);
	if (op->data.ecc) {
		snf->ecc_cfg->op = ECC_DECODE;
		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
		if (ret)
			goto cleanup_dma;
	}
	// Prepare for custom read interrupt
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	reinit_completion(&snf->op_done);

	// Trigger NFI into custom mode
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	// Start DMA read
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	// NOTE(review): this error path jumps past the cleanup2/cleanup_dma
	// labels, so the ECC engine is left enabled and the DMA mapping is
	// not released on timeout — verify whether that is intentional.
	if (!wait_for_completion_timeout(
		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
		dev_err(snf->dev, "DMA timed out for reading from cache.\n");
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	// Wait for BUS_SEC_CNTR returning expected value
	ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				 BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup2;
	}

	// Wait for bus becoming idle
	ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				 !(val & snf->caps->mastersta_mask), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for bus becoming idle\n");
		goto cleanup2;
	}

	if (op->data.ecc) {
		ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
		if (ret) {
			dev_err(snf->dev, "wait ecc done timeout\n");
			goto cleanup2;
		}
		// save status before disabling ecc
		mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
				  snf->nfi_cfg.nsectors);
	}

	// Unmap before touching the buffer contents below.
	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);

	if (snf->autofmt) {
		mtk_snand_read_fdm(snf, buf_fdm);
		if (snf->caps->bbm_swap) {
			mtk_snand_bm_swap(snf, buf);
			mtk_snand_fdm_bm_swap(snf);
		}
	}

	// copy data back
	if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
		// Erased page: report all-0xff data and clean ECC stats.
		memset(op->data.buf.in, 0xff, op->data.nbytes);
		snf->ecc_stats.bitflips = 0;
		snf->ecc_stats.failed = 0;
		snf->ecc_stats.corrected = 0;
	} else {
		if (buf == op->data.buf.in) {
			// Page data already DMA'd into the caller's buffer;
			// only the OOB tail still needs copying.
			u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
			u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;

			if (req_left)
				memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
				       buf_fdm,
				       cap_len < req_left ? cap_len : req_left);
		} else if (rd_offset < snf->buf_len) {
			// Partial read via the bounce buffer.
			u32 cap_len = snf->buf_len - rd_offset;

			if (op->data.nbytes < cap_len)
				cap_len = op->data.nbytes;
			memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
		}
	}
cleanup2:
	if (op->data.ecc)
		mtk_ecc_disable(snf->ecc);
cleanup_dma:
	// unmap dma only if any error happens. (otherwise it's done before
	// data copying)
	if (ret)
		dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
cleanup:
	// Stop read
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	// Clear SNF done flag
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	// Disable interrupt
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
	return ret;
}
1053764f1b74SChuanhong Guo
mtk_snand_write_page_cache(struct mtk_snand * snf,const struct spi_mem_op * op)1054764f1b74SChuanhong Guo static int mtk_snand_write_page_cache(struct mtk_snand *snf,
1055764f1b74SChuanhong Guo const struct spi_mem_op *op)
1056764f1b74SChuanhong Guo {
1057764f1b74SChuanhong Guo // the address part to be sent by the controller
1058764f1b74SChuanhong Guo u32 op_addr = op->addr.val;
1059764f1b74SChuanhong Guo // where to start copying data from bounce buffer
1060764f1b74SChuanhong Guo u32 wr_offset = 0;
1061764f1b74SChuanhong Guo u32 op_mode = 0;
1062764f1b74SChuanhong Guo int ret = 0;
1063764f1b74SChuanhong Guo u32 wr_mode = 0;
1064764f1b74SChuanhong Guo u32 dma_len = snf->buf_len;
1065764f1b74SChuanhong Guo u32 wr_bytes, val;
1066764f1b74SChuanhong Guo size_t cap_len;
1067764f1b74SChuanhong Guo dma_addr_t buf_dma;
1068764f1b74SChuanhong Guo
1069764f1b74SChuanhong Guo if (snf->autofmt) {
1070764f1b74SChuanhong Guo u32 last_bit;
1071764f1b74SChuanhong Guo u32 mask;
1072764f1b74SChuanhong Guo
1073764f1b74SChuanhong Guo dma_len = snf->nfi_cfg.page_size;
1074764f1b74SChuanhong Guo op_mode = CNFG_AUTO_FMT_EN;
1075764f1b74SChuanhong Guo if (op->data.ecc)
1076764f1b74SChuanhong Guo op_mode |= CNFG_HW_ECC_EN;
1077764f1b74SChuanhong Guo
1078764f1b74SChuanhong Guo last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
1079764f1b74SChuanhong Guo mask = (1 << last_bit) - 1;
1080764f1b74SChuanhong Guo wr_offset = op_addr & mask;
1081764f1b74SChuanhong Guo op_addr &= ~mask;
1082764f1b74SChuanhong Guo }
1083764f1b74SChuanhong Guo mtk_snand_mac_reset(snf);
1084764f1b74SChuanhong Guo mtk_nfi_reset(snf);
1085764f1b74SChuanhong Guo
1086764f1b74SChuanhong Guo if (wr_offset)
1087764f1b74SChuanhong Guo memset(snf->buf, 0xff, wr_offset);
1088764f1b74SChuanhong Guo
1089764f1b74SChuanhong Guo cap_len = snf->buf_len - wr_offset;
1090764f1b74SChuanhong Guo if (op->data.nbytes < cap_len)
1091764f1b74SChuanhong Guo cap_len = op->data.nbytes;
1092764f1b74SChuanhong Guo memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
1093764f1b74SChuanhong Guo if (snf->autofmt) {
1094764f1b74SChuanhong Guo if (snf->caps->bbm_swap) {
1095764f1b74SChuanhong Guo mtk_snand_fdm_bm_swap(snf);
1096764f1b74SChuanhong Guo mtk_snand_bm_swap(snf, snf->buf);
1097764f1b74SChuanhong Guo }
1098764f1b74SChuanhong Guo mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
1099764f1b74SChuanhong Guo }
1100764f1b74SChuanhong Guo
1101764f1b74SChuanhong Guo // Command
1102764f1b74SChuanhong Guo nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));
1103764f1b74SChuanhong Guo
1104764f1b74SChuanhong Guo // write address
1105764f1b74SChuanhong Guo nfi_write32(snf, SNF_PG_CTL2, op_addr);
1106764f1b74SChuanhong Guo
1107764f1b74SChuanhong Guo // Set read op_mode
1108764f1b74SChuanhong Guo if (op->data.buswidth == 4)
1109764f1b74SChuanhong Guo wr_mode = PG_LOAD_X4_EN;
1110764f1b74SChuanhong Guo
1111764f1b74SChuanhong Guo nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
1112764f1b74SChuanhong Guo wr_mode | PG_LOAD_CUSTOM_EN);
1113764f1b74SChuanhong Guo
1114764f1b74SChuanhong Guo // Set bytes to write
1115764f1b74SChuanhong Guo wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
1116764f1b74SChuanhong Guo snf->nfi_cfg.nsectors;
1117764f1b74SChuanhong Guo nfi_write32(snf, SNF_MISC_CTL2,
1118764f1b74SChuanhong Guo (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);
1119764f1b74SChuanhong Guo
1120764f1b74SChuanhong Guo // NFI write prepare
1121764f1b74SChuanhong Guo nfi_write16(snf, NFI_CNFG,
1122764f1b74SChuanhong Guo (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
1123764f1b74SChuanhong Guo CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);
1124764f1b74SChuanhong Guo
1125764f1b74SChuanhong Guo nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
1126764f1b74SChuanhong Guo buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
112773c1a515SDan Carpenter ret = dma_mapping_error(snf->dev, buf_dma);
112873c1a515SDan Carpenter if (ret) {
1129764f1b74SChuanhong Guo dev_err(snf->dev, "DMA mapping failed.\n");
1130764f1b74SChuanhong Guo goto cleanup;
1131764f1b74SChuanhong Guo }
1132764f1b74SChuanhong Guo nfi_write32(snf, NFI_STRADDR, buf_dma);
1133764f1b74SChuanhong Guo if (op->data.ecc) {
1134764f1b74SChuanhong Guo snf->ecc_cfg->op = ECC_ENCODE;
1135764f1b74SChuanhong Guo ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
1136764f1b74SChuanhong Guo if (ret)
1137764f1b74SChuanhong Guo goto cleanup_dma;
1138764f1b74SChuanhong Guo }
1139764f1b74SChuanhong Guo // Prepare for custom write interrupt
1140764f1b74SChuanhong Guo nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
1141764f1b74SChuanhong Guo reinit_completion(&snf->op_done);
1142764f1b74SChuanhong Guo ;
1143764f1b74SChuanhong Guo
1144764f1b74SChuanhong Guo // Trigger NFI into custom mode
1145764f1b74SChuanhong Guo nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
1146764f1b74SChuanhong Guo
1147764f1b74SChuanhong Guo // Start DMA write
1148764f1b74SChuanhong Guo nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
1149764f1b74SChuanhong Guo nfi_write16(snf, NFI_STRDATA, STR_DATA);
1150764f1b74SChuanhong Guo
1151764f1b74SChuanhong Guo if (!wait_for_completion_timeout(
1152764f1b74SChuanhong Guo &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
1153764f1b74SChuanhong Guo dev_err(snf->dev, "DMA timed out for program load.\n");
1154764f1b74SChuanhong Guo ret = -ETIMEDOUT;
1155764f1b74SChuanhong Guo goto cleanup_ecc;
1156764f1b74SChuanhong Guo }
1157764f1b74SChuanhong Guo
1158764f1b74SChuanhong Guo // Wait for NFI_SEC_CNTR returning expected value
1159764f1b74SChuanhong Guo ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
1160764f1b74SChuanhong Guo NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
1161764f1b74SChuanhong Guo SNFI_POLL_INTERVAL);
1162764f1b74SChuanhong Guo if (ret)
1163764f1b74SChuanhong Guo dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");
1164764f1b74SChuanhong Guo
1165764f1b74SChuanhong Guo cleanup_ecc:
1166764f1b74SChuanhong Guo if (op->data.ecc)
1167764f1b74SChuanhong Guo mtk_ecc_disable(snf->ecc);
1168764f1b74SChuanhong Guo cleanup_dma:
1169764f1b74SChuanhong Guo dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
1170764f1b74SChuanhong Guo cleanup:
1171764f1b74SChuanhong Guo // Stop write
1172764f1b74SChuanhong Guo nfi_write32(snf, NFI_CON, 0);
1173764f1b74SChuanhong Guo nfi_write16(snf, NFI_CNFG, 0);
1174764f1b74SChuanhong Guo
1175764f1b74SChuanhong Guo // Clear SNF done flag
1176764f1b74SChuanhong Guo nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
1177764f1b74SChuanhong Guo nfi_write32(snf, SNF_STA_CTL1, 0);
1178764f1b74SChuanhong Guo
1179764f1b74SChuanhong Guo // Disable interrupt
1180764f1b74SChuanhong Guo nfi_read32(snf, NFI_INTR_STA);
1181764f1b74SChuanhong Guo nfi_write32(snf, NFI_INTR_EN, 0);
1182764f1b74SChuanhong Guo
1183764f1b74SChuanhong Guo nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
1184764f1b74SChuanhong Guo
1185764f1b74SChuanhong Guo return ret;
1186764f1b74SChuanhong Guo }
1187764f1b74SChuanhong Guo
1188764f1b74SChuanhong Guo /**
1189764f1b74SChuanhong Guo * mtk_snand_is_page_ops() - check if the op is a controller supported page op.
1190*f2b5b820SBartosz Golaszewski * @op: spi-mem op to check
1191764f1b74SChuanhong Guo *
1192764f1b74SChuanhong Guo * Check whether op can be executed with read_from_cache or program_load
1193764f1b74SChuanhong Guo * mode in the controller.
1194764f1b74SChuanhong Guo * This controller can execute typical Read From Cache and Program Load
1195764f1b74SChuanhong Guo * instructions found on SPI-NAND with 2-byte address.
1196764f1b74SChuanhong Guo * DTR and cmd buswidth & nbytes should be checked before calling this.
1197764f1b74SChuanhong Guo *
1198764f1b74SChuanhong Guo * Return: true if the op matches the instruction template
1199764f1b74SChuanhong Guo */
mtk_snand_is_page_ops(const struct spi_mem_op * op)1200764f1b74SChuanhong Guo static bool mtk_snand_is_page_ops(const struct spi_mem_op *op)
1201764f1b74SChuanhong Guo {
1202764f1b74SChuanhong Guo if (op->addr.nbytes != 2)
1203764f1b74SChuanhong Guo return false;
1204764f1b74SChuanhong Guo
1205764f1b74SChuanhong Guo if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
1206764f1b74SChuanhong Guo op->addr.buswidth != 4)
1207764f1b74SChuanhong Guo return false;
1208764f1b74SChuanhong Guo
1209764f1b74SChuanhong Guo // match read from page instructions
1210764f1b74SChuanhong Guo if (op->data.dir == SPI_MEM_DATA_IN) {
1211764f1b74SChuanhong Guo // check dummy cycle first
1212764f1b74SChuanhong Guo if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth >
1213764f1b74SChuanhong Guo DATA_READ_MAX_DUMMY)
1214764f1b74SChuanhong Guo return false;
1215764f1b74SChuanhong Guo // quad io / quad out
1216764f1b74SChuanhong Guo if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) &&
1217764f1b74SChuanhong Guo op->data.buswidth == 4)
1218764f1b74SChuanhong Guo return true;
1219764f1b74SChuanhong Guo
1220764f1b74SChuanhong Guo // dual io / dual out
1221764f1b74SChuanhong Guo if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) &&
1222764f1b74SChuanhong Guo op->data.buswidth == 2)
1223764f1b74SChuanhong Guo return true;
1224764f1b74SChuanhong Guo
1225764f1b74SChuanhong Guo // standard spi
1226764f1b74SChuanhong Guo if (op->addr.buswidth == 1 && op->data.buswidth == 1)
1227764f1b74SChuanhong Guo return true;
1228764f1b74SChuanhong Guo } else if (op->data.dir == SPI_MEM_DATA_OUT) {
1229764f1b74SChuanhong Guo // check dummy cycle first
1230764f1b74SChuanhong Guo if (op->dummy.nbytes)
1231764f1b74SChuanhong Guo return false;
1232764f1b74SChuanhong Guo // program load quad out
1233764f1b74SChuanhong Guo if (op->addr.buswidth == 1 && op->data.buswidth == 4)
1234764f1b74SChuanhong Guo return true;
1235764f1b74SChuanhong Guo // standard spi
1236764f1b74SChuanhong Guo if (op->addr.buswidth == 1 && op->data.buswidth == 1)
1237764f1b74SChuanhong Guo return true;
1238764f1b74SChuanhong Guo }
1239764f1b74SChuanhong Guo return false;
1240764f1b74SChuanhong Guo }
1241764f1b74SChuanhong Guo
mtk_snand_supports_op(struct spi_mem * mem,const struct spi_mem_op * op)1242764f1b74SChuanhong Guo static bool mtk_snand_supports_op(struct spi_mem *mem,
1243764f1b74SChuanhong Guo const struct spi_mem_op *op)
1244764f1b74SChuanhong Guo {
1245764f1b74SChuanhong Guo if (!spi_mem_default_supports_op(mem, op))
1246764f1b74SChuanhong Guo return false;
1247764f1b74SChuanhong Guo if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
1248764f1b74SChuanhong Guo return false;
1249764f1b74SChuanhong Guo if (mtk_snand_is_page_ops(op))
1250764f1b74SChuanhong Guo return true;
1251764f1b74SChuanhong Guo return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) &&
1252764f1b74SChuanhong Guo (op->dummy.nbytes == 0 || op->dummy.buswidth == 1) &&
1253764f1b74SChuanhong Guo (op->data.nbytes == 0 || op->data.buswidth == 1));
1254764f1b74SChuanhong Guo }
1255764f1b74SChuanhong Guo
mtk_snand_adjust_op_size(struct spi_mem * mem,struct spi_mem_op * op)1256764f1b74SChuanhong Guo static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
1257764f1b74SChuanhong Guo {
1258efdf4c99SYang Yingliang struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->controller);
1259764f1b74SChuanhong Guo // page ops transfer size must be exactly ((sector_size + spare_size) *
1260764f1b74SChuanhong Guo // nsectors). Limit the op size if the caller requests more than that.
1261764f1b74SChuanhong Guo // exec_op will read more than needed and discard the leftover if the
1262764f1b74SChuanhong Guo // caller requests less data.
1263764f1b74SChuanhong Guo if (mtk_snand_is_page_ops(op)) {
1264764f1b74SChuanhong Guo size_t l;
1265764f1b74SChuanhong Guo // skip adjust_op_size for page ops
1266764f1b74SChuanhong Guo if (ms->autofmt)
1267764f1b74SChuanhong Guo return 0;
1268764f1b74SChuanhong Guo l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
1269764f1b74SChuanhong Guo l *= ms->nfi_cfg.nsectors;
1270764f1b74SChuanhong Guo if (op->data.nbytes > l)
1271764f1b74SChuanhong Guo op->data.nbytes = l;
1272764f1b74SChuanhong Guo } else {
1273764f1b74SChuanhong Guo size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
1274764f1b74SChuanhong Guo
1275764f1b74SChuanhong Guo if (hl >= SNF_GPRAM_SIZE)
1276764f1b74SChuanhong Guo return -EOPNOTSUPP;
1277764f1b74SChuanhong Guo if (op->data.nbytes > SNF_GPRAM_SIZE - hl)
1278764f1b74SChuanhong Guo op->data.nbytes = SNF_GPRAM_SIZE - hl;
1279764f1b74SChuanhong Guo }
1280764f1b74SChuanhong Guo return 0;
1281764f1b74SChuanhong Guo }
1282764f1b74SChuanhong Guo
mtk_snand_exec_op(struct spi_mem * mem,const struct spi_mem_op * op)1283764f1b74SChuanhong Guo static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
1284764f1b74SChuanhong Guo {
1285efdf4c99SYang Yingliang struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->controller);
1286764f1b74SChuanhong Guo
1287764f1b74SChuanhong Guo dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
1288764f1b74SChuanhong Guo op->addr.val, op->addr.buswidth, op->addr.nbytes,
1289764f1b74SChuanhong Guo op->data.buswidth, op->data.nbytes);
1290764f1b74SChuanhong Guo if (mtk_snand_is_page_ops(op)) {
1291764f1b74SChuanhong Guo if (op->data.dir == SPI_MEM_DATA_IN)
1292764f1b74SChuanhong Guo return mtk_snand_read_page_cache(ms, op);
1293764f1b74SChuanhong Guo else
1294764f1b74SChuanhong Guo return mtk_snand_write_page_cache(ms, op);
1295764f1b74SChuanhong Guo } else {
1296764f1b74SChuanhong Guo return mtk_snand_mac_io(ms, op);
1297764f1b74SChuanhong Guo }
1298764f1b74SChuanhong Guo }
1299764f1b74SChuanhong Guo
// spi-mem callbacks: op size clamping, capability filtering and execution.
static const struct spi_controller_mem_ops mtk_snand_mem_ops = {
	.adjust_op_size = mtk_snand_adjust_op_size,
	.supports_op = mtk_snand_supports_op,
	.exec_op = mtk_snand_exec_op,
};
1305764f1b74SChuanhong Guo
// Advertise ECC capability so the spi-mem core accepts ECC-enabled ops.
static const struct spi_controller_mem_caps mtk_snand_mem_caps = {
	.ecc = true,
};
1309764f1b74SChuanhong Guo
// Interrupt handler. An enabled, pending NFI interrupt is treated as
// one-shot: it is masked again (NFI_INTR_EN = 0) and the waiter blocked
// on op_done is woken. Anything else is reported as not ours.
static irqreturn_t mtk_snand_irq(int irq, void *id)
{
	struct mtk_snand *snf = id;
	u32 sta, ien;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	// ignore interrupts that are pending but not currently enabled
	if (!(sta & ien))
		return IRQ_NONE;

	nfi_write32(snf, NFI_INTR_EN, 0);
	complete(&snf->op_done);
	return IRQ_HANDLED;
}
1325764f1b74SChuanhong Guo
// Supported SoCs; .data points at the matching per-chip capability table.
static const struct of_device_id mtk_snand_ids[] = {
	{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
	{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
	{ .compatible = "mediatek,mt7986-snand", .data = &mt7986_snand_caps },
	{},
};

MODULE_DEVICE_TABLE(of, mtk_snand_ids);
1334764f1b74SChuanhong Guo
// Probe: map the NFI register block, take a reference on the ECC engine,
// enable the clocks, request the completion IRQ, switch the controller
// into SNFI (SPI) mode, apply optional DT delay tuning, program an
// initial 2KiB+64B page format, register the pipelined ECC engine and
// finally register the SPI controller.
static int mtk_snand_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *dev_id;
	struct spi_controller *ctlr;
	struct mtk_snand *ms;
	unsigned long spi_freq;
	u32 val = 0;
	int ret;

	dev_id = of_match_node(mtk_snand_ids, np);
	if (!dev_id)
		return -EINVAL;

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*ms));
	if (!ctlr)
		return -ENOMEM;
	platform_set_drvdata(pdev, ctlr);

	ms = spi_controller_get_devdata(ctlr);

	ms->ctlr = ctlr;
	ms->caps = dev_id->data;

	// The ECC engine is mandatory: NULL (no engine described) is an error.
	ms->ecc = of_mtk_ecc_get(np);
	if (IS_ERR(ms->ecc))
		return PTR_ERR(ms->ecc);
	else if (!ms->ecc)
		return -ENODEV;

	ms->nfi_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ms->nfi_base)) {
		ret = PTR_ERR(ms->nfi_base);
		goto release_ecc;
	}

	ms->dev = &pdev->dev;

	// Clocks are devm-managed and stay enabled for the device lifetime.
	ms->nfi_clk = devm_clk_get_enabled(&pdev->dev, "nfi_clk");
	if (IS_ERR(ms->nfi_clk)) {
		ret = PTR_ERR(ms->nfi_clk);
		dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret);
		goto release_ecc;
	}

	ms->pad_clk = devm_clk_get_enabled(&pdev->dev, "pad_clk");
	if (IS_ERR(ms->pad_clk)) {
		ret = PTR_ERR(ms->pad_clk);
		dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret);
		goto release_ecc;
	}

	// nfi_hclk is optional: its absence is not an error.
	ms->nfi_hclk = devm_clk_get_optional_enabled(&pdev->dev, "nfi_hclk");
	if (IS_ERR(ms->nfi_hclk)) {
		ret = PTR_ERR(ms->nfi_hclk);
		dev_err(&pdev->dev, "unable to get nfi_hclk, err = %d\n", ret);
		goto release_ecc;
	}

	init_completion(&ms->op_done);

	ms->irq = platform_get_irq(pdev, 0);
	if (ms->irq < 0) {
		ret = ms->irq;
		goto release_ecc;
	}
	ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0,
			       "mtk-snand", ms);
	if (ret) {
		dev_err(ms->dev, "failed to request snfi irq\n");
		goto release_ecc;
	}

	// DMA buffers must live below 4 GiB.
	ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(ms->dev, "failed to set dma mask\n");
		goto release_ecc;
	}

	// switch to SNFI mode
	nfi_write32(ms, SNF_CFG, SPI_MODE);

	// Optional DT tuning: scale the ns value into the delay-line range.
	ret = of_property_read_u32(np, "rx-sample-delay-ns", &val);
	if (!ret)
		nfi_rmw32(ms, SNF_DLY_CTL3, SFCK_SAM_DLY,
			  val * SFCK_SAM_DLY_RANGE / SFCK_SAM_DLY_TOTAL);

	// Optional DT tuning: convert ns into pad-clock periods.
	ret = of_property_read_u32(np, "mediatek,rx-latch-latency-ns", &val);
	if (!ret) {
		spi_freq = clk_get_rate(ms->pad_clk);
		val = DIV_ROUND_CLOSEST(val, NSEC_PER_SEC / spi_freq);
		nfi_rmw32(ms, SNF_MISC_CTL, DATA_READ_LATCH_LAT,
			  val << DATA_READ_LATCH_LAT_S);
	}

	// setup an initial page format for ops matching page_cache_op template
	// before ECC is called.
	ret = mtk_snand_setup_pagefmt(ms, SZ_2K, SZ_64);
	if (ret) {
		dev_err(ms->dev, "failed to set initial page format\n");
		goto release_ecc;
	}

	// setup ECC engine
	ms->ecc_eng.dev = &pdev->dev;
	ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
	ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops;
	ms->ecc_eng.priv = ms;

	ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng);
	if (ret) {
		dev_err(&pdev->dev, "failed to register ecc engine.\n");
		goto release_ecc;
	}

	ctlr->num_chipselect = 1;
	ctlr->mem_ops = &mtk_snand_mem_ops;
	ctlr->mem_caps = &mtk_snand_mem_caps;
	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->dev.of_node = pdev->dev.of_node;
	ret = spi_register_controller(ctlr);
	if (ret) {
		dev_err(&pdev->dev, "spi_register_controller failed.\n");
		goto release_ecc;
	}

	return 0;
release_ecc:
	// NOTE(review): failures after mtk_snand_setup_pagefmt() reach here
	// without freeing ms->buf (only freed in remove) — possible leak,
	// confirm buffer ownership in mtk_snand_setup_pagefmt().
	mtk_ecc_release(ms->ecc);
	return ret;
}
1467764f1b74SChuanhong Guo
// Teardown mirrors probe in reverse: unregister the controller so no new
// ops arrive, drop the ECC engine reference taken with of_mtk_ecc_get(),
// then free the driver's bounce buffer (ms->buf).
static void mtk_snand_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct mtk_snand *ms = spi_controller_get_devdata(ctlr);

	spi_unregister_controller(ctlr);
	mtk_ecc_release(ms->ecc);
	kfree(ms->buf);
}
1477764f1b74SChuanhong Guo
// Platform driver glue; matching is done via the OF table above.
static struct platform_driver mtk_snand_driver = {
	.probe = mtk_snand_probe,
	.remove_new = mtk_snand_remove,
	.driver = {
		.name = "mtk-snand",
		.of_match_table = mtk_snand_ids,
	},
};

module_platform_driver(mtk_snand_driver);
1488764f1b74SChuanhong Guo
1489764f1b74SChuanhong Guo MODULE_LICENSE("GPL");
1490764f1b74SChuanhong Guo MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
1491764f1b74SChuanhong Guo MODULE_DESCRIPTION("MeidaTek SPI-NAND Flash Controller Driver");
1492