// SPDX-License-Identifier: GPL-2.0+
/*
 * Cadence NAND flash controller driver
 *
 * Copyright (C) 2019 Cadence
 *
 * Author: Piotr Sroka <piotrs@cadence.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>

/*
 * HPNFC can work in 3 modes:
 * - PIO - can work in master or slave DMA
 * - CDMA - needs master DMA for accessing command descriptors.
 * - Generic mode - can use only slave DMA.
 * CDMA and PIO modes can be used to execute only base commands.
 * Generic mode can be used to execute any command
 * on NAND flash memory. The driver uses CDMA mode for
 * block erasing, page reading and page programming.
 * Generic mode is used for executing the rest of the commands.
 */

#define MAX_ADDRESS_CYC		6
#define MAX_ERASE_ADDRESS_CYC	3
#define MAX_DATA_SIZE		0xFFFC
#define DMA_DATA_SIZE_ALIGN	8

/* Register definition. */
/*
 * Command register 0.
 * Writing data to this register will initiate a new transaction
 * of the NF controller.
 */
#define CMD_REG0			0x0000
/* Command type field mask. */
#define CMD_REG0_CT			GENMASK(31, 30)
/* Command type CDMA. */
#define CMD_REG0_CT_CDMA		0uL
/* Command type generic. */
#define CMD_REG0_CT_GEN			3uL
/* Command thread number field mask. */
#define CMD_REG0_TN			GENMASK(27, 24)

/* Command register 2. */
#define CMD_REG2			0x0008
/* Command register 3. */
#define CMD_REG3			0x000C
/* Pointer register to select which thread status will be selected. */
#define CMD_STATUS_PTR			0x0010
/* Command status register for selected thread. */
#define CMD_STATUS			0x0014

/* Interrupt status register. */
#define INTR_STATUS			0x0110
#define INTR_STATUS_SDMA_ERR		BIT(22)
#define INTR_STATUS_SDMA_TRIGG		BIT(21)
#define INTR_STATUS_UNSUPP_CMD		BIT(19)
#define INTR_STATUS_DDMA_TERR		BIT(18)
#define INTR_STATUS_CDMA_TERR		BIT(17)
#define INTR_STATUS_CDMA_IDL		BIT(16)

/* Interrupt enable register. */
#define INTR_ENABLE			0x0114
#define INTR_ENABLE_INTR_EN		BIT(31)
#define INTR_ENABLE_SDMA_ERR_EN		BIT(22)
#define INTR_ENABLE_SDMA_TRIGG_EN	BIT(21)
#define INTR_ENABLE_UNSUPP_CMD_EN	BIT(19)
#define INTR_ENABLE_DDMA_TERR_EN	BIT(18)
#define INTR_ENABLE_CDMA_TERR_EN	BIT(17)
#define INTR_ENABLE_CDMA_IDLE_EN	BIT(16)

/* Controller internal state. */
#define CTRL_STATUS			0x0118
#define CTRL_STATUS_INIT_COMP		BIT(9)
#define CTRL_STATUS_CTRL_BUSY		BIT(8)

/* Command Engine threads state. */
#define TRD_STATUS			0x0120

/* Command Engine interrupt thread error status. */
#define TRD_ERR_INT_STATUS		0x0128
/* Command Engine interrupt thread error enable. */
#define TRD_ERR_INT_STATUS_EN		0x0130
/* Command Engine interrupt thread complete status. */
#define TRD_COMP_INT_STATUS		0x0138

/*
 * Transfer config 0 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_0			0x0400
/* Offset value from the beginning of the page. */
#define TRAN_CFG_0_OFFSET		GENMASK(31, 16)
/* Number of sectors to transfer within a single NF device's page. */
#define TRAN_CFG_0_SEC_CNT		GENMASK(7, 0)

/*
 * Transfer config 1 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_1			0x0404
/* Size of last data sector. */
#define TRAN_CFG_1_LAST_SEC_SIZE	GENMASK(31, 16)
/* Size of not-last data sector. */
#define TRAN_CFG_1_SECTOR_SIZE		GENMASK(15, 0)

/* ECC engine configuration register 0. */
#define ECC_CONFIG_0			0x0428
/* Correction strength. */
#define ECC_CONFIG_0_CORR_STR		GENMASK(10, 8)
/* Enable erased pages detection mechanism. */
#define ECC_CONFIG_0_ERASE_DET_EN	BIT(1)
/* Enable controller ECC check bits generation and correction. */
#define ECC_CONFIG_0_ECC_EN		BIT(0)

/* ECC engine configuration register 1. */
#define ECC_CONFIG_1			0x042C

/* Multiplane settings register. */
#define MULTIPLANE_CFG			0x0434
/* Cache operation settings. */
#define CACHE_CFG			0x0438

/* DMA settings register. */
#define DMA_SETINGS			0x043C
/* Enable SDMA error report on access to an unprepared slave DMA interface. */
#define DMA_SETINGS_SDMA_ERR_RSP	BIT(17)

/* Transferred data block size for the slave DMA module. */
#define SDMA_SIZE			0x0440

/* Thread number associated with transferred data block
 * for the slave DMA module.
 */
#define SDMA_TRD_NUM			0x0444
/* Thread number mask. */
#define SDMA_TRD_NUM_SDMA_TRD		GENMASK(2, 0)

#define CONTROL_DATA_CTRL		0x0494
/* Control data size mask. */
#define CONTROL_DATA_CTRL_SIZE		GENMASK(15, 0)

#define CTRL_VERSION			0x800
#define CTRL_VERSION_REV		GENMASK(7, 0)

/* Available hardware features of the controller. */
#define CTRL_FEATURES			0x804
/* Support for NV-DDR2/3 work mode. */
#define CTRL_FEATURES_NVDDR_2_3		BIT(28)
/* Support for NV-DDR work mode. */
#define CTRL_FEATURES_NVDDR		BIT(27)
/* Support for asynchronous work mode. */
#define CTRL_FEATURES_ASYNC		BIT(26)
/* Number of banks supported by the hardware. */
#define CTRL_FEATURES_N_BANKS		GENMASK(25, 24)
/* Slave and Master DMA data width. */
#define CTRL_FEATURES_DMA_DWITH64	BIT(21)
/* Availability of Control Data feature. */
#define CTRL_FEATURES_CONTROL_DATA	BIT(10)

/* BCH Engine identification register 0 - correction strengths. */
#define BCH_CFG_0			0x838
#define BCH_CFG_0_CORR_CAP_0		GENMASK(7, 0)
#define BCH_CFG_0_CORR_CAP_1		GENMASK(15, 8)
#define BCH_CFG_0_CORR_CAP_2		GENMASK(23, 16)
#define BCH_CFG_0_CORR_CAP_3		GENMASK(31, 24)

/* BCH Engine identification register 1 - correction strengths. */
#define BCH_CFG_1			0x83C
#define BCH_CFG_1_CORR_CAP_4		GENMASK(7, 0)
#define BCH_CFG_1_CORR_CAP_5		GENMASK(15, 8)
#define BCH_CFG_1_CORR_CAP_6		GENMASK(23, 16)
#define BCH_CFG_1_CORR_CAP_7		GENMASK(31, 24)

/* BCH Engine identification register 2 - sector sizes. */
#define BCH_CFG_2			0x840
#define BCH_CFG_2_SECT_0		GENMASK(15, 0)
#define BCH_CFG_2_SECT_1		GENMASK(31, 16)

/* BCH Engine identification register 3. */
#define BCH_CFG_3			0x844
#define BCH_CFG_3_METADATA_SIZE		GENMASK(23, 16)

/* Ready/Busy# line status. */
#define RBN_SETINGS			0x1004

/* Common settings. */
#define COMMON_SET			0x1008
#define OPR_MODE_NVDDR			BIT(0)
/* 16 bit device connected to the NAND Flash interface. */
#define COMMON_SET_DEVICE_16BIT		BIT(8)

/* Skip_bytes registers. */
#define SKIP_BYTES_CONF			0x100C
#define SKIP_BYTES_MARKER_VALUE		GENMASK(31, 16)
#define SKIP_BYTES_NUM_OF_BYTES		GENMASK(7, 0)

#define SKIP_BYTES_OFFSET		0x1010
#define SKIP_BYTES_OFFSET_VALUE		GENMASK(23, 0)

/* Timings configuration. */
#define TOGGLE_TIMINGS_0		0x1014
#define TOGGLE_TIMINGS_1		0x1018

#define ASYNC_TOGGLE_TIMINGS		0x101c
#define ASYNC_TOGGLE_TIMINGS_TRH	GENMASK(28, 24)
#define ASYNC_TOGGLE_TIMINGS_TRP	GENMASK(20, 16)
#define ASYNC_TOGGLE_TIMINGS_TWH	GENMASK(12, 8)
#define ASYNC_TOGGLE_TIMINGS_TWP	GENMASK(4, 0)

#define SYNC_TIMINGS			0x1020
#define SYNC_TCKWR			GENMASK(21, 16)
#define SYNC_TWRCK			GENMASK(13, 8)
#define SYNC_TCAD			GENMASK(5, 0)

#define TIMINGS0			0x1024
#define TIMINGS0_TADL			GENMASK(31, 24)
#define TIMINGS0_TCCS			GENMASK(23, 16)
#define TIMINGS0_TWHR			GENMASK(15, 8)
#define TIMINGS0_TRHW			GENMASK(7, 0)

#define TIMINGS1			0x1028
#define TIMINGS1_TRHZ			GENMASK(31, 24)
#define TIMINGS1_TWB			GENMASK(23, 16)
#define TIMINGS1_TCWAW			GENMASK(15, 8)
#define TIMINGS1_TVDLY			GENMASK(7, 0)

#define TIMINGS2			0x102c
#define TIMINGS2_TFEAT			GENMASK(25, 16)
#define TIMINGS2_CS_HOLD_TIME		GENMASK(13, 8)
#define TIMINGS2_CS_SETUP_TIME		GENMASK(5, 0)

/* Configuration of the resynchronization of slave DLL of PHY. */
#define DLL_PHY_CTRL			0x1034
#define DLL_PHY_CTRL_DLL_RST_N		BIT(24)
#define DLL_PHY_CTRL_EXTENDED_WR_MODE	BIT(17)
#define DLL_PHY_CTRL_EXTENDED_RD_MODE	BIT(16)
#define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT	GENMASK(11, 8)
#define DLL_PHY_CTRL_RS_IDLE_CNT	GENMASK(7, 0)

/* Register controlling DQ related timing. */
#define PHY_DQ_TIMING			0x2000
#define PHY_DQ_TIMING_OE_END		GENMASK(2, 0)
#define PHY_DQ_TIMING_OE_START		GENMASK(6, 4)
#define PHY_DQ_TIMING_TSEL_END		GENMASK(11, 8)
#define PHY_DQ_TIMING_TSEL_START	GENMASK(15, 12)

/* Register controlling DQS related timing. */
#define PHY_DQS_TIMING			0x2004
#define PHY_DQS_TIMING_DQS_SEL_OE_END	GENMASK(3, 0)
#define PHY_DQS_TIMING_DQS_SEL_OE_START	GENMASK(7, 4)
#define PHY_DQS_TIMING_DQS_SEL_TSEL_END	GENMASK(11, 8)
#define PHY_DQS_TIMING_PHONY_DQS_SEL	BIT(16)
#define PHY_DQS_TIMING_USE_PHONY_DQS	BIT(20)

/* Register controlling the gate and loopback control related timing. */
#define PHY_GATE_LPBK_CTRL		0x2008
#define PHY_GATE_LPBK_CTRL_GATE_CFG	GENMASK(3, 0)
#define PHY_GATE_LPBK_CTRL_GATE_CFG_CLOSE	GENMASK(5, 4)
#define PHY_GATE_LPBK_CTRL_RDS		GENMASK(24, 19)

/* Register holds the control for the master DLL logic. */
#define PHY_DLL_MASTER_CTRL		0x200C
#define PHY_DLL_MASTER_CTRL_BYPASS_MODE	BIT(23)

/* Register holds the control for the slave DLL logic. */
#define PHY_DLL_SLAVE_CTRL		0x2010

/* Register controls the DQS related timing. */
#define PHY_IE_TIMING			0x2014
#define PHY_IE_TIMING_DQS_IE_START	GENMASK(10, 8)
#define PHY_IE_TIMING_DQ_IE_START	GENMASK(18, 16)
#define PHY_IE_TIMING_IE_ALWAYS_ON	BIT(20)

/* This register handles the global control settings for the PHY. */
#define PHY_CTRL			0x2080
#define PHY_CTRL_SDR_DQS		BIT(14)
#define PHY_CTRL_PHONY_DQS		GENMASK(9, 4)

/*
 * This register handles the global control settings
 * for the termination selects for reads.
 */
#define PHY_TSEL			0x2084

/* Generic command layout. */
#define GCMD_LAY_CS			GENMASK_ULL(11, 8)
/*
 * This bit informs the minicontroller if it has to wait for tWB
 * after sending the last CMD/ADDR/DATA in the sequence.
 */
#define GCMD_LAY_TWB			BIT_ULL(6)
/* Type of generic instruction. */
#define GCMD_LAY_INSTR			GENMASK_ULL(5, 0)

/* Generic CMD sequence type. */
#define GCMD_LAY_INSTR_CMD		0
/* Generic ADDR sequence type. */
#define GCMD_LAY_INSTR_ADDR		1
/* Generic data transfer sequence type. */
#define GCMD_LAY_INSTR_DATA		2

/* Input part of generic command; type of input is command. */
#define GCMD_LAY_INPUT_CMD		GENMASK_ULL(23, 16)

/* Generic command address sequence - address fields. */
#define GCMD_LAY_INPUT_ADDR		GENMASK_ULL(63, 16)
/* Generic command address sequence - address size. */
#define GCMD_LAY_INPUT_ADDR_SIZE	GENMASK_ULL(13, 11)

/* Transfer direction field of generic command data sequence. */
#define GCMD_DIR			BIT_ULL(11)
/* Read transfer direction of generic command data sequence. */
#define GCMD_DIR_READ			0
/* Write transfer direction of generic command data sequence. */
#define GCMD_DIR_WRITE			1

/* ECC enabled flag of generic command data sequence - ECC enabled. */
#define GCMD_ECC_EN			BIT_ULL(12)
/* Generic command data sequence - sector size. */
#define GCMD_SECT_SIZE			GENMASK_ULL(31, 16)
/* Generic command data sequence - sector count. */
#define GCMD_SECT_CNT			GENMASK_ULL(39, 32)
/* Generic command data sequence - last sector size. */
#define GCMD_LAST_SIZE			GENMASK_ULL(55, 40)

/* CDMA descriptor fields. */
/* Erase command type of CDMA descriptor. */
#define CDMA_CT_ERASE			0x1000
/* Program page command type of CDMA descriptor. */
#define CDMA_CT_WR			0x2100
/* Read page command type of CDMA descriptor. */
#define CDMA_CT_RD			0x2200

/* Flash pointer memory shift. */
#define CDMA_CFPTR_MEM_SHIFT		24
/* Flash pointer memory mask. */
#define CDMA_CFPTR_MEM			GENMASK(26, 24)

/*
 * Command DMA descriptor flags. If set, an interrupt is issued after
 * the completion of descriptor processing.
 */
#define CDMA_CF_INT			BIT(8)
/*
 * Command DMA descriptor flags - the next descriptor
 * address field is valid and descriptor processing should continue.
 */
#define CDMA_CF_CONT			BIT(9)
/* DMA master flag of command DMA descriptor. */
#define CDMA_CF_DMA_MASTER		BIT(10)

/* Operation complete status of command descriptor. */
#define CDMA_CS_COMP			BIT(15)
/* Command descriptor status - operation fail. */
#define CDMA_CS_FAIL			BIT(14)
/* Command descriptor status - page erased. */
#define CDMA_CS_ERP			BIT(11)
/* Command descriptor status - timeout occurred. */
#define CDMA_CS_TOUT			BIT(10)
/*
 * Maximum amount of correction applied to one ECC sector.
 * It is part of command descriptor status.
 */
#define CDMA_CS_MAXERR			GENMASK(9, 2)
/* Command descriptor status - uncorrectable ECC error. */
#define CDMA_CS_UNCE			BIT(1)
/* Command descriptor status - descriptor error. */
#define CDMA_CS_ERR			BIT(0)

/* Status of operation - OK. */
#define STAT_OK				0
/* Status of operation - FAIL. */
#define STAT_FAIL			2
/* Status of operation - uncorrectable ECC error. */
#define STAT_ECC_UNCORR			3
/* Status of operation - page erased. */
#define STAT_ERASED			5
/* Status of operation - correctable ECC error. */
#define STAT_ECC_CORR			6
/* Status of operation - unexpected state. */
#define STAT_UNKNOWN			7
/* Status of operation - operation is not completed yet. */
#define STAT_BUSY			0xFF

#define BCH_MAX_NUM_CORR_CAPS		8
#define BCH_MAX_NUM_SECTOR_SIZES	2

/* NVDDR mode specific parameters and register values based on Cadence specs. */
#define NVDDR_PHY_RD_DELAY		29
#define NVDDR_PHY_RD_DELAY_MAX		31
#define NVDDR_GATE_CFG_OPT		14
#define NVDDR_GATE_CFG_STD		7
#define NVDDR_GATE_CFG_MAX		15
#define NVDDR_DATA_SEL_OE_START		1
#define NVDDR_DATA_SEL_OE_START_MAX	7
#define NVDDR_DATA_SEL_OE_END		6
#define NVDDR_DATA_SEL_OE_END_MIN	4
#define NVDDR_DATA_SEL_OE_END_MAX	15
#define NVDDR_RS_HIGH_WAIT_CNT		7
#define NVDDR_RS_IDLE_CNT		7
#define NVDDR_TCWAW_DELAY		250000
#define NVDDR_TVDLY_DELAY		500000
#define NVDDR_TOGGLE_TIMINGS_0		0x00000301
#define NVDDR_TOGGLE_TIMINGS_1		0x0a060102
#define NVDDR_ASYNC_TOGGLE_TIMINGS	0
#define NVDDR_PHY_CTRL			0x00004000
#define NVDDR_PHY_TSEL			0
#define NVDDR_PHY_DLL_MASTER_CTRL	0x00140004
#define NVDDR_PHY_DLL_SLAVE_CTRL	0x00003c3c

struct cadence_nand_timings {
	u32 async_toggle_timings;
	u32 sync_timings;
	u32 timings0;
	u32 timings1;
	u32 timings2;
	u32 dll_phy_ctrl;
	u32 phy_ctrl;
	u32 phy_dq_timing;
	u32 phy_dqs_timing;
	u32 phy_gate_lpbk_ctrl;
	u32 phy_ie_timing;
};

/* Command DMA descriptor. */
struct cadence_nand_cdma_desc {
	/* Next descriptor address. */
	u64 next_pointer;

	/* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
	u32 flash_pointer;
	/* Field appears in HPNFC version 13. */
	u16 bank;
	u16 rsvd0;

	/* Operation the controller needs to perform. */
	u16 command_type;
	u16 rsvd1;
	/* Flags for operation of this command. */
	u16 command_flags;
	u16 rsvd2;

	/* System/host memory address required for data DMA commands. */
	u64 memory_pointer;

	/* Status of operation. */
	u32 status;
	u32 rsvd3;

	/* Address pointer to sync buffer location. */
	u64 sync_flag_pointer;

	/* Controls the buffer sync mechanism. */
	u32 sync_arguments;
	u32 rsvd4;

	/* Control data pointer. */
	u64 ctrl_data_ptr;
};

/* Interrupt status. */
struct cadence_nand_irq_status {
	/* Thread operation complete status. */
	u32 trd_status;
	/* Thread operation error. */
	u32 trd_error;
	/* Controller status. */
	u32 status;
};

/* Cadence NAND flash controller capabilities taken from driver data. */
struct cadence_nand_dt_devdata {
	/* Skew value of the output signals of the NAND Flash interface. */
	u32 if_skew;
	/* It informs if slave DMA interface is connected to DMA engine. */
	unsigned int has_dma:1;
};

/* Cadence NAND flash controller capabilities read from registers. */
struct cdns_nand_caps {
	/* Maximum number of banks supported by hardware. */
	u8 max_banks;
	/* Slave and Master DMA data width in bytes (4 or 8). */
	u8 data_dma_width;
	/* Control Data feature supported. */
	bool data_control_supp;
	/* Is PHY type DLL. */
	bool is_phy_type_dll;
};

struct cdns_nand_ctrl {
	struct device *dev;
	struct nand_controller controller;
	struct cadence_nand_cdma_desc *cdma_desc;
	/* IP capability. */
	const struct cadence_nand_dt_devdata *caps1;
	struct cdns_nand_caps caps2;
	u8 ctrl_rev;
	dma_addr_t dma_cdma_desc;
	u8 *buf;
	u32 buf_size;
	u8 curr_corr_str_idx;

	/* Register interface. */
	void __iomem *reg;

	struct {
		void __iomem *virt;
		dma_addr_t dma;
		dma_addr_t iova_dma;
		u32 size;
	} io;

	int irq;
	/* Interrupts that have happened. */
	struct cadence_nand_irq_status irq_status;
	/* Interrupts we are waiting for. */
	struct cadence_nand_irq_status irq_mask;
	struct completion complete;
	/* Protect irq_mask and irq_status. */
	spinlock_t irq_lock;

	int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
	struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
	struct nand_ecc_caps ecc_caps;

	int curr_trans_type;

	struct dma_chan *dmac;

	u32 nf_clk_rate;
	/*
	 * Estimated board delay. The value includes the total
	 * round trip delay for the signals and is used for deciding on values
	 * associated with data read capture.
	 */
	u32 board_delay;

	struct nand_chip *selected_chip;

	unsigned long assigned_cs;
	struct list_head chips;
	u8 bch_metadata_size;
};

struct cdns_nand_chip {
	struct cadence_nand_timings timings;
	struct nand_chip chip;
	u8 nsels;
	struct list_head node;

	/*
	 * Part of the OOB area of the NAND flash memory page.
	 * This part is available for the user to read or write.
	 */
	u32 avail_oob_size;

	/* Sector size. There are a few sectors per mtd->writesize. */
	u32 sector_size;
	u32 sector_count;

	/* Offset of BBM. */
	u8 bbm_offs;
	/* Number of bytes reserved for BBM. */
	u8 bbm_len;
	/* ECC strength index. */
	u8 corr_str_idx;

	u8 cs[] __counted_by(nsels);
};

static inline struct
cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
{
	return container_of(chip, struct cdns_nand_chip, chip);
}

static inline struct
cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
{
	return container_of(controller, struct cdns_nand_ctrl, controller);
}

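/*
 * Check whether a buffer can be used for slave DMA directly: it must be a
 * valid kernel virtual address and meet the controller's data width and
 * transfer size alignment requirements. Callers fall back to the bounce
 * buffer when this check fails.
 */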
static bool
cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
			u32 buf_len)
{
	u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;

	return buf && virt_addr_valid(buf) &&
		likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
		likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
}

static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
				       u32 reg_offset, u32 timeout_us,
				       u32 mask, bool is_clear)
{
	u32 val;
	int ret;

	ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
					 val, !(val & mask) == is_clear,
					 10, timeout_us);

	if (ret < 0) {
		dev_err(cdns_ctrl->dev,
			"Timeout while waiting for reg %x with mask %x is clear %d\n",
			reg_offset, mask, is_clear);
	}

	return ret;
}

static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
				       bool enable)
{
	u32 reg;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);

	if (enable)
		reg |= ECC_CONFIG_0_ECC_EN;
	else
		reg &= ~ECC_CONFIG_0_ECC_EN;

	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);

	return 0;
}

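/* Select the BCH correction strength by index; cached to avoid extra MMIO. */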
static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
					  u8 corr_str_idx)
{
	u32 reg;

	if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
		return;

	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
	reg &= ~ECC_CONFIG_0_CORR_STR;
	reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);

	cdns_ctrl->curr_corr_str_idx = corr_str_idx;
}

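/* Map an ECC strength value to its index in the controller strength table. */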
static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
					     u8 strength)
{
	int i, corr_str_idx = -1;

	for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
		if (cdns_ctrl->ecc_strengths[i] == strength) {
			corr_str_idx = i;
			break;
		}
	}

	return corr_str_idx;
}

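/*
 * Program the marker value the controller substitutes for the skipped
 * bytes, so the bad block marker area is preserved on program operations.
 */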
static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
					    u16 marker_value)
{
	u32 reg;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
	reg &= ~SKIP_BYTES_MARKER_VALUE;
	reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
			  marker_value);

	writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);

	return 0;
}

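/* Configure how many bytes to skip at a given page offset, or disable it. */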
static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
					    u8 num_of_bytes,
					    u32 offset_value,
					    int enable)
{
	u32 reg, skip_bytes_offset;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	if (!enable) {
		num_of_bytes = 0;
		offset_value = 0;
	}

	reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
	reg &= ~SKIP_BYTES_NUM_OF_BYTES;
	reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
			  num_of_bytes);
	skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
				       offset_value);

	writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
	writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);

	return 0;
}

/* Function enables/disables hardware detection of erased data. */
static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
					     bool enable,
					     u8 bitflips_threshold)
{
	u32 reg;

	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);

	if (enable)
		reg |= ECC_CONFIG_0_ERASE_DET_EN;
	else
		reg &= ~ECC_CONFIG_0_ERASE_DET_EN;

	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
	writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
}

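/* Switch the NAND bus between 8-bit and 16-bit access width. */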
static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
					   bool bit_bus16)
{
	u32 reg;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);

	if (!bit_bus16)
		reg &= ~COMMON_SET_DEVICE_16BIT;
	else
		reg |= COMMON_SET_DEVICE_16BIT;
	writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);

	return 0;
}

static void
cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
			     struct cadence_nand_irq_status *irq_status)
{
	writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
	writel_relaxed(irq_status->trd_status,
		       cdns_ctrl->reg + TRD_COMP_INT_STATUS);
	writel_relaxed(irq_status->trd_error,
		       cdns_ctrl->reg + TRD_ERR_INT_STATUS);
}

static void
cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
			     struct cadence_nand_irq_status *irq_status)
{
	irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
	irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
					       + TRD_COMP_INT_STATUS);
	irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
					      + TRD_ERR_INT_STATUS);
}

static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
			struct cadence_nand_irq_status *irq_status)
{
	cadence_nand_read_int_status(cdns_ctrl, irq_status);

	return irq_status->status || irq_status->trd_status ||
		irq_status->trd_error;
}

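/* Clear the cached interrupt state before issuing a new command. */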
static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
{
	unsigned long flags;

	spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
	memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
	memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
	spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
}

/*
 * This is the interrupt service routine. It handles all interrupts
 * sent to this device.
 */
static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
{
	struct cdns_nand_ctrl *cdns_ctrl = dev_id;
	struct cadence_nand_irq_status irq_status;
	irqreturn_t result = IRQ_NONE;

	spin_lock(&cdns_ctrl->irq_lock);

	if (irq_detected(cdns_ctrl, &irq_status)) {
		/* Handle interrupt. */
		/* First acknowledge it. */
		cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
		/* Status in the device context for someone to read. */
		cdns_ctrl->irq_status.status |= irq_status.status;
		cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
		cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
		/* Notify anyone who cares that it happened. */
		complete(&cdns_ctrl->complete);
		/* Tell the OS that we've handled this. */
		result = IRQ_HANDLED;
	}
	spin_unlock(&cdns_ctrl->irq_lock);

	return result;
}

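/*
 * Enable the interrupt sources the caller intends to wait for; the global
 * INTR_ENABLE_INTR_EN bit gates all of them.
 */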
static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
				      struct cadence_nand_irq_status *irq_mask)
{
	writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
		       cdns_ctrl->reg + INTR_ENABLE);

	writel_relaxed(irq_mask->trd_error,
		       cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
}

static void
cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
			  struct cadence_nand_irq_status *irq_mask,
			  struct cadence_nand_irq_status *irq_status)
{
	unsigned long timeout = msecs_to_jiffies(10000);
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
						timeout);

	*irq_status = cdns_ctrl->irq_status;
	if (time_left == 0) {
		/* Timeout error. */
		dev_err(cdns_ctrl->dev, "timeout occurred:\n");
		dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
			irq_status->status, irq_mask->status);
		dev_err(cdns_ctrl->dev,
			"\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
			irq_status->trd_status, irq_mask->trd_status);
		dev_err(cdns_ctrl->dev,
			"\t trd_error = 0x%x, trd_error mask = 0x%x\n",
			irq_status->trd_error, irq_mask->trd_error);
	}
}

/* Execute generic command on NAND controller. */
static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
					 u8 chip_nr,
					 u64 mini_ctrl_cmd)
{
	u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;

	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
	mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
	mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	cadence_nand_reset_irq(cdns_ctrl);

	writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
	writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);

	/* Select generic command. */
	reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
	/* Thread number. */
	reg |= FIELD_PREP(CMD_REG0_TN, 0);

	/* Issue command. */
	writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);

	return 0;
}

/* Wait for data on slave DMA interface. */
static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
				     u8 *out_sdma_trd,
				     u32 *out_sdma_size)
{
	struct cadence_nand_irq_status irq_mask, irq_status;

	irq_mask.trd_status = 0;
	irq_mask.trd_error = 0;
	irq_mask.status = INTR_STATUS_SDMA_TRIGG
		| INTR_STATUS_SDMA_ERR
		| INTR_STATUS_UNSUPP_CMD;

	cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
	cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
	if (irq_status.status == 0) {
		dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
		return -ETIMEDOUT;
	}

	if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
		*out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
		*out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
		*out_sdma_trd =
			FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
	} else {
		dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
			irq_status.status);
		return -EIO;
	}

	return 0;
}

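/* Read the hardware capabilities from the CTRL_FEATURES register. */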
static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
	u32 reg;

	reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);

	cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);

	if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
		cdns_ctrl->caps2.data_dma_width = 8;
	else
		cdns_ctrl->caps2.data_dma_width = 4;

	if (reg & CTRL_FEATURES_CONTROL_DATA)
		cdns_ctrl->caps2.data_control_supp = true;

	if (reg & (CTRL_FEATURES_NVDDR_2_3
		   | CTRL_FEATURES_NVDDR))
		cdns_ctrl->caps2.is_phy_type_dll = true;
}

/* Prepare CDMA descriptor. */
static void
cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
			       char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
			       dma_addr_t ctrl_data_ptr, u16 ctype)
{
	struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;

	memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));

	/* Set fields for one descriptor. */
	cdma_desc->flash_pointer = flash_ptr;
	if (cdns_ctrl->ctrl_rev >= 13)
		cdma_desc->bank = nf_mem;
	else
		cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);

	cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
	cdma_desc->command_flags |= CDMA_CF_INT;

	cdma_desc->memory_pointer = mem_ptr;
	cdma_desc->status = 0;
	cdma_desc->sync_flag_pointer = 0;
	cdma_desc->sync_arguments = 0;

	cdma_desc->command_type = ctype;
	cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
}

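/* Translate a failed CDMA descriptor status word into a STAT_* code. */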
static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
					u32 desc_status)
{
	if (desc_status & CDMA_CS_ERP)
		return STAT_ERASED;

	if (desc_status & CDMA_CS_UNCE)
		return STAT_ECC_UNCORR;

	if (desc_status & CDMA_CS_ERR) {
		dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n");
		return STAT_FAIL;
	}

	if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
		return STAT_ECC_CORR;

	return STAT_FAIL;
}

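/* Check the final status of the last processed CDMA descriptor. */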
static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
{
	struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
	u8 status = STAT_BUSY;

	if (desc_ptr->status & CDMA_CS_FAIL) {
		status = cadence_nand_check_desc_error(cdns_ctrl,
						       desc_ptr->status);
		dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status);
	} else if (desc_ptr->status & CDMA_CS_COMP) {
		/* Descriptor finished with no errors. */
		if (desc_ptr->command_flags & CDMA_CF_CONT) {
			dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
			status = STAT_UNKNOWN;
		} else {
			/* Last descriptor. */
			status = STAT_OK;
		}
	}

	return status;
}

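/* Launch the prepared CDMA descriptor on the given command engine thread. */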
static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
				  u8 thread)
{
	u32 reg;
	int status;

	/* Wait for thread ready. */
	status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
					     1000000,
					     BIT(thread), true);
	if (status)
		return status;

	cadence_nand_reset_irq(cdns_ctrl);
	reinit_completion(&cdns_ctrl->complete);

	writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
		       cdns_ctrl->reg + CMD_REG2);
	writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);

	/* Select CDMA mode. */
	reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
	/* Thread number. */
	reg |= FIELD_PREP(CMD_REG0_TN, thread);
	/* Issue command. */
	writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);

	return 0;
}

/* Send CDMA command and wait for finish. */
static int
cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
				u8 thread)
{
	struct cadence_nand_irq_status irq_mask, irq_status = {0};
	int status;

	irq_mask.trd_status = BIT(thread);
	irq_mask.trd_error = BIT(thread);
	irq_mask.status = INTR_STATUS_CDMA_TERR;

	cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);

	status = cadence_nand_cdma_send(cdns_ctrl, thread);
	if (status)
		return status;

	cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);

	if (irq_status.status == 0 && irq_status.trd_status == 0 &&
	    irq_status.trd_error == 0) {
		dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
		return -ETIMEDOUT;
	}
	if (irq_status.status & irq_mask.status) {
		dev_err(cdns_ctrl->dev, "CDMA command failed\n");
		return -EIO;
	}

	return 0;
}

/*
 * ECC size depends on configured ECC strength and on maximum supported
 * ECC step size.
 */
static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
{
	int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);

	return ALIGN(nbytes, 2);
}

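/*
 * Generate one calc_ecc_bytes() callback per supported maximum step size,
 * as required by the nand_ecc_caps step info tables built below.
 */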
#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
	static int \
	cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
						    int strength)\
	{\
		return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
	}

CADENCE_NAND_CALC_ECC_BYTES(256)
CADENCE_NAND_CALC_ECC_BYTES(512)
CADENCE_NAND_CALC_ECC_BYTES(1024)
CADENCE_NAND_CALC_ECC_BYTES(2048)
CADENCE_NAND_CALC_ECC_BYTES(4096)

/* Function reads BCH capabilities. */
static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
	struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
	int max_step_size = 0, nstrengths, i;
	u32 reg;

	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
	cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
	if (cdns_ctrl->bch_metadata_size < 4) {
		dev_err(cdns_ctrl->dev,
			"Driver needs at least 4 bytes of BCH meta data\n");
		return -EIO;
	}

	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
	cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
	cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
	cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
	cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);

	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
	cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
	cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
	cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
	cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);

	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
	cdns_ctrl->ecc_stepinfos[0].stepsize =
		FIELD_GET(BCH_CFG_2_SECT_0, reg);

	cdns_ctrl->ecc_stepinfos[1].stepsize =
		FIELD_GET(BCH_CFG_2_SECT_1, reg);

	nstrengths = 0;
	for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
		if (cdns_ctrl->ecc_strengths[i] != 0)
			nstrengths++;
	}

	ecc_caps->nstepinfos = 0;
	for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
		/* ECC strengths are common for all step infos. */
		cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
		cdns_ctrl->ecc_stepinfos[i].strengths =
			cdns_ctrl->ecc_strengths;

		if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
			ecc_caps->nstepinfos++;

		if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
			max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
	}
	ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];

	switch (max_step_size) {
	case 256:
		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
		break;
	case 512:
		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
		break;
	case 1024:
		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
		break;
	case 2048:
		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
		break;
	case 4096:
		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
		break;
	default:
		dev_err(cdns_ctrl->dev,
			"Unsupported sector size(ecc step size) %d\n",
			max_step_size);
		return -EIO;
	}

	return 0;
}

/* Hardware initialization. */
static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
{
	int status;
	u32 reg;

	status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					     1000000,
					     CTRL_STATUS_INIT_COMP, false);
	if (status)
		return status;

	reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
	cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);

	dev_info(cdns_ctrl->dev,
		 "%s: cadence nand controller version reg %x\n",
		 __func__, reg);

	/* Disable cache and multiplane. */
	writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
	writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);

	/* Clear all interrupts. */
	writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);

	cadence_nand_get_caps(cdns_ctrl);
	if (cadence_nand_read_bch_caps(cdns_ctrl))
		return -EIO;

#ifndef CONFIG_64BIT
	if (cdns_ctrl->caps2.data_dma_width == 8) {
		dev_err(cdns_ctrl->dev,
			"cannot access 64-bit dma on !64-bit architectures");
		return -EIO;
	}
#endif

	/*
	 * Set I/O width access to 8 bit. During software device discovery,
	 * 8-bit width access is expected.
	 */
	status = cadence_nand_set_access_width16(cdns_ctrl, false);

	return status;
}

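/* Transfer types used by cadence_nand_prepare_data_size(). */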
#define TT_MAIN_OOB_AREAS	2
#define TT_RAW_PAGE		3
#define TT_BBM			4
#define TT_MAIN_OOB_AREA_EXT	5

/* Prepare size of data to transfer. */
static void
cadence_nand_prepare_data_size(struct nand_chip *chip,
			       int transfer_type)
{
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 sec_size = 0, offset = 0, sec_cnt = 1;
	u32 last_sec_size = cdns_chip->sector_size;
	u32 data_ctrl_size = 0;
	u32 reg = 0;

	if (cdns_ctrl->curr_trans_type == transfer_type)
		return;

	switch (transfer_type) {
	case TT_MAIN_OOB_AREA_EXT:
		sec_cnt = cdns_chip->sector_count;
		sec_size = cdns_chip->sector_size;
		data_ctrl_size = cdns_chip->avail_oob_size;
		break;
	case TT_MAIN_OOB_AREAS:
		sec_cnt = cdns_chip->sector_count;
		last_sec_size = cdns_chip->sector_size
			+ cdns_chip->avail_oob_size;
		sec_size = cdns_chip->sector_size;
		break;
	case TT_RAW_PAGE:
		last_sec_size = mtd->writesize + mtd->oobsize;
		break;
	case TT_BBM:
		offset = mtd->writesize + cdns_chip->bbm_offs;
		last_sec_size = 8;
		break;
	}

	reg = 0;
	reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
	reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
	writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);

	reg = 0;
	reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
	reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
	writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);

	if (cdns_ctrl->caps2.data_control_supp) {
		reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
		reg &= ~CONTROL_DATA_CTRL_SIZE;
		reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
		writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
	}

	cdns_ctrl->curr_trans_type = transfer_type;
}

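/*
 * Execute a read/write CDMA transaction: map the data buffer (and the
 * optional control data buffer) for DMA, run the descriptor and translate
 * its completion status.
 */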
static int
cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
			   int page, void *buf, void *ctrl_dat, u32 buf_size,
			   u32 ctrl_dat_size, enum dma_data_direction dir,
			   bool with_ecc)
{
	dma_addr_t dma_buf, dma_ctrl_dat = 0;
	u8 thread_nr = chip_nr;
	int status;
	u16 ctype;

	if (dir == DMA_FROM_DEVICE)
		ctype = CDMA_CT_RD;
	else
		ctype = CDMA_CT_WR;

	cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);

	dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
	if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
		dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
		return -EIO;
	}

	if (ctrl_dat && ctrl_dat_size) {
		dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
					      ctrl_dat_size, dir);
		if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
			dma_unmap_single(cdns_ctrl->dev, dma_buf,
					 buf_size, dir);
			dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
			return -EIO;
		}
	}

	cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
				       dma_buf, dma_ctrl_dat, ctype);

	status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);

	dma_unmap_single(cdns_ctrl->dev, dma_buf,
			 buf_size, dir);

	if (ctrl_dat && ctrl_dat_size)
		dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
				 ctrl_dat_size, dir);
	if (status)
		return status;

	return cadence_nand_cdma_finish(cdns_ctrl);
}

static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
				     struct cadence_nand_timings *t)
{
	writel_relaxed(t->async_toggle_timings,
		       cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
	writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
	writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
	writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);

	if (cdns_ctrl->caps2.is_phy_type_dll)
		writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);

	writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);

	if (cdns_ctrl->caps2.is_phy_type_dll) {
		writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
		writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
		writel_relaxed(t->phy_dqs_timing,
			       cdns_ctrl->reg + PHY_DQS_TIMING);
		writel_relaxed(t->phy_gate_lpbk_ctrl,
			       cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
		writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
			       cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
		writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
	}
}

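/*
 * Apply per-chip timings, ECC strength and erased-page detection when
 * switching between NAND chips; a no-op if the chip is already selected.
 */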
static int cadence_nand_select_target(struct nand_chip *chip)
{
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

	if (chip == cdns_ctrl->selected_chip)
		return 0;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);

	cadence_nand_set_ecc_strength(cdns_ctrl,
				      cdns_chip->corr_str_idx);

	cadence_nand_set_erase_detection(cdns_ctrl, true,
					 chip->ecc.strength);

	cdns_ctrl->curr_trans_type = -1;
	cdns_ctrl->selected_chip = chip;

	return 0;
}

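/* Erase the block containing the given page using a CDMA erase descriptor. */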
static int cadence_nand_erase(struct nand_chip *chip, u32 page)
{
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	int status;
	u8 thread_nr = cdns_chip->cs[chip->cur_cs];

	cadence_nand_cdma_desc_prepare(cdns_ctrl,
				       cdns_chip->cs[chip->cur_cs],
				       page, 0, 0,
				       CDMA_CT_ERASE);
	status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
	if (status) {
		dev_err(cdns_ctrl->dev, "erase operation failed\n");
		return -EIO;
	}

	status = cadence_nand_cdma_finish(cdns_ctrl);
	if (status)
		return status;

	return 0;
}

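/* Read the raw bad block marker bytes from the OOB area of a page. */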
static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
{
	int status;
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);

	cadence_nand_prepare_data_size(chip, TT_BBM);

	cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);

	/*
	 * Read only bad block marker from offset
	 * defined by a memory manufacturer.
	 */
	status = cadence_nand_cdma_transfer(cdns_ctrl,
					    cdns_chip->cs[chip->cur_cs],
					    page, cdns_ctrl->buf, NULL,
					    mtd->oobsize,
					    0, DMA_FROM_DEVICE, false);
	if (status) {
		dev_err(cdns_ctrl->dev, "read BBM failed\n");
		return -EIO;
	}

	memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);

	return 0;
}

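/*
 * Program one page. The fast path DMAs the caller's buffer directly when
 * it is DMA-capable and the controller supports separate control data;
 * otherwise data and OOB are staged in the bounce buffer.
 */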
static int cadence_nand_write_page(struct nand_chip *chip,
				   const u8 *buf, int oob_required,
				   int page)
{
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int status;
	u16 marker_val = 0xFFFF;

	status = cadence_nand_select_target(chip);
	if (status)
		return status;

	cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
					 mtd->writesize
					 + cdns_chip->bbm_offs,
					 1);

	if (oob_required) {
		marker_val = *(u16 *)(chip->oob_poi
				      + cdns_chip->bbm_offs);
	} else {
		/* Set oob data to 0xFF. */
		memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
		       cdns_chip->avail_oob_size);
	}

	cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);

	cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);

	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
	    cdns_ctrl->caps2.data_control_supp) {
		u8 *oob;

		if (oob_required)
			oob = chip->oob_poi;
		else
			oob = cdns_ctrl->buf + mtd->writesize;

		status = cadence_nand_cdma_transfer(cdns_ctrl,
						    cdns_chip->cs[chip->cur_cs],
						    page, (void *)buf, oob,
						    mtd->writesize,
						    cdns_chip->avail_oob_size,
						    DMA_TO_DEVICE, true);
		if (status) {
			dev_err(cdns_ctrl->dev, "write page failed\n");
			return -EIO;
		}

		return 0;
	}

	if (oob_required) {
		/* Transfer the data to the oob area. */
		memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
		       cdns_chip->avail_oob_size);
	}

	memcpy(cdns_ctrl->buf, buf, mtd->writesize);

	cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);

	return cadence_nand_cdma_transfer(cdns_ctrl,
					  cdns_chip->cs[chip->cur_cs],
					  page, cdns_ctrl->buf, NULL,
					  mtd->writesize
					  + cdns_chip->avail_oob_size,
					  0, DMA_TO_DEVICE, true);
}

static int cadence_nand_write_oob(struct nand_chip *chip, int page)
{
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);

	memset(cdns_ctrl->buf, 0xFF, mtd->writesize);

	return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
}

static int cadence_nand_write_page_raw(struct nand_chip *chip,
				       const u8 *buf, int oob_required,
				       int page)
{
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = cdns_ctrl->buf;
	int oob_skip = cdns_chip->bbm_len;
	size_t size = writesize + oobsize;
	int i, pos, len;
	int status = 0;

	status = cadence_nand_select_target(chip);
	if (status)
		return status;

	/*
	 * Fill the buffer with 0xff first, except for the full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);

	/* Arrange the buffer for syndrome payload/ecc layout. */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const u8 *oob = chip->oob_poi;
		u32 oob_data_offset = (cdns_chip->sector_count - 1) *
			(cdns_chip->sector_size + chip->ecc.bytes)
			+ cdns_chip->sector_size + oob_skip;

		/* BBM at the beginning of the OOB area. */
		memcpy(tmp_buf + writesize, oob, oob_skip);

		/* OOB free. */
		memcpy(tmp_buf + oob_data_offset, oob,
		       cdns_chip->avail_oob_size);
		oob += cdns_chip->avail_oob_size;

		/* OOB ECC. */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			if (i == (ecc_steps - 1))
				pos += cdns_chip->avail_oob_size;

			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}
	}

	cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);

	return cadence_nand_cdma_transfer(cdns_ctrl,
					  cdns_chip->cs[chip->cur_cs],
					  page, cdns_ctrl->buf, NULL,
					  mtd->writesize +
					  mtd->oobsize,
					  0, DMA_TO_DEVICE, false);
}

static int cadence_nand_write_oob_raw(struct nand_chip *chip,
				      int page)
{
	return cadence_nand_write_page_raw(chip, NULL, true, page);
}

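/* Read one page with ECC enabled and update the MTD ECC statistics. */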
cadence_nand_read_page(struct nand_chip * chip,u8 * buf,int oob_required,int page)1667 static int cadence_nand_read_page(struct nand_chip *chip,
1668 u8 *buf, int oob_required, int page)
1669 {
1670 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1671 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1672 struct mtd_info *mtd = nand_to_mtd(chip);
1673 int status = 0;
1674 int ecc_err_count = 0;
1675
1676 status = cadence_nand_select_target(chip);
1677 if (status)
1678 return status;
1679
1680 cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
1681 mtd->writesize
1682 + cdns_chip->bbm_offs, 1);
1683
1684 /*
1685 * If data buffer can be accessed by DMA and data_control feature
1686 * is supported then transfer data and oob directly.
1687 */
1688 if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
1689 cdns_ctrl->caps2.data_control_supp) {
1690 u8 *oob;
1691
1692 if (oob_required)
1693 oob = chip->oob_poi;
1694 else
1695 oob = cdns_ctrl->buf + mtd->writesize;
1696
1697 cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
1698 status = cadence_nand_cdma_transfer(cdns_ctrl,
1699 cdns_chip->cs[chip->cur_cs],
1700 page, buf, oob,
1701 mtd->writesize,
1702 cdns_chip->avail_oob_size,
1703 DMA_FROM_DEVICE, true);
1704 /* Otherwise use bounce buffer. */
1705 } else {
1706 cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
1707 status = cadence_nand_cdma_transfer(cdns_ctrl,
1708 cdns_chip->cs[chip->cur_cs],
1709 page, cdns_ctrl->buf,
1710 NULL, mtd->writesize
1711 + cdns_chip->avail_oob_size,
1712 0, DMA_FROM_DEVICE, true);
1713
1714 memcpy(buf, cdns_ctrl->buf, mtd->writesize);
1715 if (oob_required)
1716 memcpy(chip->oob_poi,
1717 cdns_ctrl->buf + mtd->writesize,
1718 mtd->oobsize);
1719 }
1720
1721 switch (status) {
1722 case STAT_ECC_UNCORR:
1723 mtd->ecc_stats.failed++;
1724 ecc_err_count++;
1725 break;
1726 case STAT_ECC_CORR:
1727 ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
1728 cdns_ctrl->cdma_desc->status);
1729 mtd->ecc_stats.corrected += ecc_err_count;
1730 break;
1731 case STAT_ERASED:
1732 case STAT_OK:
1733 break;
1734 default:
1735 dev_err(cdns_ctrl->dev, "read page failed\n");
1736 return -EIO;
1737 }
1738
1739 if (oob_required)
1740 if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
1741 return -EIO;
1742
1743 return ecc_err_count;
1744 }
1745
1746 /* Reads OOB data from the device. */
1747 static int cadence_nand_read_oob(struct nand_chip *chip, int page)
1748 {
1749 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1750
1751 return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
1752 }
1753
1754 static int cadence_nand_read_page_raw(struct nand_chip *chip,
1755 u8 *buf, int oob_required, int page)
1756 {
1757 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1758 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1759 struct mtd_info *mtd = nand_to_mtd(chip);
1760 int oob_skip = cdns_chip->bbm_len;
1761 int writesize = mtd->writesize;
1762 int ecc_steps = chip->ecc.steps;
1763 int ecc_size = chip->ecc.size;
1764 int ecc_bytes = chip->ecc.bytes;
1765 void *tmp_buf = cdns_ctrl->buf;
1766 int i, pos, len;
1767 int status = 0;
1768
1769 status = cadence_nand_select_target(chip);
1770 if (status)
1771 return status;
1772
1773 cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
1774
1775 cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
1776 status = cadence_nand_cdma_transfer(cdns_ctrl,
1777 cdns_chip->cs[chip->cur_cs],
1778 page, cdns_ctrl->buf, NULL,
1779 mtd->writesize
1780 + mtd->oobsize,
1781 0, DMA_FROM_DEVICE, false);
1782
1783 switch (status) {
1784 case STAT_ERASED:
1785 case STAT_OK:
1786 break;
1787 default:
1788 dev_err(cdns_ctrl->dev, "read raw page failed\n");
1789 return -EIO;
1790 }
1791
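/*
 * Reading aid (a sketch, not authoritative documentation): the raw page
 * image interleaves each sector's payload with its ECC, injects bbm_len
 * "skip" bytes at absolute offset writesize (possibly splitting the last
 * payload chunk), places the free OOB bytes after the last payload, and
 * ends with the last ECC chunk:
 *
 * | sect 0 | ECC 0 | ... | sect N-1 .. BBM .. sect N-1 | free OOB | ECC N-1 |
 *
 * The copy loops below fold this image into the MTD view, where the whole
 * payload comes first and the OOB bytes (BBM, free area, ECC) follow.
 */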
1792 /* Arrange the buffer for syndrome payload/ecc layout. */
1793 if (buf) {
1794 for (i = 0; i < ecc_steps; i++) {
1795 pos = i * (ecc_size + ecc_bytes);
1796 len = ecc_size;
1797
1798 if (pos >= writesize)
1799 pos += oob_skip;
1800 else if (pos + len > writesize)
1801 len = writesize - pos;
1802
1803 memcpy(buf, tmp_buf + pos, len);
1804 buf += len;
1805 if (len < ecc_size) {
1806 len = ecc_size - len;
1807 memcpy(buf, tmp_buf + writesize + oob_skip,
1808 len);
1809 buf += len;
1810 }
1811 }
1812 }
1813
1814 if (oob_required) {
1815 u8 *oob = chip->oob_poi;
1816 u32 oob_data_offset = (cdns_chip->sector_count - 1) *
1817 (cdns_chip->sector_size + chip->ecc.bytes)
1818 + cdns_chip->sector_size + oob_skip;
1819
1820 /* OOB free. */
1821 memcpy(oob, tmp_buf + oob_data_offset,
1822 cdns_chip->avail_oob_size);
1823
1824 /* BBM at the beginning of the OOB area. */
1825 memcpy(oob, tmp_buf + writesize, oob_skip);
1826
1827 oob += cdns_chip->avail_oob_size;
1828
1829 /* OOB ECC */
1830 for (i = 0; i < ecc_steps; i++) {
1831 pos = ecc_size + i * (ecc_size + ecc_bytes);
1832 len = ecc_bytes;
1833
1834 if (i == (ecc_steps - 1))
1835 pos += cdns_chip->avail_oob_size;
1836
1837 if (pos >= writesize)
1838 pos += oob_skip;
1839 else if (pos + len > writesize)
1840 len = writesize - pos;
1841
1842 memcpy(oob, tmp_buf + pos, len);
1843 oob += len;
1844 if (len < ecc_bytes) {
1845 len = ecc_bytes - len;
1846 memcpy(oob, tmp_buf + writesize + oob_skip,
1847 len);
1848 oob += len;
1849 }
1850 }
1851 }
1852
1853 return 0;
1854 }
1855
1856 static int cadence_nand_read_oob_raw(struct nand_chip *chip,
1857 int page)
1858 {
1859 return cadence_nand_read_page_raw(chip, NULL, true, page);
1860 }
1861
1862 static void cadence_nand_slave_dma_transfer_finished(void *data)
1863 {
1864 struct completion *finished = data;
1865
1866 complete(finished);
1867 }
1868
1869 static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
1870 void *buf,
1871 dma_addr_t dev_dma, size_t len,
1872 enum dma_data_direction dir)
1873 {
1874 DECLARE_COMPLETION_ONSTACK(finished);
1875 struct dma_chan *chan;
1876 struct dma_device *dma_dev;
1877 dma_addr_t src_dma, dst_dma, buf_dma;
1878 struct dma_async_tx_descriptor *tx;
1879 dma_cookie_t cookie;
1880
1881 chan = cdns_ctrl->dmac;
1882 dma_dev = chan->device;
1883
1884 buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
1885 if (dma_mapping_error(dma_dev->dev, buf_dma)) {
1886 dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
1887 goto err;
1888 }
1889
1890 if (dir == DMA_FROM_DEVICE) {
1891 src_dma = cdns_ctrl->io.iova_dma;
1892 dst_dma = buf_dma;
1893 } else {
1894 src_dma = buf_dma;
1895 dst_dma = cdns_ctrl->io.iova_dma;
1896 }
1897
1898 tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
1899 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
1900 if (!tx) {
1901 dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
1902 goto err_unmap;
1903 }
1904
1905 tx->callback = cadence_nand_slave_dma_transfer_finished;
1906 tx->callback_param = &finished;
1907
1908 cookie = dmaengine_submit(tx);
1909 if (dma_submit_error(cookie)) {
1910 dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
1911 goto err_unmap;
1912 }
1913
1914 dma_async_issue_pending(cdns_ctrl->dmac);
1915 wait_for_completion(&finished);
1916
1917 dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
1918
1919 return 0;
1920
1921 err_unmap:
1922 dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
1923
1924 err:
1925 dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
1926
1927 return -EIO;
1928 }
1929
1930 static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
1931 u8 *buf, int len)
1932 {
1933 u8 thread_nr = 0;
1934 u32 sdma_size;
1935 int status;
1936
1937 /* Wait until the slave DMA interface is ready for data transfer. */
1938 status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
1939 if (status)
1940 return status;
1941
1942 if (!cdns_ctrl->caps1->has_dma) {
1943 u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
1944
1945 int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;
1946
1947 /* Read the word-aligned part of the data. */
1948 if (data_dma_width == 4)
1949 ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
1950 #ifdef CONFIG_64BIT
1951 else
1952 readsq(cdns_ctrl->io.virt, buf, len_in_words);
1953 #endif
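/*
 * Worked example (illustrative numbers): for len = 2110 on a 64-bit
 * interface, len_in_words = 263, so 2104 bytes are read into buf
 * directly; the 6 remaining bytes, together with whatever the
 * controller still exposes up to sdma_size, are drained into the
 * bounce buffer and copied out by the tail handling below.
 */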
1954
1955 if (sdma_size > len) {
1956 int read_bytes = (data_dma_width == 4) ?
1957 len_in_words << 2 : len_in_words << 3;
1958
1959 /* Read the remaining data from the slave DMA interface, if any. */
1960 if (data_dma_width == 4)
1961 ioread32_rep(cdns_ctrl->io.virt,
1962 cdns_ctrl->buf,
1963 sdma_size / 4 - len_in_words);
1964 #ifdef CONFIG_64BIT
1965 else
1966 readsq(cdns_ctrl->io.virt, cdns_ctrl->buf,
1967 sdma_size / 8 - len_in_words);
1968 #endif
1969
1970 /* Copy the unaligned tail from the bounce buffer. */
1971 memcpy(buf + read_bytes, cdns_ctrl->buf,
1972 len - read_bytes);
1973 }
1974 return 0;
1975 }
1976
1977 if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
1978 status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
1979 cdns_ctrl->io.dma,
1980 len, DMA_FROM_DEVICE);
1981 if (status == 0)
1982 return 0;
1983
1984 dev_warn(cdns_ctrl->dev,
1985 "Slave DMA transfer failed. Try again using bounce buffer.");
1986 }
1987
1988 /* If DMA transfer is not possible or failed then use bounce buffer. */
1989 status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
1990 cdns_ctrl->io.dma,
1991 sdma_size, DMA_FROM_DEVICE);
1992
1993 if (status) {
1994 dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
1995 return status;
1996 }
1997
1998 memcpy(buf, cdns_ctrl->buf, len);
1999
2000 return 0;
2001 }
2002
2003 static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
2004 const u8 *buf, int len)
2005 {
2006 u8 thread_nr = 0;
2007 u32 sdma_size;
2008 int status;
2009
2010 /* Wait until the slave DMA interface is ready for data transfer. */
2011 status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
2012 if (status)
2013 return status;
2014
2015 if (!cdns_ctrl->caps1->has_dma) {
2016 u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
2017
2018 int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;
2019
2020 if (data_dma_width == 4)
2021 iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
2022 #ifdef CONFIG_64BIT
2023 else
2024 writesq(cdns_ctrl->io.virt, buf, len_in_words);
2025 #endif
2026
2027 if (sdma_size > len) {
2028 int written_bytes = (data_dma_width == 4) ?
2029 len_in_words << 2 : len_in_words << 3;
2030
2031 /* Copy the unaligned tail into the bounce buffer. */
2032 memcpy(cdns_ctrl->buf, buf + written_bytes,
2033 len - written_bytes);
2034
2035 /* Write the rest of the data expected by the NAND controller. */
2036 if (data_dma_width == 4)
2037 iowrite32_rep(cdns_ctrl->io.virt,
2038 cdns_ctrl->buf,
2039 sdma_size / 4 - len_in_words);
2040 #ifdef CONFIG_64BIT
2041 else
2042 writesq(cdns_ctrl->io.virt, cdns_ctrl->buf,
2043 sdma_size / 8 - len_in_words);
2044 #endif
2045 }
2046
2047 return 0;
2048 }
2049
2050 if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
2051 status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
2052 cdns_ctrl->io.dma,
2053 len, DMA_TO_DEVICE);
2054 if (status == 0)
2055 return 0;
2056
2057 dev_warn(cdns_ctrl->dev,
2058 "Slave DMA transfer failed. Try again using bounce buffer.");
2059 }
2060
2061 /* If DMA transfer is not possible or failed then use bounce buffer. */
2062 memcpy(cdns_ctrl->buf, buf, len);
2063
2064 status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
2065 cdns_ctrl->io.dma,
2066 sdma_size, DMA_TO_DEVICE);
2067
2068 if (status)
2069 dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
2070
2071 return status;
2072 }
2073
2074 static int cadence_nand_force_byte_access(struct nand_chip *chip,
2075 bool force_8bit)
2076 {
2077 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2078
2079 /*
2080 * Callers of this function do not verify if the NAND is using a 16-bit
2081 * or an 8-bit bus for normal operations, so we need to take care of that
2082 * here by leaving the configuration unchanged if the NAND does not have
2083 * the NAND_BUSWIDTH_16 flag set.
2084 */
2085 if (!(chip->options & NAND_BUSWIDTH_16))
2086 return 0;
2087
2088 return cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
2089 }
2090
2091 static int cadence_nand_cmd_opcode(struct nand_chip *chip,
2092 const struct nand_subop *subop)
2093 {
2094 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2095 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2096 const struct nand_op_instr *instr;
2097 unsigned int op_id = 0;
2098 u64 mini_ctrl_cmd = 0;
2099 int ret;
2100
2101 instr = &subop->instrs[op_id];
2102
2103 if (instr->delay_ns > 0)
2104 mini_ctrl_cmd |= GCMD_LAY_TWB;
2105
2106 mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2107 GCMD_LAY_INSTR_CMD);
2108 mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
2109 instr->ctx.cmd.opcode);
2110
2111 ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2112 cdns_chip->cs[chip->cur_cs],
2113 mini_ctrl_cmd);
2114 if (ret)
2115 dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
2116 instr->ctx.cmd.opcode);
2117
2118 return ret;
2119 }
2120
2121 static int cadence_nand_cmd_address(struct nand_chip *chip,
2122 const struct nand_subop *subop)
2123 {
2124 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2125 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2126 const struct nand_op_instr *instr;
2127 unsigned int op_id = 0;
2128 u64 mini_ctrl_cmd = 0;
2129 unsigned int offset, naddrs;
2130 u64 address = 0;
2131 const u8 *addrs;
2132 int ret;
2133 int i;
2134
2135 instr = &subop->instrs[op_id];
2136
2137 if (instr->delay_ns > 0)
2138 mini_ctrl_cmd |= GCMD_LAY_TWB;
2139
2140 mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2141 GCMD_LAY_INSTR_ADDR);
2142
2143 offset = nand_subop_get_addr_start_off(subop, op_id);
2144 naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
2145 addrs = &instr->ctx.addr.addrs[offset];
2146
2147 for (i = 0; i < naddrs; i++)
2148 address |= (u64)addrs[i] << (8 * i);
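/*
 * Example (illustrative): a 5-cycle address {0x04, 0x08, 0x00, 0x20,
 * 0x03} packs to address = 0x0320000804, i.e. address cycle i occupies
 * bits [8 * i + 7 : 8 * i] of the 64-bit value.
 */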
2149
2150 mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
2151 address);
2152 mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
2153 naddrs - 1);
2154
2155 ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2156 cdns_chip->cs[chip->cur_cs],
2157 mini_ctrl_cmd);
2158 if (ret)
2159 dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
2160
2161 return ret;
2162 }
2163
2164 static int cadence_nand_cmd_erase(struct nand_chip *chip,
2165 const struct nand_subop *subop)
2166 {
2167 unsigned int op_id;
2168
2169 if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
2170 int i;
2171 const struct nand_op_instr *instr = NULL;
2172 unsigned int offset, naddrs;
2173 const u8 *addrs;
2174 u32 page = 0;
2175
2176 instr = &subop->instrs[1];
2177 offset = nand_subop_get_addr_start_off(subop, 1);
2178 naddrs = nand_subop_get_num_addr_cyc(subop, 1);
2179 addrs = &instr->ctx.addr.addrs[offset];
2180
2181 for (i = 0; i < naddrs; i++)
2182 page |= (u32)addrs[i] << (8 * i);
2183
2184 return cadence_nand_erase(chip, page);
2185 }
2186
2187 /*
2188 * If it is not an erase operation, then handle the operation
2189 * by calling the exec_op function.
2190 */
2191 for (op_id = 0; op_id < subop->ninstrs; op_id++) {
2192 int ret;
2193 const struct nand_operation nand_op = {
2194 .cs = chip->cur_cs,
2195 .instrs = &subop->instrs[op_id],
2196 .ninstrs = 1};
2197 ret = chip->controller->ops->exec_op(chip, &nand_op, false);
2198 if (ret)
2199 return ret;
2200 }
2201
2202 return 0;
2203 }
2204
2205 static int cadence_nand_cmd_data(struct nand_chip *chip,
2206 const struct nand_subop *subop)
2207 {
2208 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2209 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2210 const struct nand_op_instr *instr;
2211 unsigned int offset, op_id = 0;
2212 u64 mini_ctrl_cmd = 0;
2213 int len = 0;
2214 int ret;
2215
2216 instr = &subop->instrs[op_id];
2217
2218 if (instr->delay_ns > 0)
2219 mini_ctrl_cmd |= GCMD_LAY_TWB;
2220
2221 mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2222 GCMD_LAY_INSTR_DATA);
2223
2224 if (instr->type == NAND_OP_DATA_OUT_INSTR)
2225 mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
2226 GCMD_DIR_WRITE);
2227
2228 len = nand_subop_get_data_len(subop, op_id);
2229 offset = nand_subop_get_data_start_off(subop, op_id);
2230 mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
2231 mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
2232 if (instr->ctx.data.force_8bit) {
2233 ret = cadence_nand_force_byte_access(chip, true);
2234 if (ret) {
2235 dev_err(cdns_ctrl->dev,
2236 "cannot change byte access generic data cmd failed\n");
2237 return ret;
2238 }
2239 }
2240
2241 ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2242 cdns_chip->cs[chip->cur_cs],
2243 mini_ctrl_cmd);
2244 if (ret) {
2245 dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
2246 return ret;
2247 }
2248
2249 if (instr->type == NAND_OP_DATA_IN_INSTR) {
2250 void *buf = instr->ctx.data.buf.in + offset;
2251
2252 ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
2253 } else {
2254 const void *buf = instr->ctx.data.buf.out + offset;
2255
2256 ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
2257 }
2258
2259 if (ret) {
2260 dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
2261 return ret;
2262 }
2263
2264 if (instr->ctx.data.force_8bit) {
2265 ret = cadence_nand_force_byte_access(chip, false);
2266 if (ret) {
2267 dev_err(cdns_ctrl->dev,
2268 "cannot change byte access generic data cmd failed\n");
2269 }
2270 }
2271
2272 return ret;
2273 }
2274
2275 static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
2276 const struct nand_subop *subop)
2277 {
2278 int status;
2279 unsigned int op_id = 0;
2280 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2281 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2282 const struct nand_op_instr *instr = &subop->instrs[op_id];
2283 u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
2284
2285 status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
2286 timeout_us,
2287 BIT(cdns_chip->cs[chip->cur_cs]),
2288 false);
2289 return status;
2290 }
2291
2292 static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
2293 NAND_OP_PARSER_PATTERN(
2294 cadence_nand_cmd_erase,
2295 NAND_OP_PARSER_PAT_CMD_ELEM(false),
2296 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
2297 NAND_OP_PARSER_PAT_CMD_ELEM(false),
2298 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
2299 NAND_OP_PARSER_PATTERN(
2300 cadence_nand_cmd_opcode,
2301 NAND_OP_PARSER_PAT_CMD_ELEM(false)),
2302 NAND_OP_PARSER_PATTERN(
2303 cadence_nand_cmd_address,
2304 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
2305 NAND_OP_PARSER_PATTERN(
2306 cadence_nand_cmd_data,
2307 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
2308 NAND_OP_PARSER_PATTERN(
2309 cadence_nand_cmd_data,
2310 NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
2311 NAND_OP_PARSER_PATTERN(
2312 cadence_nand_cmd_waitrdy,
2313 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
2314 );
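/*
 * Reading aid (not part of the driver logic): the parser tries the
 * patterns in order, so an ERASE1 + address + ERASE2 + waitrdy sequence
 * is matched as a whole and routed to cadence_nand_cmd_erase(), while a
 * lone command, address, data-in/out or waitrdy instruction falls
 * through to the dedicated single-instruction handlers above.
 */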
2315
2316 static int cadence_nand_exec_op(struct nand_chip *chip,
2317 const struct nand_operation *op,
2318 bool check_only)
2319 {
2320 if (!check_only) {
2321 int status = cadence_nand_select_target(chip);
2322
2323 if (status)
2324 return status;
2325 }
2326
2327 return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
2328 check_only);
2329 }
2330
2331 static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
2332 struct mtd_oob_region *oobregion)
2333 {
2334 struct nand_chip *chip = mtd_to_nand(mtd);
2335 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2336
2337 if (section)
2338 return -ERANGE;
2339
2340 oobregion->offset = cdns_chip->bbm_len;
2341 oobregion->length = cdns_chip->avail_oob_size
2342 - cdns_chip->bbm_len;
2343
2344 return 0;
2345 }
2346
2347 static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2348 struct mtd_oob_region *oobregion)
2349 {
2350 struct nand_chip *chip = mtd_to_nand(mtd);
2351 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2352
2353 if (section)
2354 return -ERANGE;
2355
2356 oobregion->offset = cdns_chip->avail_oob_size;
2357 oobregion->length = chip->ecc.total;
2358
2359 return 0;
2360 }
2361
2362 static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
2363 .free = cadence_nand_ooblayout_free,
2364 .ecc = cadence_nand_ooblayout_ecc,
2365 };
2366
2367 static int calc_cycl(u32 timing, u32 clock)
2368 {
2369 if (timing == 0 || clock == 0)
2370 return 0;
2371
2372 if ((timing % clock) > 0)
2373 return timing / clock;
2374 else
2375 return timing / clock - 1;
2376 }
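
/*
 * Worked example (illustrative values): with a 20 MHz controller clock
 * (clk_period = 50000 ps), calc_cycl(104830, 50000) = 2 because the
 * division leaves a remainder, while calc_cycl(100000, 50000) = 1.
 * The result is one less than the number of clock cycles, which is why
 * terms of the form (cnt + 1) * clk_period below add the cycle back.
 */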
2377
2378 /* Calculate max data valid window. */
2379 static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
2380 u32 board_delay_skew_min, u32 ext_mode)
2381 {
2382 if (ext_mode == 0)
2383 clk_period /= 2;
2384
2385 return (trp_cnt + 1) * clk_period + trhoh_min +
2386 board_delay_skew_min;
2387 }
2388
2389 /* Calculate data valid window. */
2390 static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
2391 u32 trea_max, u32 ext_mode)
2392 {
2393 if (ext_mode == 0)
2394 clk_period /= 2;
2395
2396 return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
2397 }
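
/*
 * Worked example (illustrative values loosely based on ONFI SDR mode 5,
 * where tREA_max = 16000 ps and tRHOH_min = 15000 ps): with clk_period =
 * 50000 ps, ext_mode = 0 and trp_cnt = 0, the RE pulse lasts half a
 * clock, so tDVW = 25000 + 15000 - 16000 = 24000 ps, and with
 * board_delay_skew_min = 4830 ps the window closes at tDVW_max =
 * 25000 + 15000 + 4830 = 44830 ps after the falling RE edge.
 */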
2398
2399 static int cadence_nand_setup_sdr_interface(struct nand_chip *chip,
2400 const struct nand_sdr_timings *sdr)
2401 {
2402 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2403 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2404 struct cadence_nand_timings *t = &cdns_chip->timings;
2405 u32 reg;
2406 u32 board_delay = cdns_ctrl->board_delay;
2407 u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
2408 cdns_ctrl->nf_clk_rate);
2409 u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
2410 u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
2411 u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
2412 u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
2413 u32 if_skew = cdns_ctrl->caps1->if_skew;
2414 u32 board_delay_skew_min = board_delay - if_skew;
2415 u32 board_delay_skew_max = board_delay + if_skew;
2416 u32 dqs_sampl_res, phony_dqs_mod;
2417 u32 tdvw, tdvw_min, tdvw_max;
2418 u32 ext_rd_mode, ext_wr_mode;
2419 u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
2420 u32 sampling_point;
2421
2422 memset(t, 0, sizeof(*t));
2423 /* Sampling point calculation. */
2424 if (cdns_ctrl->caps2.is_phy_type_dll)
2425 phony_dqs_mod = 2;
2426 else
2427 phony_dqs_mod = 1;
2428
2429 dqs_sampl_res = clk_period / phony_dqs_mod;
2430
2431 tdvw_min = sdr->tREA_max + board_delay_skew_max;
2432 /*
2433 * The idea of these calculations is to get the optimum values
2434 * for tRP and tRH timings. If it is NOT possible to sample data
2435 * with optimal tRP/tRH settings, the parameters will be extended.
2436 * If clk_period is 50ns (the lowest value) this condition is met
2437 * for SDR timing modes 1, 2, 3, 4 and 5.
2438 * If clk_period is 20ns the condition is met only for SDR timing
2439 * mode 5.
2440 */
2441 if (sdr->tRC_min <= clk_period &&
2442 sdr->tRP_min <= (clk_period / 2) &&
2443 sdr->tREH_min <= (clk_period / 2)) {
2444 /* Performance mode. */
2445 ext_rd_mode = 0;
2446 tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
2447 sdr->tREA_max, ext_rd_mode);
2448 tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
2449 board_delay_skew_min,
2450 ext_rd_mode);
2451 /*
2452 * Check if the data valid window and sampling point can be found
2453 * and are not on the edge (i.e. we have hold margin).
2454 * If not, extend the tRP timing.
2455 */
2456 if (tdvw > 0) {
2457 if (tdvw_max <= tdvw_min ||
2458 (tdvw_max % dqs_sampl_res) == 0) {
2459 /*
2460 * No valid sampling point, so the RE pulse needs
2461 * to be widened by half a clock cycle.
2462 */
2463 ext_rd_mode = 1;
2464 }
2465 } else {
2466 /*
2467 * There is no valid window in which to sample the data,
2468 * so tRP needs to be widened.
2469 * Very conservative calculations are performed here.
2470 */
2471 trp_cnt = (sdr->tREA_max + board_delay_skew_max
2472 + dqs_sampl_res) / clk_period;
2473 ext_rd_mode = 1;
2474 }
2475
2476 } else {
2477 /* Extended read mode. */
2478 u32 trh;
2479
2480 ext_rd_mode = 1;
2481 trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
2482 trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
2483 if (sdr->tREH_min >= trh)
2484 trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
2485 else
2486 trh_cnt = calc_cycl(trh, clk_period);
2487
2488 tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
2489 sdr->tREA_max, ext_rd_mode);
2490 /*
2491 * Check if the data valid window and sampling point can be found,
2492 * or, if the window is at the edge, whether the previous sampling
2493 * point is valid; if not, extend the tRP timing.
2494 */
2495 if (tdvw > 0) {
2496 tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
2497 sdr->tRHOH_min,
2498 board_delay_skew_min,
2499 ext_rd_mode);
2500
2501 if ((((tdvw_max / dqs_sampl_res)
2502 * dqs_sampl_res) <= tdvw_min) ||
2503 (((tdvw_max % dqs_sampl_res) == 0) &&
2504 (((tdvw_max / dqs_sampl_res - 1)
2505 * dqs_sampl_res) <= tdvw_min))) {
2506 /*
2507 * The data valid window is narrower than the
2508 * sampling resolution and does not hit any
2509 * sampling point. To be sure a sampling point
2510 * will be found, the RE low pulse width is
2511 * extended by one clock cycle.
2512 */
2513 trp_cnt = trp_cnt + 1;
2514 }
2515 } else {
2516 /*
2517 * There is no valid window in which to sample the data,
2518 * so tRP needs to be widened.
2519 * Very conservative calculations are performed here.
2520 */
2521 trp_cnt = (sdr->tREA_max + board_delay_skew_max
2522 + dqs_sampl_res) / clk_period;
2523 }
2524 }
2525
2526 tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
2527 sdr->tRHOH_min,
2528 board_delay_skew_min, ext_rd_mode);
2529
2530 if (sdr->tWC_min <= clk_period &&
2531 (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
2532 (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
2533 ext_wr_mode = 0;
2534 } else {
2535 u32 twh;
2536
2537 ext_wr_mode = 1;
2538 twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
2539 if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
2540 twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
2541 clk_period);
2542
2543 twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
2544 if (sdr->tWH_min >= twh)
2545 twh = sdr->tWH_min;
2546
2547 twh_cnt = calc_cycl(twh + if_skew, clk_period);
2548 }
2549
2550 reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
2551 reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
2552 reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
2553 reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
2554 t->async_toggle_timings = reg;
2555 dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
2556
2557 tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
2558 tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
2559 twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
2560 trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
2561 reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
2562
2563 /*
2564 * If the timing exceeds the delay field of the timing register,
2565 * then use the maximum value.
2566 */
2567 if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
2568 reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
2569 else
2570 reg |= TIMINGS0_TCCS;
2571
2572 reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
2573 reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
2574 t->timings0 = reg;
2575 dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
2576
2577 /* The following is related to a single signal, so skew is not needed. */
2578 trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
2579 trhz_cnt = trhz_cnt + 1;
2580 twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
2581 /*
2582 * Because of the two-stage syncflop, the value must be increased
2583 * by 3 + 5: the first value accounts for synchronization, the
2584 * second for the output interface delay.
2585 */
2586 twb_cnt = twb_cnt + 3 + 5;
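/*
 * Worked example (illustrative values): with tWB_max = 100000 ps,
 * board_delay = 4830 ps and clk_period = 50000 ps,
 * calc_cycl(104830, 50000) = 2, so twb_cnt = 2 + 3 + 5 = 10.
 */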
2587 /*
2588 * The following is related to the WE edge of the random data input
2589 * sequence, so skew is not needed.
2590 */
2591 tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
2592 reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
2593 reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
2594 reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
2595 t->timings1 = reg;
2596 dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
2597
2598 tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
2599 if (tfeat_cnt < twb_cnt)
2600 tfeat_cnt = twb_cnt;
2601
2602 tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
2603 tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
2604
2605 reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
2606 reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
2607 reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
2608 t->timings2 = reg;
2609 dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
2610
2611 if (cdns_ctrl->caps2.is_phy_type_dll) {
2612 reg = DLL_PHY_CTRL_DLL_RST_N;
2613 if (ext_wr_mode)
2614 reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
2615 if (ext_rd_mode)
2616 reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
2617
2618 reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
2619 reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
2620 t->dll_phy_ctrl = reg;
2621 dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
2622 }
2623
2624 /* Sampling point calculation. */
2625 if ((tdvw_max % dqs_sampl_res) > 0)
2626 sampling_point = tdvw_max / dqs_sampl_res;
2627 else
2628 sampling_point = (tdvw_max / dqs_sampl_res - 1);
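/*
 * Continuing the illustrative numbers used above (clk_period = 50000 ps,
 * DLL PHY, so dqs_sampl_res = 25000 ps, tdvw_max = 44830 ps and
 * tdvw_min = 16000 + 4830 = 20830 ps): the division leaves a remainder,
 * so sampling_point = 1, and 1 * 25000 > 20830 means a valid sampling
 * point with hold margin exists.
 */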
2629
2630 if (sampling_point * dqs_sampl_res > tdvw_min) {
2631 dll_phy_dqs_timing =
2632 FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
2633 dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
2634 phony_dqs_timing = sampling_point / phony_dqs_mod;
2635
2636 if ((sampling_point % 2) > 0) {
2637 dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
2638 if ((tdvw_max % dqs_sampl_res) == 0)
2639 /*
2640 * Calculation for sampling point at the edge
2641 * of data and being odd number.
2642 */
2643 phony_dqs_timing = (tdvw_max / dqs_sampl_res)
2644 / phony_dqs_mod - 1;
2645
2646 if (!cdns_ctrl->caps2.is_phy_type_dll)
2647 phony_dqs_timing--;
2648
2649 } else {
2650 phony_dqs_timing--;
2651 }
2652 rd_del_sel = phony_dqs_timing + 3;
2653 } else {
2654 dev_warn(cdns_ctrl->dev,
2655 "ERROR : cannot find valid sampling point\n");
2656 }
2657
2658 reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
2659 if (cdns_ctrl->caps2.is_phy_type_dll)
2660 reg |= PHY_CTRL_SDR_DQS;
2661 t->phy_ctrl = reg;
2662 dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
2663
2664 if (cdns_ctrl->caps2.is_phy_type_dll) {
2665 dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
2666 dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
2667 dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
2668 dll_phy_dqs_timing);
2669 t->phy_dqs_timing = dll_phy_dqs_timing;
2670
2671 reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
2672 dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
2673 reg);
2674 t->phy_gate_lpbk_ctrl = reg;
2675
2676 dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
2677 PHY_DLL_MASTER_CTRL_BYPASS_MODE);
2678 dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
2679 }
2680 return 0;
2681 }
2682
2683 static int
2684 cadence_nand_setup_nvddr_interface(struct nand_chip *chip,
2685 const struct nand_nvddr_timings *nvddr)
2686 {
2687 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2688 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2689 struct cadence_nand_timings *t = &cdns_chip->timings;
2690 u32 board_delay = cdns_ctrl->board_delay;
2691 u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
2692 cdns_ctrl->nf_clk_rate);
2693 u32 ddr_clk_ctrl_period = clk_period * 2;
2694 u32 if_skew = cdns_ctrl->caps1->if_skew;
2695 u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
2696 u32 twrck_cnt, tcad_cnt, tckwr_cnt = 0;
2697 u32 tfeat_cnt, trhz_cnt, tvdly_cnt, tcwaw_cnt;
2698 u32 trhw_cnt, twb_cnt, twhr_cnt;
2699 u32 oe_start, oe_end, oe_end_dqsd;
2700 u32 rd_del_sel = 0;
2701 u32 dqs_driven_by_device, dqs_toggle_by_device, gate_open_delay;
2702 u32 dll_phy_gate_open_delay, gate_close_delay, ie_start;
2703 u32 dll_phy_rd_delay;
2704 u32 reg;
2705
2706 memset(t, 0, sizeof(*t));
2707 twrck_cnt = calc_cycl(nvddr->tWRCK_min, ddr_clk_ctrl_period);
2708 tcad_cnt = calc_cycl(nvddr->tCAD_min, ddr_clk_ctrl_period);
2709
2710 reg = FIELD_PREP(SYNC_TWRCK, twrck_cnt);
2711 reg |= FIELD_PREP(SYNC_TCAD, tcad_cnt);
2712 t->sync_timings = reg;
2713 dev_dbg(cdns_ctrl->dev, "SYNC_TIMINGS_NVDDR\t%08x\n", reg);
2714
2715 tadl_cnt = calc_cycl((nvddr->tADL_min + if_skew), ddr_clk_ctrl_period);
2716 tccs_cnt = calc_cycl((nvddr->tCCS_min + if_skew), ddr_clk_ctrl_period);
2717 twhr_cnt = calc_cycl((nvddr->tWHR_min + if_skew), ddr_clk_ctrl_period);
2718 trhw_cnt = calc_cycl((nvddr->tRHW_min + if_skew), ddr_clk_ctrl_period);
2719 reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
2720 reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
2721 reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
2722 reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
2723 t->timings0 = reg;
2724 dev_dbg(cdns_ctrl->dev, "TIMINGS0_NVDDR\t%08x\n", reg);
2725
2726 twb_cnt = calc_cycl((nvddr->tWB_max + board_delay),
2727 ddr_clk_ctrl_period);
2728 /*
2729 * Because of the two-stage syncflop, the value must be increased
2730 * by 3 + 5: the first value accounts for synchronization, the
2731 * second for the output interface delay.
2732 */
2733 twb_cnt = twb_cnt + 3 + 5;
2734 tvdly_cnt = calc_cycl(NVDDR_TVDLY_DELAY + if_skew, ddr_clk_ctrl_period);
2735 tcwaw_cnt = calc_cycl(NVDDR_TCWAW_DELAY, ddr_clk_ctrl_period);
2736 trhz_cnt = 1;
2737 reg = FIELD_PREP(TIMINGS1_TWB, twb_cnt);
2738 reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
2739 reg |= FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
2740 reg |= FIELD_PREP(TIMINGS1_TCWAW, tcwaw_cnt);
2741 t->timings1 = reg;
2742 dev_dbg(cdns_ctrl->dev, "TIMINGS1_NVDDR\t%08x\n", reg);
2743
2744 tfeat_cnt = calc_cycl(nvddr->tFEAT_max, ddr_clk_ctrl_period);
2745 if (tfeat_cnt < twb_cnt)
2746 tfeat_cnt = twb_cnt;
2747
2748 tceh_cnt = calc_cycl(nvddr->tCEH_min, ddr_clk_ctrl_period);
2749 tcs_cnt = calc_cycl((nvddr->tCS_min + if_skew), ddr_clk_ctrl_period);
2750 reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
2751 reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
2752 reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
2753 t->timings2 = reg;
2754 dev_dbg(cdns_ctrl->dev, "TIMINGS2_NVDDR\t%08x\n", reg);
2755
2756 reg = FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, NVDDR_RS_HIGH_WAIT_CNT);
2757 reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, NVDDR_RS_IDLE_CNT);
2758 t->dll_phy_ctrl = reg;
2759 dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_NVDDR\t%08x\n", reg);
2760
2761 reg = PHY_CTRL_SDR_DQS;
2762 t->phy_ctrl = reg;
2763 dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_NVDDR\t%08x\n", reg);
2764
2765 dqs_driven_by_device = (nvddr->tDQSD_max + board_delay) / 1000 +
2766 if_skew;
2767 dqs_toggle_by_device = (nvddr->tDQSCK_max + board_delay) / 1000 -
2768 if_skew;
2769 gate_open_delay = dqs_toogle_by_device / (clk_period / 1000);
2770 if (dqs_toggle_by_device > clk_period / 1000) {
2771 if (gate_open_delay > NVDDR_GATE_CFG_OPT)
2772 dll_phy_gate_open_delay = NVDDR_GATE_CFG_MAX;
2773 else
2774 dll_phy_gate_open_delay = gate_open_delay + 1;
2775 gate_close_delay = 0;
2776 } else {
2777 twrck_cnt = calc_cycl(dqs_driven_by_device * 1000, clk_period);
2778 dll_phy_gate_open_delay = 1;
2779 gate_close_delay = 0;
2780
2781 reg = FIELD_PREP(SYNC_TCKWR, tckwr_cnt);
2782 reg |= FIELD_PREP(SYNC_TWRCK, twrck_cnt);
2783 reg |= FIELD_PREP(SYNC_TCAD, tcad_cnt);
2784 t->sync_timings = reg;
2785 dev_dbg(cdns_ctrl->dev, "SYNC_TIMINGS_NVDDR\t%08x\n", reg);
2786 }
2787
2788 if (dll_phy_gate_open_delay > NVDDR_GATE_CFG_STD)
2789 ie_start = NVDDR_GATE_CFG_STD;
2790 else
2791 ie_start = dll_phy_gate_open_delay;
2792
2793 dll_phy_rd_delay = ((nvddr->tDQSCK_max + board_delay) +
2794 (clk_period / 2)) / clk_period;
2795 if (dll_phy_rd_delay <= NVDDR_PHY_RD_DELAY)
2796 rd_del_sel = dll_phy_rd_delay + 2;
2797 else
2798 rd_del_sel = NVDDR_PHY_RD_DELAY_MAX;
2799
2800 reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_GATE_CFG, dll_phy_gate_open_delay);
2801 reg |= FIELD_PREP(PHY_GATE_LPBK_CTRL_GATE_CFG_CLOSE, gate_close_delay);
2802 reg |= FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
2803 t->phy_gate_lpbk_ctrl = reg;
2804 dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_NVDDR\t%08x\n", reg);
2805
2806 oe_end_dqsd = ((nvddr->tDQSD_max / 1000) / ((clk_period / 2) / 1000))
2807 + NVDDR_DATA_SEL_OE_END_MIN;
2808 oe_end = (NVDDR_DATA_SEL_OE_END_MIN + oe_end_dqsd) / 2;
2809 if (oe_end > NVDDR_DATA_SEL_OE_END_MAX)
2810 oe_end = NVDDR_DATA_SEL_OE_END_MAX;
2811
2812 oe_start = ((nvddr->tDQSHZ_max / 1000) / ((clk_period / 2) / 1000)) + 1;
2813 if (oe_start > NVDDR_DATA_SEL_OE_START_MAX)
2814 oe_start = NVDDR_DATA_SEL_OE_START_MAX;
2815
2816 reg = FIELD_PREP(PHY_DQ_TIMING_OE_END, NVDDR_DATA_SEL_OE_END);
2817 reg |= FIELD_PREP(PHY_DQ_TIMING_OE_START, NVDDR_DATA_SEL_OE_START);
2818 reg |= FIELD_PREP(PHY_DQ_TIMING_TSEL_END, NVDDR_DATA_SEL_OE_END);
2819 reg |= FIELD_PREP(PHY_DQ_TIMING_TSEL_START, NVDDR_DATA_SEL_OE_START);
2820 t->phy_dq_timing = reg;
2821 dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_NVDDR\t%08x\n", reg);
2822
2823 reg = FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, oe_end);
2824 reg |= FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_START, oe_start);
2825 reg |= FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_TSEL_END, oe_end);
2826 t->phy_dqs_timing = reg;
2827 dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_NVDDR\t%08x\n", reg);
2828
2829 reg = FIELD_PREP(PHY_IE_TIMING_DQS_IE_START, ie_start);
2830 reg |= FIELD_PREP(PHY_IE_TIMING_DQ_IE_START, ie_start);
2831 reg |= FIELD_PREP(PHY_IE_TIMING_IE_ALWAYS_ON, 0);
2832 t->phy_ie_timing = reg;
2833 dev_dbg(cdns_ctrl->dev, "PHY_IE_TIMING_REG_NVDDR\t%08x\n", reg);
2834
2835 reg = readl_relaxed(cdns_ctrl->reg + DLL_PHY_CTRL);
2836 reg &= ~(DLL_PHY_CTRL_DLL_RST_N |
2837 DLL_PHY_CTRL_EXTENDED_RD_MODE |
2838 DLL_PHY_CTRL_EXTENDED_WR_MODE);
2839 writel_relaxed(reg, cdns_ctrl->reg + DLL_PHY_CTRL);
2840 writel_relaxed(OPR_MODE_NVDDR, cdns_ctrl->reg + COMMON_SET);
2841 writel_relaxed(NVDDR_TOGGLE_TIMINGS_0,
2842 cdns_ctrl->reg + TOGGLE_TIMINGS_0);
2843 writel_relaxed(NVDDR_TOGGLE_TIMINGS_1,
2844 cdns_ctrl->reg + TOGGLE_TIMINGS_1);
2845 writel_relaxed(NVDDR_ASYNC_TOGGLE_TIMINGS,
2846 cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
2847 writel_relaxed(t->sync_timings, cdns_ctrl->reg + SYNC_TIMINGS);
2848 writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
2849 writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
2850 writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
2851 writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
2852 writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
2853 writel_relaxed(NVDDR_PHY_TSEL, cdns_ctrl->reg + PHY_TSEL);
2854 writel_relaxed(t->phy_dq_timing, cdns_ctrl->reg + PHY_DQ_TIMING);
2855 writel_relaxed(t->phy_dqs_timing, cdns_ctrl->reg + PHY_DQS_TIMING);
2856 writel_relaxed(t->phy_gate_lpbk_ctrl,
2857 cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
2858 writel_relaxed(NVDDR_PHY_DLL_MASTER_CTRL,
2859 cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
2860 writel_relaxed(NVDDR_PHY_DLL_SLAVE_CTRL,
2861 cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
2862 writel_relaxed(t->phy_ie_timing, cdns_ctrl->reg + PHY_IE_TIMING);
2863 writel_relaxed((reg | DLL_PHY_CTRL_DLL_RST_N),
2864 cdns_ctrl->reg + DLL_PHY_CTRL);
2865 return 0;
2866 }
2867
2868 static int
2869 cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
2870 const struct nand_interface_config *conf)
2871 {
2872 int ret = 0;
2873
2874 if (chipnr < 0)
2875 return ret;
2876
2877 if (nand_interface_is_sdr(conf)) {
2878 const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
2879
2880 if (IS_ERR(sdr))
2881 return PTR_ERR(sdr);
2882
2883 ret = cadence_nand_setup_sdr_interface(chip, sdr);
2884 } else {
2885 const struct nand_nvddr_timings *nvddr = nand_get_nvddr_timings(conf);
2886
2887 if (IS_ERR(nvddr))
2888 return PTR_ERR(nvddr);
2889
2890 ret = cadence_nand_setup_nvddr_interface(chip, nvddr);
2891 }
2892 return ret;
2893 }
2894
2895 static int cadence_nand_attach_chip(struct nand_chip *chip)
2896 {
2897 struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2898 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2899 u32 ecc_size;
2900 struct mtd_info *mtd = nand_to_mtd(chip);
2901 int ret;
2902
2903 if (chip->options & NAND_BUSWIDTH_16) {
2904 ret = cadence_nand_set_access_width16(cdns_ctrl, true);
2905 if (ret)
2906 return ret;
2907 }
2908
2909 chip->bbt_options |= NAND_BBT_USE_FLASH;
2910 chip->bbt_options |= NAND_BBT_NO_OOB;
2911 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2912
2913 chip->options |= NAND_NO_SUBPAGE_WRITE;
2914
2915 cdns_chip->bbm_offs = chip->badblockpos;
2916 cdns_chip->bbm_offs &= ~0x01;
2917 /* This value should be an even number. */
2918 cdns_chip->bbm_len = 2;
2919
2920 ret = nand_ecc_choose_conf(chip,
2921 &cdns_ctrl->ecc_caps,
2922 mtd->oobsize - cdns_chip->bbm_len);
2923 if (ret) {
2924 dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
2925 return ret;
2926 }
2927
2928 dev_dbg(cdns_ctrl->dev,
2929 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
2930 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
2931
2932 /* Error correction configuration. */
2933 cdns_chip->sector_size = chip->ecc.size;
2934 cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
2935 ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
2936
2937 cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
2938
2939 if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
2940 cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
2941
2942 if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
2943 > mtd->oobsize)
2944 cdns_chip->avail_oob_size -= 4;
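/*
 * Worked example (illustrative numbers, not a specific part): a
 * 4096 + 256 page with 1024-byte ECC steps and 42 ECC bytes per step
 * gives sector_count = 4 and ecc_size = 168, so avail_oob_size starts
 * at 88 (assuming bch_metadata_size is at least 88, so the clamp above
 * does not apply); since 88 + 2 + 168 = 258 > 256, it is trimmed by 4
 * to 84.
 */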
2945
2946 ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
2947 if (ret < 0)
2948 return -EINVAL;
2949
2950 cdns_chip->corr_str_idx = (u8)ret;
2951
2952 if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
2953 1000000,
2954 CTRL_STATUS_CTRL_BUSY, true))
2955 return -ETIMEDOUT;
2956
2957 cadence_nand_set_ecc_strength(cdns_ctrl,
2958 cdns_chip->corr_str_idx);
2959
2960 cadence_nand_set_erase_detection(cdns_ctrl, true,
2961 chip->ecc.strength);
2962
2963 /* Override the default read operations. */
2964 chip->ecc.read_page = cadence_nand_read_page;
2965 chip->ecc.read_page_raw = cadence_nand_read_page_raw;
2966 chip->ecc.write_page = cadence_nand_write_page;
2967 chip->ecc.write_page_raw = cadence_nand_write_page_raw;
2968 chip->ecc.read_oob = cadence_nand_read_oob;
2969 chip->ecc.write_oob = cadence_nand_write_oob;
2970 chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
2971 chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
2972
2973 if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
2974 cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
2975
2976 /* Is 32-bit DMA supported? */
2977 ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
2978 if (ret) {
2979 dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
2980 return ret;
2981 }
2982
2983 mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
2984
2985 return 0;
2986 }
2987
2988 static const struct nand_controller_ops cadence_nand_controller_ops = {
2989 .attach_chip = cadence_nand_attach_chip,
2990 .exec_op = cadence_nand_exec_op,
2991 .setup_interface = cadence_nand_setup_interface,
2992 };
2993
2994 static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
2995 struct device_node *np)
2996 {
2997 struct cdns_nand_chip *cdns_chip;
2998 struct mtd_info *mtd;
2999 struct nand_chip *chip;
3000 int nsels, ret, i;
3001 u32 cs;
3002
3003 nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
3004 if (nsels <= 0) {
3005 dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
3006 return -EINVAL;
3007 }
3008
3009 /* Allocate the nand chip structure. */
3010 cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
3011 (nsels * sizeof(u8)),
3012 GFP_KERNEL);
3013 if (!cdns_chip) {
3014 dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
3015 return -ENOMEM;
3016 }
3017
3018 cdns_chip->nsels = nsels;
3019
3020 for (i = 0; i < nsels; i++) {
3021 /* Retrieve CS id. */
3022 ret = of_property_read_u32_index(np, "reg", i, &cs);
3023 if (ret) {
3024 dev_err(cdns_ctrl->dev,
3025 "could not retrieve reg property: %d\n",
3026 ret);
3027 return ret;
3028 }
3029
3030 if (cs >= cdns_ctrl->caps2.max_banks) {
3031 dev_err(cdns_ctrl->dev,
3032 "invalid reg value: %u (max CS = %d)\n",
3033 cs, cdns_ctrl->caps2.max_banks);
3034 return -EINVAL;
3035 }
3036
3037 if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
3038 dev_err(cdns_ctrl->dev,
3039 "CS %d already assigned\n", cs);
3040 return -EINVAL;
3041 }
3042
3043 cdns_chip->cs[i] = cs;
3044 }
3045
3046 chip = &cdns_chip->chip;
3047 chip->controller = &cdns_ctrl->controller;
3048 nand_set_flash_node(chip, np);
3049
3050 mtd = nand_to_mtd(chip);
3051 mtd->dev.parent = cdns_ctrl->dev;
3052
3053 /*
3054 * Default to HW ECC engine mode. If the nand-ecc-mode property is given
3055 * in the DT node, this entry will be overwritten in nand_scan_ident().
3056 */
3057 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
3058
3059 ret = nand_scan(chip, cdns_chip->nsels);
3060 if (ret) {
3061 dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
3062 return ret;
3063 }
3064
3065 ret = mtd_device_register(mtd, NULL, 0);
3066 if (ret) {
3067 dev_err(cdns_ctrl->dev,
3068 "failed to register mtd device: %d\n", ret);
3069 nand_cleanup(chip);
3070 return ret;
3071 }
3072
3073 list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
3074
3075 return 0;
3076 }
3077
3078 static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
3079 {
3080 struct cdns_nand_chip *entry, *temp;
3081 struct nand_chip *chip;
3082 int ret;
3083
3084 list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
3085 chip = &entry->chip;
3086 ret = mtd_device_unregister(nand_to_mtd(chip));
3087 WARN_ON(ret);
3088 nand_cleanup(chip);
3089 list_del(&entry->node);
3090 }
3091 }
3092
3093 static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
3094 {
3095 struct device_node *np = cdns_ctrl->dev->of_node;
3096 int max_cs = cdns_ctrl->caps2.max_banks;
3097 int nchips, ret;
3098
3099 nchips = of_get_child_count(np);
3100
3101 if (nchips > max_cs) {
3102 dev_err(cdns_ctrl->dev,
3103 "too many NAND chips: %d (max = %d CS)\n",
3104 nchips, max_cs);
3105 return -EINVAL;
3106 }
3107
3108 for_each_child_of_node_scoped(np, nand_np) {
3109 ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
3110 if (ret) {
3111 cadence_nand_chips_cleanup(cdns_ctrl);
3112 return ret;
3113 }
3114 }
3115
3116 return 0;
3117 }
3118
3119 static void
3120 cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
3121 {
3122 /* Disable interrupts. */
3123 writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
3124 }
3125
3126 static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
3127 {
3128 dma_cap_mask_t mask;
3129 struct dma_device *dma_dev = NULL;
3130 int ret;
3131
3132 cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
3133 sizeof(*cdns_ctrl->cdma_desc),
3134 &cdns_ctrl->dma_cdma_desc,
3135 GFP_KERNEL);
3136 if (!cdns_ctrl->cdma_desc)
3137 return -ENOMEM;
3138
3139 cdns_ctrl->buf_size = SZ_16K;
3140 cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
3141 if (!cdns_ctrl->buf) {
3142 ret = -ENOMEM;
3143 goto free_buf_desc;
3144 }
3145
3146 if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
3147 IRQF_SHARED, "cadence-nand-controller",
3148 cdns_ctrl)) {
3149 dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
3150 ret = -ENODEV;
3151 goto free_buf;
3152 }
3153
3154 spin_lock_init(&cdns_ctrl->irq_lock);
3155 init_completion(&cdns_ctrl->complete);
3156
3157 ret = cadence_nand_hw_init(cdns_ctrl);
3158 if (ret)
3159 goto disable_irq;
3160
3161 dma_cap_zero(mask);
3162 dma_cap_set(DMA_MEMCPY, mask);
3163
3164 if (cdns_ctrl->caps1->has_dma) {
3165 cdns_ctrl->dmac = dma_request_chan_by_mask(&mask);
3166 if (IS_ERR(cdns_ctrl->dmac)) {
3167 ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac),
3168 "Failed to get a DMA channel\n");
3169 goto disable_irq;
3170 }
3171
3172 dma_dev = cdns_ctrl->dmac->device;
3173 cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
3174 cdns_ctrl->io.size,
3175 DMA_BIDIRECTIONAL, 0);
3176
3177 ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma);
3178 if (ret) {
3179 dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n");
3180 goto dma_release_chnl;
3181 }
3182 }
3183
3184 nand_controller_init(&cdns_ctrl->controller);
3185 INIT_LIST_HEAD(&cdns_ctrl->chips);
3186
3187 cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
3188 cdns_ctrl->curr_corr_str_idx = 0xFF;
3189
3190 ret = cadence_nand_chips_init(cdns_ctrl);
3191 if (ret) {
3192 dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
3193 ret);
3194 goto unmap_dma_resource;
3195 }
3196
3197 kfree(cdns_ctrl->buf);
3198 cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
3199 if (!cdns_ctrl->buf) {
3200 ret = -ENOMEM;
3201 goto unmap_dma_resource;
3202 }
3203
3204 return 0;
3205
3206 unmap_dma_resource:
3207 if (cdns_ctrl->dmac)
3208 dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma, cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
3209
3210 dma_release_chnl:
3211 if (cdns_ctrl->dmac)
3212 dma_release_channel(cdns_ctrl->dmac);
3213
3214 disable_irq:
3215 cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
3216
3217 free_buf:
3218 kfree(cdns_ctrl->buf);
3219
3220 free_buf_desc:
3221 dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
3222 cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
3223
3224 return ret;
3225 }
3226
3227 /* Driver exit point. */
3228 static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
3229 {
3230 cadence_nand_chips_cleanup(cdns_ctrl);
3231 if (cdns_ctrl->dmac)
3232 dma_unmap_resource(cdns_ctrl->dmac->device->dev,
3233 cdns_ctrl->io.iova_dma, cdns_ctrl->io.size,
3234 DMA_BIDIRECTIONAL, 0);
3235 cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
3236 kfree(cdns_ctrl->buf);
3237 dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
3238 cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
3239
3240 if (cdns_ctrl->dmac)
3241 dma_release_channel(cdns_ctrl->dmac);
3242 }
3243
3244 struct cadence_nand_dt {
3245 struct cdns_nand_ctrl cdns_ctrl;
3246 struct clk *clk;
3247 };
3248
3249 static const struct cadence_nand_dt_devdata cadence_nand_default = {
3250 .if_skew = 0,
3251 .has_dma = 1,
3252 };
3253
3254 static const struct of_device_id cadence_nand_dt_ids[] = {
3255 {
3256 .compatible = "cdns,hp-nfc",
3257 .data = &cadence_nand_default
3258 }, {}
3259 };
3260
3261 MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
3262
3263 static int cadence_nand_dt_probe(struct platform_device *ofdev)
3264 {
3265 struct resource *res;
3266 struct cadence_nand_dt *dt;
3267 struct cdns_nand_ctrl *cdns_ctrl;
3268 int ret;
3269 const struct cadence_nand_dt_devdata *devdata;
3270 u32 val;
3271
3272 devdata = device_get_match_data(&ofdev->dev);
3273 if (!devdata) {
3274 pr_err("Failed to find the right device id.\n");
3275 return -ENOMEM;
3276 }
3277
3278 dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
3279 if (!dt)
3280 return -ENOMEM;
3281
3282 cdns_ctrl = &dt->cdns_ctrl;
3283 cdns_ctrl->caps1 = devdata;
3284
3285 cdns_ctrl->dev = &ofdev->dev;
3286 cdns_ctrl->irq = platform_get_irq(ofdev, 0);
3287 if (cdns_ctrl->irq < 0)
3288 return cdns_ctrl->irq;
3289
3290 dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
3291
3292 cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
3293 if (IS_ERR(cdns_ctrl->reg))
3294 return PTR_ERR(cdns_ctrl->reg);
3295
3296 cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
3297 if (IS_ERR(cdns_ctrl->io.virt))
3298 return PTR_ERR(cdns_ctrl->io.virt);
3299
3300 cdns_ctrl->io.dma = res->start;
3301 cdns_ctrl->io.size = resource_size(res);
3302
3303 dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
3304 if (IS_ERR(dt->clk))
3305 return PTR_ERR(dt->clk);
3306
3307 cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
3308
3309 ret = of_property_read_u32(ofdev->dev.of_node,
3310 "cdns,board-delay-ps", &val);
3311 if (ret) {
3312 val = 4830;
3313 dev_info(cdns_ctrl->dev,
3314 "missing cdns,board-delay-ps property, %d was set\n",
3315 val);
3316 }
3317 cdns_ctrl->board_delay = val;
3318
3319 ret = cadence_nand_init(cdns_ctrl);
3320 if (ret)
3321 return ret;
3322
3323 platform_set_drvdata(ofdev, dt);
3324 return 0;
3325 }
3326
3327 static void cadence_nand_dt_remove(struct platform_device *ofdev)
3328 {
3329 struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);
3330
3331 cadence_nand_remove(&dt->cdns_ctrl);
3332 }
3333
3334 static struct platform_driver cadence_nand_dt_driver = {
3335 .probe = cadence_nand_dt_probe,
3336 .remove = cadence_nand_dt_remove,
3337 .driver = {
3338 .name = "cadence-nand-controller",
3339 .of_match_table = cadence_nand_dt_ids,
3340 },
3341 };
3342
3343 module_platform_driver(cadence_nand_dt_driver);
3344
3345 MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
3346 MODULE_LICENSE("GPL v2");
3347 MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");
3348
3349